diff --git a/.editorconfig b/.editorconfig index 1ab98bee463..482cea58e85 100644 --- a/.editorconfig +++ b/.editorconfig @@ -47,10 +47,10 @@ dotnet_style_qualification_for_property = false:suggestion dotnet_style_qualification_for_method = false:suggestion dotnet_style_qualification_for_event = false:suggestion -# Types: use keywords instead of BCL types, and permit var only when the type is clear -csharp_style_var_for_built_in_types = false:suggestion -csharp_style_var_when_type_is_apparent = false:none -csharp_style_var_elsewhere = false:suggestion +# Types: use keywords instead of BCL types, and prefer var instead of the explicit type +csharp_style_var_for_built_in_types = true:suggestion +csharp_style_var_when_type_is_apparent = true:suggestion +csharp_style_var_elsewhere = true:suggestion dotnet_style_predefined_type_for_locals_parameters_members = true:suggestion dotnet_style_predefined_type_for_member_access = true:suggestion diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 00000000000..9aa243666d0 --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,3 @@ +# .git-blame-ignore-revs +# Cleanup unnecessary whitespaces +1186b9330245593f940cac35e85bcb5494ee9a80 \ No newline at end of file diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml new file mode 100644 index 00000000000..e7081a09e5a --- /dev/null +++ b/.github/workflows/pr.yml @@ -0,0 +1,31 @@ +name: Pull Request validation + +on: + pull_request: + types: + - opened + - reopened + - edited + - labeled + - unlabeled + - synchronize + +jobs: + pull-request-validation: + name: Pull Request validation. + runs-on: ubuntu-latest + steps: + - name: Pull Request should have a label assigned. + if: ${{ always() && github.event.pull_request.labels[0] == null }} + run: | + exit 1 + + - name: Title should start with a Jira ticket. 
+ if: ${{ always() && !(startsWith(github.event.pull_request.title, 'CSHARP-')) }} + run: | + exit 1 + + - name: Title should not end with period or ellipses. + if: ${{ always() && (endsWith(github.event.pull_request.title, '.') || endsWith(github.event.pull_request.title, '…')) }} + run: | + exit 1 diff --git a/CSharpDriver.sln.DotSettings b/CSharpDriver.sln.DotSettings index fc0f06c9c74..b52f6048577 100644 --- a/CSharpDriver.sln.DotSettings +++ b/CSharpDriver.sln.DotSettings @@ -120,6 +120,7 @@ namespace $NAMESPACE$ */ using System.Collections.Generic; +using System.Linq; using MongoDB.Driver.TestHelpers; using FluentAssertions; using Xunit; diff --git a/README.md b/README.md index e2495368ff1..d94fb3b58d4 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,14 @@ MongoDB C# Driver ================= -You can get the latest stable release from the [official Nuget.org feed](https://www.nuget.org/packages/MongoDB.Driver) or from our [github releases page](https://github.com/mongodb/mongo-csharp-driver/releases). +[](https://www.nuget.org/packages/MongoDB.Driver/) +[](https://mongodb.github.io/mongo-csharp-driver/3.4.0/api/index.html) +[](https://www.mongodb.com/docs/drivers/csharp/current/) +[](https://github.com/mongodb/mongo-csharp-driver/blob/main/LICENSE.md) + +The official MongoDB .NET/C# driver. + +The MongoDB .NET/C# driver follows [semantic versioning](https://semver.org/) since v3.0.0 of its releases. 
Getting Started --------------- @@ -62,7 +69,8 @@ foreach(var person in list) Documentation ------------- * [MongoDB](https://www.mongodb.com/docs) -* [Documentation](https://www.mongodb.com/docs/drivers/csharp/current/) +* [.NET/C# Driver](https://www.mongodb.com/docs/drivers/csharp/current/) +* [API Reference](https://mongodb.github.io/mongo-csharp-driver/3.5.0/api/index.html) Questions/Bug Reports --------------------- diff --git a/benchmarks/MongoDB.Driver.Benchmarks/BenchmarkResult.cs b/benchmarks/MongoDB.Driver.Benchmarks/BenchmarkResult.cs index 1eacc5f79d0..8eefeeef684 100644 --- a/benchmarks/MongoDB.Driver.Benchmarks/BenchmarkResult.cs +++ b/benchmarks/MongoDB.Driver.Benchmarks/BenchmarkResult.cs @@ -41,7 +41,7 @@ public BenchmarkResult(BenchmarkReport benchmarkReport) Name = Categories.Contains(DriverBenchmarkCategory.BulkWriteBench) ? 
benchmarkReport.BenchmarkCase.Descriptor.WorkloadMethod.Name : benchmarkReport.BenchmarkCase.Descriptor.Type.Name; - + dataSetSize = (int)benchmarkReport.BenchmarkCase.Parameters["BenchmarkDataSetSize"]; } diff --git a/benchmarks/MongoDB.Driver.Benchmarks/DriverBenchmarkCategory.cs b/benchmarks/MongoDB.Driver.Benchmarks/DriverBenchmarkCategory.cs index 3bb57045ccc..f8b1db2057f 100644 --- a/benchmarks/MongoDB.Driver.Benchmarks/DriverBenchmarkCategory.cs +++ b/benchmarks/MongoDB.Driver.Benchmarks/DriverBenchmarkCategory.cs @@ -26,7 +26,7 @@ public static class DriverBenchmarkCategory public const string ReadBench = "ReadBench"; public const string SingleBench = "SingleBench"; public const string WriteBench = "WriteBench"; - + // not included in AllCategories as it's not part of the benchmarking spec public const string BulkWriteBench = "BulkWriteBench"; diff --git a/benchmarks/MongoDB.Driver.Benchmarks/MultiDoc/BulkWriteMixedOpsBenchmark.cs b/benchmarks/MongoDB.Driver.Benchmarks/MultiDoc/BulkWriteMixedOpsBenchmark.cs index bdbf157d64b..d7de47e2e13 100644 --- a/benchmarks/MongoDB.Driver.Benchmarks/MultiDoc/BulkWriteMixedOpsBenchmark.cs +++ b/benchmarks/MongoDB.Driver.Benchmarks/MultiDoc/BulkWriteMixedOpsBenchmark.cs @@ -48,11 +48,11 @@ public void Setup() for (var i = 0; i < 10000; i++) { var collectionName = __collectionNamespaces[i % __collectionNamespaces.Length]; - + _clientBulkWriteMixedOpsModels.Add(new BulkWriteInsertOneModel<BsonDocument>(collectionName, smallDocument.DeepClone().AsBsonDocument)); _clientBulkWriteMixedOpsModels.Add(new BulkWriteReplaceOneModel<BsonDocument>(collectionName, FilterDefinition<BsonDocument>.Empty, smallDocument.DeepClone().AsBsonDocument)); _clientBulkWriteMixedOpsModels.Add(new BulkWriteDeleteOneModel<BsonDocument>(collectionName, FilterDefinition<BsonDocument>.Empty)); - + _collectionBulkWriteMixedOpsModels.Add(new InsertOneModel<BsonDocument>(smallDocument.DeepClone().AsBsonDocument)); _collectionBulkWriteMixedOpsModels.Add(new 
ReplaceOneModel<BsonDocument>(FilterDefinition<BsonDocument>.Empty, smallDocument.DeepClone().AsBsonDocument)); _collectionBulkWriteMixedOpsModels.Add(new DeleteOneModel<BsonDocument>(FilterDefinition<BsonDocument>.Empty)); @@ -63,13 +63,13 @@ public void Setup() public void BeforeTask() { _client.DropDatabase(MongoConfiguration.PerfTestDatabaseName); - + _database = _client.GetDatabase(MongoConfiguration.PerfTestDatabaseName); foreach (var collectionName in __collectionNamespaces) { _database.CreateCollection(collectionName.Split('.')[1]); } - + _collection = _database.GetCollection<BsonDocument>(MongoConfiguration.PerfTestCollectionName); } @@ -78,7 +78,7 @@ public void SmallDocCollectionBulkWriteMixedOpsBenchmark() { _collection.BulkWrite(_collectionBulkWriteMixedOpsModels, new()); } - + [Benchmark] public void SmallDocClientBulkWriteMixedOpsBenchmark() { diff --git a/benchmarks/MongoDB.Driver.Benchmarks/MultiDoc/LargeDocBulkInsertBenchmark.cs b/benchmarks/MongoDB.Driver.Benchmarks/MultiDoc/LargeDocBulkInsertBenchmark.cs index 4ed26e65226..32576821996 100644 --- a/benchmarks/MongoDB.Driver.Benchmarks/MultiDoc/LargeDocBulkInsertBenchmark.cs +++ b/benchmarks/MongoDB.Driver.Benchmarks/MultiDoc/LargeDocBulkInsertBenchmark.cs @@ -32,7 +32,7 @@ public class LargeDocBulkInsertBenchmark private BsonDocument[] _largeDocuments; private InsertOneModel<BsonDocument>[] _collectionBulkWriteInsertModels; private BulkWriteInsertOneModel<BsonDocument>[] _clientBulkWriteInsertModels; - + private static readonly CollectionNamespace __collectionNamespace = CollectionNamespace.FromFullName($"{MongoConfiguration.PerfTestDatabaseName}.{MongoConfiguration.PerfTestCollectionName}"); @@ -63,13 +63,13 @@ public void InsertManyLargeBenchmark() { _collection.InsertMany(_largeDocuments, new()); } - + [Benchmark] public void LargeDocCollectionBulkWriteInsertBenchmark() { _collection.BulkWrite(_collectionBulkWriteInsertModels, new()); } - + [Benchmark] public void 
LargeDocClientBulkWriteInsertBenchmark() { diff --git a/benchmarks/MongoDB.Driver.Benchmarks/MultiDoc/SmallDocBulkInsertBenchmark.cs b/benchmarks/MongoDB.Driver.Benchmarks/MultiDoc/SmallDocBulkInsertBenchmark.cs index a1d62e26038..a932541575f 100644 --- a/benchmarks/MongoDB.Driver.Benchmarks/MultiDoc/SmallDocBulkInsertBenchmark.cs +++ b/benchmarks/MongoDB.Driver.Benchmarks/MultiDoc/SmallDocBulkInsertBenchmark.cs @@ -62,13 +62,13 @@ public void InsertManySmallBenchmark() { _collection.InsertMany(_smallDocuments, new()); } - + [Benchmark] public void SmallDocCollectionBulkWriteInsertBenchmark() { _collection.BulkWrite(_collectionBulkWriteInsertModels, new()); } - + [Benchmark] public void SmallDocClientBulkWriteInsertBenchmark() { diff --git a/build.cake b/build.cake index d80fe9bea0d..134248995bb 100644 --- a/build.cake +++ b/build.cake @@ -117,12 +117,6 @@ Task("Test") items: GetFiles("./**/*.Tests.csproj").Where(name => !name.ToString().Contains("Atlas")), action: (BuildConfig buildConfig, Path testProject) => { - if (Environment.GetEnvironmentVariable("MONGODB_API_VERSION") != null && - testProject.ToString().Contains("Legacy")) - { - return; // Legacy tests are exempt from Version API testing - } - var mongoX509ClientCertificatePath = Environment.GetEnvironmentVariable("MONGO_X509_CLIENT_CERTIFICATE_PATH"); if (mongoX509ClientCertificatePath != null) { @@ -134,7 +128,7 @@ Task("Test") Console.WriteLine($"MONGO_X509_CLIENT_CERTIFICATE_PASSWORD={mongoX509ClientCertificatePassword}"); } - RunTests(buildConfig, testProject); + RunTests(buildConfig, testProject, filter: "Category=\"Integration\""); }) .DeferOnError(); @@ -162,13 +156,6 @@ Task("TestAtlasConnectivity") items: GetFiles("./**/AtlasConnectivity.Tests.csproj"), action: (BuildConfig buildConfig, Path testProject) => RunTests(buildConfig, testProject)); -Task("TestAtlasDataLake") - .IsDependentOn("Build") - .DoesForEach( - items: GetFiles("./**/MongoDB.Driver.Tests.csproj"), - action: (BuildConfig 
buildConfig, Path testProject) => - RunTests(buildConfig, testProject, filter: "Category=\"AtlasDataLake\"")); - Task("TestAtlasSearch") .IsDependentOn("Build") .DoesForEach( @@ -208,17 +195,6 @@ Task("TestMongoDbOidc") action: (BuildConfig buildConfig, Path testProject) => RunTests(buildConfig, testProject, filter: "Category=\"MongoDbOidc\"")); -Task("TestServerless") - .IsDependentOn("Build") - .DoesForEach( - items: GetFiles("./**/MongoDB.Driver.Tests.csproj"), - action: (BuildConfig buildConfig, Path testProject) => - RunTests(buildConfig, testProject, filter: "Category=\"Serverless\"")); - -Task("TestServerlessNet472").IsDependentOn("TestServerless"); -Task("TestServerlessNetStandard21").IsDependentOn("TestServerless"); -Task("TestServerlessNet60").IsDependentOn("TestServerless"); - Task("TestLibMongoCrypt") .IsDependentOn("Build") .DoesForEach( @@ -235,6 +211,10 @@ Task("TestLoadBalanced") Task("TestLoadBalancedNetStandard21").IsDependentOn("TestLoadBalanced"); Task("TestLoadBalancedNet60").IsDependentOn("TestLoadBalanced"); +Task("TestSocks5ProxyNet472").IsDependentOn("TestSocks5Proxy"); +Task("TestSocks5ProxyNetStandard21").IsDependentOn("TestSocks5Proxy"); +Task("TestSocks5ProxyNet60").IsDependentOn("TestSocks5Proxy"); + Task("TestCsfleWithMockedKms") .IsDependentOn("TestLibMongoCrypt") .DoesForEach( @@ -271,6 +251,22 @@ Task("TestCsfleWithGcpKms") action: (BuildConfig buildConfig, Path testProject) => RunTests(buildConfig, testProject, filter: "Category=\"CsfleGCPKMS\"")); +Task("TestX509") + .IsDependentOn("Build") + .DoesForEach( + items: GetFiles("./**/MongoDB.Driver.Tests.csproj"), + action: (BuildConfig buildConfig, Path testProject) => + RunTests(buildConfig, testProject, filter: "Category=\"X509\"")); + +Task("TestX509Net60").IsDependentOn("TestX509"); + +Task("TestSocks5Proxy") + .IsDependentOn("Build") + .DoesForEach( + items: GetFiles("./**/*.Tests.csproj"), + action: (BuildConfig buildConfig, Path testProject) => + RunTests(buildConfig, 
testProject, filter: "Category=\"Socks5Proxy\"")); + Task("Package") .IsDependentOn("PackageNugetPackages"); diff --git a/build.config b/build.config index 1c2f8500d19..badb4438541 100644 --- a/build.config +++ b/build.config @@ -1,3 +1,3 @@ #!/usr/bin/env bash -CAKE_VERSION=2.2.0 +CAKE_VERSION=2.3.0 DOTNET_VERSION=8.0.204 diff --git a/build.ps1 b/build.ps1 index c4eb3e1ae7d..cc4a4d4aa80 100644 --- a/build.ps1 +++ b/build.ps1 @@ -89,17 +89,16 @@ if($FoundDotNetCliVersion -ne $DotNetVersion) { New-Item -Path $InstallPath -ItemType Directory -Force | Out-Null; } - # N.B. We explicitly install .NET Core 2.1 and 3.1 because .NET 5.0 SDK can build those TFMs + # N.B. We explicitly install .NET Core 3.1 because .NET 5.0 SDK can build those TFMs # but will silently upgrade to a more recent runtime to execute tests if the desired runtime # isn't available. For example, `dotnet run --framework netcoreapp3.0` will silently run # on .NET 5.0 if .NET Core 3.0 and 3.1 aren't installed. - # This solution is admittedly hacky as .NET Core 2.1 and 3.1 won't be installed if + # This solution is admittedly hacky as .NET Core 3.1 won't be installed if # $DOTNET_VERSION matches $DOTNET_INSTALLED_VERSION, but it minimizes the changes required # to install required dependencies on Evergreen. 
if ($IsMacOS -or $IsLinux) { $ScriptPath = Join-Path $InstallPath 'dotnet-install.sh' (New-Object System.Net.WebClient).DownloadFile($DotNetUnixInstallerUri, $ScriptPath); - & bash $ScriptPath --install-dir "$InstallPath" --channel 2.1 --no-path & bash $ScriptPath --install-dir "$InstallPath" --channel 3.1 --no-path & bash $ScriptPath --install-dir "$InstallPath" --channel 5.0 --no-path & bash $ScriptPath --install-dir "$InstallPath" --channel 6.0 --no-path @@ -111,7 +110,6 @@ if($FoundDotNetCliVersion -ne $DotNetVersion) { else { $ScriptPath = Join-Path $InstallPath 'dotnet-install.ps1' (New-Object System.Net.WebClient).DownloadFile($DotNetInstallerUri, $ScriptPath); - & $ScriptPath -Channel 2.1 -InstallDir $InstallPath; & $ScriptPath -Channel 3.1 -InstallDir $InstallPath; & $ScriptPath -Channel 5.0 -InstallDir $InstallPath; & $ScriptPath -Channel 6.0 -InstallDir $InstallPath; diff --git a/build.sh b/build.sh index cba9c4531b4..a3ce5a72b9f 100755 --- a/build.sh +++ b/build.sh @@ -33,18 +33,17 @@ if [ "$DOTNET_VERSION" != "$DOTNET_INSTALLED_VERSION" ]; then mkdir "$SCRIPT_DIR/.dotnet" fi curl -Lfo "$SCRIPT_DIR/.dotnet/dotnet-install.sh" https://builds.dotnet.microsoft.com/dotnet/scripts/v1/dotnet-install.sh - # N.B. We explicitly install .NET Core 2.1 and 3.1 because .NET 6.0 SDK can build those TFMs + # N.B. We explicitly install .NET Core 3.1 because .NET 6.0 SDK can build those TFMs # but will silently upgrade to a more recent runtime to execute tests if the desired runtime # isn't available. For example, `dotnet run --framework netcoreapp3.0` will silently run # on .NET 6.0 if .NET Core 3.0 and 3.1 aren't installed. - # This solution is admittedly hacky as .NET Core 2.1 and 3.1 won't be installed if + # This solution is admittedly hacky as .NET Core 3.1 won't be installed if # $DOTNET_VERSION matches $DOTNET_INSTALLED_VERSION, but it minimizes the changes required # to install required dependencies on Evergreen. 
# Since ARM64 support was first added in .NET 6.0, the following commands will install: # | CPU | 2.1 | 3.1 | Latest | # | x64 | x64 | x64 | x64 | # | arm64 | x64 | x64 | arm64 | - bash "$SCRIPT_DIR/.dotnet/dotnet-install.sh" --channel 2.1 --architecture x64 --install-dir .dotnet --no-path bash "$SCRIPT_DIR/.dotnet/dotnet-install.sh" --channel 3.1 --architecture x64 --install-dir .dotnet --no-path bash "$SCRIPT_DIR/.dotnet/dotnet-install.sh" --channel 5.0 --architecture x64 --install-dir .dotnet --no-path bash "$SCRIPT_DIR/.dotnet/dotnet-install.sh" --channel 6.0 --install-dir .dotnet --no-path diff --git a/evergreen/add-ca-certs.sh b/evergreen/add-ca-certs.sh index 52332b1a783..f95d4f49c35 100644 --- a/evergreen/add-ca-certs.sh +++ b/evergreen/add-ca-certs.sh @@ -1,6 +1,5 @@ #!/usr/bin/env bash -set -o xtrace # Write all commands first to stderr set -o errexit # Exit the script with an error if any of the commands fail # Supported/used environment variables: diff --git a/evergreen/cleanup-test-resources.sh b/evergreen/cleanup-test-resources.sh index 3a2d1f9c23f..8200363e371 100644 --- a/evergreen/cleanup-test-resources.sh +++ b/evergreen/cleanup-test-resources.sh @@ -1,7 +1,5 @@ #!/usr/bin/env bash -set -o xtrace # Write all commands first to stderr - # Environment variables used as input: # OS The current operating system @@ -15,5 +13,29 @@ else ps -ax | grep mongocryptd pkill -f 'mongocryptd' || echo 'mongocryptd was already killed or not launched' # check that it's actually killed - ps -ax | grep mongocryptd + ps -ax | grep mongocryptd +fi + +# The proxy server processes have almost certainly already been killed by the evergreen process cleaning though. +# This is just to be sure it already happened and delete the file containing the saved PIDs. + +echo "Attempting to kill proxy servers if present and deleting PID file." +PID_FILE="socks5_pids.txt" + +if [[ ! 
-f "$PID_FILE" ]]; then + echo "No PID file found ($PID_FILE)" + exit 0 fi + +cat "$PID_FILE" | while read -r pid; do + if [[ -n "$pid" ]]; then + if [[ "$OS" =~ Windows|windows ]]; then + powershell -NoProfile -Command "Stop-Process -Id $pid -Force" 2>/dev/null || \ + echo "PID $pid already gone" + else + kill "$pid" 2>/dev/null || echo "PID $pid already gone" + fi + fi +done + +rm -f "$PID_FILE" \ No newline at end of file diff --git a/evergreen/compile.sh b/evergreen/compile.sh index 0c3ea06ab3d..c7a08497898 100755 --- a/evergreen/compile.sh +++ b/evergreen/compile.sh @@ -1,6 +1,5 @@ #!/usr/bin/env bash -set -o xtrace # Write all commands first to stderr set -o errexit # Exit the script with error if any of the commands fail ############################################ diff --git a/evergreen/convert-client-cert-to-pkcs12.sh b/evergreen/convert-client-cert-to-pkcs12.sh index 52e800dfa2e..b5523c86d49 100755 --- a/evergreen/convert-client-cert-to-pkcs12.sh +++ b/evergreen/convert-client-cert-to-pkcs12.sh @@ -1,32 +1,41 @@ #!/usr/bin/env bash -set -o xtrace # Write all commands first to stderr set -o errexit # Exit the script with an error if any of the commands fail # Environment variables used as input: -# CLIENT_PEM Path to mongo -orchestration's client.pem: must be set. 
-# MONGO_X509_CLIENT_P12 Filename for client certificate in p12 format +# CLIENT_PEM Path to mongo client.pem: must be set +# P12_FILENAME Filename for client certificate in p12 format +# P12_PASSWORD Password for client certificate in p12 format +# P12_FRIENDLY_NAME Friendly name for client certificate in p12 format +# OUT_CLIENT_PATH_VAR Name of the output variable containing the path of the p12 certificate +# OUT_CLIENT_PASSWORD_VAR Name of the output variable containing the password for the p12 certificate # # Environment variables produced as output: -# MONGODB_X509_CLIENT_P12_PATH Absolute path to client certificate in p12 format -# MONGO_X509_CLIENT_CERTIFICATE_PASSWORD Password for client certificate +# ${OUT_CLIENT_PATH_VAR} Absolute path to client certificate in p12 format (OUT_CLIENT_PATH_VAR contains the actual variable being exported) +# ${OUT_CLIENT_PASSWORD_VAR} Password for client certificate (OUT_CLIENT_PASSWORD_VAR contains the actual variable being exported) -CLIENT_PEM=${CLIENT_PEM:-nil} -MONGO_X509_CLIENT_P12=${MONGO_X509_CLIENT_P12:-client.p12} -MONGO_X509_CLIENT_CERTIFICATE_PASSWORD=${MONGO_X509_CLIENT_CERTIFICATE_PASSWORD:-Picard-Alpha-Alpha-3-0-5} +# Input environment variables and default values +: "${CLIENT_PEM:=nil}" +: "${P12_FRIENDLY_NAME:="Drivers Client Certificate"}" +: "${P12_FILENAME:="client.p12"}" +: "${P12_PASSWORD:="Picard-Alpha-Alpha-3-0-5"}" +: "${OUT_CLIENT_PATH_VAR:="MONGO_X509_CLIENT_CERTIFICATE_PATH"}" +: "${OUT_CLIENT_PASSWORD_VAR:="MONGO_X509_CLIENT_CERTIFICATE_PASSWORD"}" if [[ "$CLIENT_PEM" == "nil" ]]; then + echo "Error: CLIENT_PEM must be set." 
exit 1 fi openssl pkcs12 -export -keypbe PBE-SHA1-3DES -certpbe PBE-SHA1-3DES -macalg sha1 -in "${CLIENT_PEM}" \ - -out "${MONGO_X509_CLIENT_P12}" \ - -name "Drivers Client Certificate" \ - -password "pass:${MONGO_X509_CLIENT_CERTIFICATE_PASSWORD}" + -out "${P12_FILENAME}" \ + -name "${P12_FRIENDLY_NAME}" \ + -password "pass:${P12_PASSWORD}" +# Determine path using realpath (compatible across macOS, Linux, and Windows) if [[ "$OS" =~ MAC|Mac|mac ]]; then - # this function is not available on mac OS + # Functionality to mimic `realpath` on macOS function realpath() { OURPWD=$PWD cd "$(dirname "$1")" @@ -40,11 +49,17 @@ if [[ "$OS" =~ MAC|Mac|mac ]]; then echo "$REALPATH" } fi -MONGO_X509_CLIENT_CERTIFICATE_PATH=$(realpath "${MONGO_X509_CLIENT_P12}") + +CERT_PATH=$(realpath "${P12_FILENAME}") if [[ "$OS" =~ Windows|windows ]]; then - MONGO_X509_CLIENT_CERTIFICATE_PATH=$(cygpath -w "${MONGO_X509_CLIENT_CERTIFICATE_PATH}") + CERT_PATH=$(cygpath -w "${CERT_PATH}") fi -export MONGO_X509_CLIENT_CERTIFICATE_PATH -export MONGO_X509_CLIENT_CERTIFICATE_PASSWORD +# Output environment variables +export "${OUT_CLIENT_PASSWORD_VAR}"="${P12_PASSWORD}" +export "${OUT_CLIENT_PATH_VAR}"="${CERT_PATH}" + +echo "Exported variables:" +echo "${OUT_CLIENT_PASSWORD_VAR}=${!OUT_CLIENT_PASSWORD_VAR}" +echo "${OUT_CLIENT_PATH_VAR}=${!OUT_CLIENT_PATH_VAR}" \ No newline at end of file diff --git a/evergreen/evergreen.yml b/evergreen/evergreen.yml index 572d312625f..f55c269e032 100644 --- a/evergreen/evergreen.yml +++ b/evergreen/evergreen.yml @@ -76,7 +76,6 @@ functions: TEST_RESULTS_PATH: "./mongo-csharp-driver/build/test-results/TEST-*.xml" PREPARE_SHELL: | set -o errexit - set -o xtrace export DRIVERS_TOOLS="$DRIVERS_TOOLS" export MONGO_ORCHESTRATION_HOME="$MONGO_ORCHESTRATION_HOME" export MONGODB_BINARIES="$MONGODB_BINARIES" @@ -98,9 +97,13 @@ functions: install-dotnet: - command: shell.exec params: + include_expansions_in_env: + - "OS" + - "DOTNET_SDK_VERSION" + - "FRAMEWORK" script: | 
${PREPARE_SHELL} - OS=${OS} DOTNET_SDK_VERSION=${DOTNET_SDK_VERSION} bash ${PROJECT_DIRECTORY}/evergreen/install-dotnet.sh + bash ${PROJECT_DIRECTORY}/evergreen/install-dotnet.sh prepare-resources: - command: shell.exec @@ -314,6 +317,10 @@ functions: bootstrap-mongohoused: - command: shell.exec params: + include_expansions_in_env: + - "AWS_ACCESS_KEY_ID" + - "AWS_SECRET_ACCESS_KEY" + - "AWS_SESSION_TOKEN" script: | DRIVERS_TOOLS="${DRIVERS_TOOLS}" bash ${DRIVERS_TOOLS}/.evergreen/atlas_data_lake/pull-mongohouse-image.sh - command: shell.exec @@ -352,6 +359,18 @@ functions: cd ${DRIVERS_TOOLS}/.evergreen DRIVERS_TOOLS=${DRIVERS_TOOLS} MONGODB_URI=${MONGODB_URI} bash ${DRIVERS_TOOLS}/.evergreen/run-load-balancer.sh stop + run-unit-tests: + - command: shell.exec + type: test + params: + working_dir: mongo-csharp-driver + shell: "bash" + include_expansions_in_env: + - "FRAMEWORK" + script: | + ${PREPARE_SHELL} + bash evergreen/run-unit-tests.sh + run-tests: - command: shell.exec type: test @@ -376,8 +395,6 @@ functions: CRYPT_SHARED_LIB_PATH=${CRYPT_SHARED_LIB_PATH} \ evergreen/run-tests.sh echo "Skipping certificate removal..." - OS=${OS} \ - evergreen/cleanup-test-resources.sh run-csfle-with-mocked-kms-tests: - command: shell.exec @@ -402,8 +419,6 @@ functions: TARGET="TestCsfleWithMockedKms" \ CRYPT_SHARED_LIB_PATH=${CRYPT_SHARED_LIB_PATH} \ evergreen/run-tests.sh - OS=${OS} \ - evergreen/cleanup-test-resources.sh run-csfle-with-mongocryptd-tests: - command: shell.exec @@ -430,8 +445,6 @@ functions: CRYPT_SHARED_LIB_PATH="" \ evergreen/run-tests.sh echo "Skipping certificate removal..." - OS=${OS} \ - evergreen/cleanup-test-resources.sh run-atlas-connectivity-tests: - command: shell.exec @@ -439,6 +452,10 @@ functions: params: shell: "bash" working_dir: mongo-csharp-driver + include_expansions_in_env: + - "AWS_ACCESS_KEY_ID" + - "AWS_SECRET_ACCESS_KEY" + - "AWS_SESSION_TOKEN" script: | . 
${DRIVERS_TOOLS}/.evergreen/secrets_handling/setup-secrets.sh drivers/atlas_connect . evergreen/run-atlas-connectivity-tests.sh @@ -476,11 +493,40 @@ functions: script: | ${PREPARE_SHELL} MONGODB_URI="${MONGODB_URI}" ../../evergreen/run-perf-tests.sh - - command: perf.send + - command: shell.exec params: - file: mongo-csharp-driver/benchmarks/MongoDB.Driver.Benchmarks/Benchmark.Artifacts/results/evergreen-results.json + script: | + # We use the requester expansion to determine whether the data is from a mainline evergreen run or not + if [ "${requester}" == "commit" ]; then + is_mainline=true + else + is_mainline=false + fi - assume-ec2-role: + # We parse the username out of the order_id as patches append that in and SPS does not need that information + parsed_order_id=$(echo "${revision_order_id}" | awk -F'_' '{print $NF}') + + # Submit the performance data to the SPS endpoint + response=$(curl -s -w "\nHTTP_STATUS:%{http_code}" -X 'POST' \ + "https://performance-monitoring-api.corp.mongodb.com/raw_perf_results/cedar_report?project=${project_id}&version=${version_id}&variant=${build_variant}&order=$parsed_order_id&task_name=${task_name}&task_id=${task_id}&execution=${execution}&mainline=$is_mainline" \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d @mongo-csharp-driver/benchmarks/MongoDB.Driver.Benchmarks/Benchmark.Artifacts/results/evergreen-results.json) + + http_status=$(echo "$response" | grep "HTTP_STATUS" | awk -F':' '{print $2}') + response_body=$(echo "$response" | sed '/HTTP_STATUS/d') + + # We want to throw an error if the data was not successfully submitted + if [ "$http_status" -ne 200 ]; then + echo "Error: Received HTTP status $http_status" + echo "Response Body: $response_body" + exit 1 + fi + + echo "Response Body: $response_body" + echo "HTTP Status: $http_status" + + assume-aws-test-secrets-role: - command: ec2.assume_role params: role_arn: ${aws_test_secrets_role} @@ -604,24 
+650,18 @@ functions: script: | DRIVERS_TOOLS=${DRIVERS_TOOLS} OS=${OS} ASSERT_NO_URI_CREDS=true evergreen/run-mongodb-aws-test.sh session-creds - run-atlas-data-lake-test: - - command: shell.exec - type: test - params: - working_dir: mongo-csharp-driver - script: | - ${PREPARE_SHELL} - evergreen/run-atlas-data-lake-test.sh - run-atlas-search-test: - command: shell.exec type: test params: working_dir: mongo-csharp-driver include_expansions_in_env: - - "ATLAS_SEARCH" + - "AWS_ACCESS_KEY_ID" + - "AWS_SECRET_ACCESS_KEY" + - "AWS_SESSION_TOKEN" script: | ${PREPARE_SHELL} + . ${DRIVERS_TOOLS}/.evergreen/secrets_handling/setup-secrets.sh drivers/atlas-qa evergreen/run-atlas-search-test.sh run-atlas-search-index-helpers-test: @@ -675,19 +715,6 @@ functions: args: - evergreen/run-mongodb-oidc-tests.sh - run-serverless-tests: - - command: shell.exec - type: test - params: - working_dir: mongo-csharp-driver - script: | - ${PREPARE_SHELL} - AUTH=${AUTH} \ - FRAMEWORK=${FRAMEWORK} \ - SSL=${SSL} \ - CRYPT_SHARED_LIB_PATH=${CRYPT_SHARED_LIB_PATH} \ - evergreen/run-serverless-tests.sh - run-smoke-tests: - command: shell.exec type: test @@ -792,6 +819,14 @@ functions: mongo-orchestration stop cd - rm -rf $DRIVERS_TOOLS || true + - command: subprocess.exec + params: + binary: bash + working_dir: mongo-csharp-driver + include_expansions_in_env: + - "OS" + args: + - evergreen/cleanup-test-resources.sh fix-absolute-paths: - command: shell.exec @@ -988,6 +1023,84 @@ functions: bash $DRIVERS_TOOLS/.evergreen/auth_oidc/k8s/run-driver-test.sh bash $DRIVERS_TOOLS/.evergreen/auth_oidc/k8s/teardown-pod.sh + setup-x509-tests: + - command: shell.exec + params: + shell: "bash" + include_expansions_in_env: + - "AWS_ACCESS_KEY_ID" + - "AWS_SECRET_ACCESS_KEY" + - "AWS_SESSION_TOKEN" + script: | + ${DRIVERS_TOOLS}/.evergreen/secrets_handling/setup-secrets.sh drivers/atlas_connect + source secrets-export.sh + + echo $ATLAS_X509_DEV_CERT_BASE64 | base64 --decode > ${DRIVERS_TOOLS}/CLIENT_CERT.pem + 
echo $ATLAS_X509_DEV_CERT_NOUSER_BASE64 | base64 --decode > ${DRIVERS_TOOLS}/CLIENT_NO_USER_CERT.pem + + run-x509-tests: + - command: shell.exec + type: test + params: + shell: "bash" + working_dir: mongo-csharp-driver + script: | + source ../secrets-export.sh + ${PREPARE_SHELL} + OS=${OS} \ + evergreen/add-ca-certs.sh + P12_FRIENDLY_NAME="Drivers No-User Client Certificate" \ + P12_FILENAME="client_no_user.p12" \ + OUT_CLIENT_PASSWORD_VAR="MONGO_X509_CLIENT_NO_USER_CERTIFICATE_PASSWORD" \ + OUT_CLIENT_PATH_VAR="MONGO_X509_CLIENT_NO_USER_CERTIFICATE_PATH" \ + CLIENT_PEM=${DRIVERS_TOOLS}/CLIENT_NO_USER_CERT.pem \ + source evergreen/convert-client-cert-to-pkcs12.sh + MONGODB_URI="$ATLAS_X509_DEV" \ + CLIENT_PEM=${DRIVERS_TOOLS}/CLIENT_CERT.pem \ + TOPOLOGY=${TOPOLOGY} \ + OS=${OS} \ + FRAMEWORK=${FRAMEWORK} \ + TARGET="TestX509" \ + evergreen/run-tests.sh + + setup-socks5-proxy: + - command: shell.exec + params: + background: true + script: | + ${PREPARE_SHELL} + MONGODB_URI="${MONGODB_URI}" + # Read the MongoDB URI connection string and extract an arbitrary member's host:port + HOST_PORT=$(echo "$MONGODB_URI" | sed 's|mongodb://||' | cut -d',' -f1) + + MAP_ARG="localhost:12345 to $HOST_PORT" + python3 $DRIVERS_TOOLS/.evergreen/socks5srv.py --map "$MAP_ARG" --port 1080 --auth username:p4ssw0rd & PROXY_PID1=$! + python3 $DRIVERS_TOOLS/.evergreen/socks5srv.py --map "$MAP_ARG" --port 1081 & PROXY_PID2=$! 
+ + echo "$PROXY_PID1" > socks5_pids.txt + echo "$PROXY_PID2" >> socks5_pids.txt + + echo "Started proxies with PIDs: $PROXY_PID1, $PROXY_PID2" + + run-socks5-proxy-tests: + - command: shell.exec + type: test + params: + working_dir: "mongo-csharp-driver" + shell: "bash" + script: | + export SOCKS5_PROXY_SERVERS_ENABLED=true + ${PREPARE_SHELL} + OS=${OS} \ + evergreen/add-ca-certs.sh + SSL=${SSL} \ + MONGODB_URI="${MONGODB_URI}" \ + TOPOLOGY=${TOPOLOGY} \ + OS=${OS} \ + FRAMEWORK=${FRAMEWORK} \ + TARGET="TestSocks5Proxy" \ + evergreen/run-tests.sh + pre: - func: fetch-source - func: prepare-resources @@ -1002,6 +1115,36 @@ post: - func: cleanup tasks: + - name: unit-tests-net472 + commands: + - command: expansions.update + params: + updates: + - key: 'FRAMEWORK' + value: 'net472' + - func: install-dotnet + - func: run-unit-tests + + - name: unit-tests-netstandard21 + commands: + - command: expansions.update + params: + updates: + - key: 'FRAMEWORK' + value: 'netstandard2.1' + - func: install-dotnet + - func: run-unit-tests + + - name: unit-tests-net60 + commands: + - command: expansions.update + params: + updates: + - key: 'FRAMEWORK' + value: 'net6.0' + - func: install-dotnet + - func: run-unit-tests + - name: test-net472 commands: - func: setup-csfle-secrets @@ -1101,7 +1244,7 @@ tasks: - name: atlas-connectivity-tests commands: - - func: assume-ec2-role + - func: assume-aws-test-secrets-role - func: run-atlas-connectivity-tests - name: test-gssapi @@ -1126,6 +1269,30 @@ tasks: vars: FRAMEWORK: net60 + - name: test-socks5-proxy-net472 + commands: + - func: bootstrap-mongo-orchestration + - func: setup-socks5-proxy + - func: run-socks5-proxy-tests + vars: + FRAMEWORK: net472 + + - name: test-socks5-proxy-netstandard21 + commands: + - func: bootstrap-mongo-orchestration + - func: setup-socks5-proxy + - func: run-socks5-proxy-tests + vars: + FRAMEWORK: netstandard21 + + - name: test-socks5-proxy-net60 + commands: + - func: bootstrap-mongo-orchestration + - func: 
setup-socks5-proxy + - func: run-socks5-proxy-tests + vars: + FRAMEWORK: net60 + - name: plain-auth-tests commands: - func: run-plain-auth-tests @@ -1181,13 +1348,9 @@ tasks: FRAMEWORK: net60 REQUIRE_API_VERSION: true - - name: atlas-data-lake-test - commands: - - func: bootstrap-mongohoused - - func: run-atlas-data-lake-test - - name: atlas-search-test commands: + - func: assume-aws-test-secrets-role - func: run-atlas-search-test - name: atlas-search-index-helpers-test @@ -1245,7 +1408,7 @@ tasks: ${PREPARE_SHELL} dotnet build tar czf /tmp/mongo-csharp-driver.tgz tests/*.Tests/bin/Debug/net6.0/ ./evergreen/run-mongodb-oidc-env-tests.sh - - func: assume-ec2-role + - func: assume-aws-test-secrets-role - func: run_oidc_k8s_tests vars: K8S_VARIANT: eks @@ -1256,12 +1419,6 @@ tasks: vars: K8S_VARIANT: aks - - name: test-serverless - exec_timeout_secs: 2700 # 45 minutes: 15 for setup + 30 for tests - commands: - - func: setup-csfle-secrets - - func: run-serverless-tests - - name: test-ocsp-rsa-valid-cert-server-staples-ca-responder tags: ["ocsp"] commands: @@ -1637,6 +1794,9 @@ tasks: - name: test-efcore commands: - func: install-dotnet + vars: + DOTNET_SDK_VERSION: 9.0 + FRAMEWORK: net8.0 - func: bootstrap-mongo-orchestration - command: expansions.update params: @@ -1649,8 +1809,20 @@ tasks: LOCAL_PATH: ${workdir}/efcore SCRIPT: | ${PREPARE_SHELL} + if [[ $OS =~ [Ww]indows.* ]]; then + export ATLAS_URI=Disabled + fi + CRYPT_SHARED_LIB_PATH="${CRYPT_SHARED_LIB_PATH}" DRIVER_VERSION="${PACKAGE_VERSION}" MONGODB_VERSION="${VERSION}" bash ./evergreen/run-tests.sh + - name: x509-auth-tests + commands: + - func: assume-aws-test-secrets-role + - func: setup-x509-tests + - func: run-x509-tests + vars: + FRAMEWORK: net60 + axes: - id: version display_name: MongoDB Version @@ -1687,10 +1859,6 @@ axes: display_name: "4.2" variables: VERSION: "4.2" - - id: "4.0" - display_name: "4.0" - variables: - VERSION: "4.0" - id: os display_name: OS @@ -1820,14 +1988,6 @@ axes: variables: 
FRAMEWORK: net80 - - id: serverless - display_name: Serverless - values: - - id: "Passthrough" - display_name: "Serverless Passthrough Proxy" - variables: - VAULT_NAME: "serverless" - task_groups: - name: testazurekms-task-group setup_group_can_fail_task: true @@ -1839,7 +1999,7 @@ task_groups: - func: fix-absolute-paths - func: init-test-results - func: make-files-executable - - func: assume-ec2-role + - func: assume-aws-test-secrets-role - command: subprocess.exec params: binary: bash @@ -1885,7 +2045,7 @@ task_groups: - func: fix-absolute-paths - func: init-test-results - func: make-files-executable - - func: assume-ec2-role + - func: assume-aws-test-secrets-role - command: subprocess.exec params: binary: bash @@ -1929,7 +2089,7 @@ task_groups: - func: fix-absolute-paths - func: init-test-results - func: make-files-executable - - func: assume-ec2-role + - func: assume-aws-test-secrets-role - command: subprocess.exec params: binary: bash @@ -1937,14 +2097,9 @@ task_groups: - "AWS_ACCESS_KEY_ID" - "AWS_SECRET_ACCESS_KEY" - "AWS_SESSION_TOKEN" - args: - - ${DRIVERS_TOOLS}/.evergreen/atlas/setup-secrets.sh - - "atlas" - - command: subprocess.exec - params: - binary: bash env: - LAMBDA_STACK_NAME: dbx-csharp-lambda + CLUSTER_PREFIX: dbx-csharp-search-index + MONGODB_VERSION: "7.0" args: - ${DRIVERS_TOOLS}/.evergreen/atlas/setup-atlas-cluster.sh - command: expansions.update @@ -1955,8 +2110,6 @@ task_groups: - command: subprocess.exec params: binary: bash - env: - LAMBDA_STACK_NAME: dbx-csharp-lambda args: - ${DRIVERS_TOOLS}/.evergreen/atlas/teardown-atlas-cluster.sh tasks: @@ -1968,11 +2121,17 @@ task_groups: setup_group: - func: fetch-source - func: prepare-resources + - func: assume-aws-test-secrets-role - command: subprocess.exec params: binary: bash + include_expansions_in_env: + - "AWS_ACCESS_KEY_ID" + - "AWS_SECRET_ACCESS_KEY" + - "AWS_SESSION_TOKEN" env: LAMBDA_STACK_NAME: dbx-csharp-lambda + MONGODB_VERSION: "7.0" args: - 
${DRIVERS_TOOLS}/.evergreen/atlas/setup-atlas-cluster.sh - command: expansions.update @@ -1997,7 +2156,7 @@ task_groups: - func: fix-absolute-paths - func: init-test-results - func: make-files-executable - - func: assume-ec2-role + - func: assume-aws-test-secrets-role - command: subprocess.exec params: binary: bash @@ -2025,12 +2184,17 @@ task_groups: - func: prepare-resources - func: fix-absolute-paths - func: make-files-executable + - func: assume-aws-test-secrets-role - func: install-dotnet - command: subprocess.exec params: binary: bash env: AZUREOIDC_VMNAME_PREFIX: "CSHARP_DRIVER" + include_expansions_in_env: + - "AWS_ACCESS_KEY_ID" + - "AWS_SECRET_ACCESS_KEY" + - "AWS_SESSION_TOKEN" args: - ${DRIVERS_TOOLS}/.evergreen/auth_oidc/azure/create-and-setup-vm.sh teardown_group: @@ -2051,12 +2215,17 @@ task_groups: - func: prepare-resources - func: fix-absolute-paths - func: make-files-executable + - func: assume-aws-test-secrets-role - func: install-dotnet - command: subprocess.exec params: binary: bash env: GCPOIDC_VMNAME_PREFIX: "CSHARP_DRIVER" + include_expansions_in_env: + - "AWS_ACCESS_KEY_ID" + - "AWS_SECRET_ACCESS_KEY" + - "AWS_SESSION_TOKEN" args: - ${DRIVERS_TOOLS}/.evergreen/auth_oidc/gcp/setup.sh teardown_group: @@ -2077,7 +2246,7 @@ task_groups: - func: fix-absolute-paths - func: make-files-executable - func: install-dotnet - - func: assume-ec2-role + - func: assume-aws-test-secrets-role - command: subprocess.exec params: binary: bash @@ -2096,35 +2265,6 @@ task_groups: tasks: - test-oidc-k8s - - name: serverless-task-group - setup_group_can_fail_task: true - setup_group_timeout_secs: 1800 # 30 minutes - setup_group: - - func: fetch-source - - func: prepare-resources - - func: fix-absolute-paths - - func: make-files-executable - - func: assume-ec2-role - - command: subprocess.exec - params: - binary: bash - env: - VAULT_NAME: ${VAULT_NAME} - args: - - ${DRIVERS_TOOLS}/.evergreen/serverless/create-instance.sh - - command: expansions.update - params: - 
file: serverless-expansion.yml - teardown_group: - - func: upload-test-results - - command: subprocess.exec - params: - binary: bash - args: - - ${DRIVERS_TOOLS}/.evergreen/serverless/delete-instance.sh - tasks: - - test-serverless - - name: validate-apicompat-task-group setup_group_can_fail_task: true setup_group_timeout_secs: 1800 # 30 minutes @@ -2150,8 +2290,44 @@ task_groups: - validate-apicompat buildvariants: +- name: unit-tests-windows + display_name: Unit Tests on Windows + run_on: windows-64-vs2017-test + expansions: + OS: "windows-64" + tasks: + - name: unit-tests-net472 + - name: unit-tests-netstandard21 + - name: unit-tests-net60 + +- name: unit-tests-ubuntu + display_name: Unit Tests on Ubuntu + run_on: ubuntu2004-small + expansions: + OS: "ubuntu-2004" + tasks: + - name: unit-tests-netstandard21 + - name: unit-tests-net60 + +- name: unit-tests-macos + display_name: Unit Tests on MacOs + run_on: macos-14 + expansions: + OS: "macos-14" + tasks: + - name: unit-tests-netstandard21 + - name: unit-tests-net60 + +- name: unit-tests-macos-arm + display_name: Unit Tests on MacOs Arm + run_on: macos-14-arm64 + expansions: + OS: "macos-14-arm64" + tasks: + - name: unit-tests-net60 + - matrix_name: stable-api-tests - matrix_spec: { version: ["5.0", "6.0", "7.0", "8.0", "rapid", "latest"], topology: "standalone", auth: "auth", ssl: "nossl", os: "windows-64" } + matrix_spec: { version: ["5.0", "6.0", "7.0", "8.0", "rapid", "latest"], topology: ["standalone", "sharded-cluster"], auth: "auth", ssl: "nossl", os: "windows-64" } display_name: "Stable API ${version} ${topology} ${auth} ${ssl} ${os}" run_on: - windows-64-vs2017-test @@ -2180,7 +2356,7 @@ buildvariants: - name: test-net60 - matrix_name: "secure-tests-linux-1804" - matrix_spec: { version: ["4.0", "4.2", "4.4", "5.0", "6.0"], topology: "*", auth: "auth", ssl: "ssl", os: "ubuntu-1804" } + matrix_spec: { version: ["4.2", "4.4", "5.0", "6.0"], topology: "*", auth: "auth", ssl: "ssl", os: "ubuntu-1804" } 
display_name: "${version} ${topology} ${auth} ${ssl} ${os}" tags: ["tests-variant"] tasks: @@ -2215,7 +2391,7 @@ buildvariants: - name: test-net60 - matrix_name: "unsecure-tests-linux-1804" - matrix_spec: { version: ["4.0", "4.2", "4.4", "5.0", "6.0"], topology: "*", auth: "noauth", ssl: "nossl", os: "ubuntu-1804" } + matrix_spec: { version: ["4.2", "4.4", "5.0", "6.0"], topology: "*", auth: "noauth", ssl: "nossl", os: "ubuntu-1804" } display_name: "${version} ${topology} ${auth} ${ssl} ${os}" tags: ["tests-variant"] tasks: @@ -2250,7 +2426,7 @@ buildvariants: - name: test-net60 - matrix_name: "tests-compression-linux-1804" - matrix_spec: { compressor: "*", auth: "noauth", ssl: "nossl", version: ["4.0", "4.2", "4.4", "5.0", "6.0"], topology: "standalone", os: "ubuntu-1804" } + matrix_spec: { compressor: "*", auth: "noauth", ssl: "nossl", version: ["4.2", "4.4", "5.0", "6.0"], topology: "standalone", os: "ubuntu-1804" } display_name: "${version} ${compressor} ${topology} ${auth} ${ssl} ${os} " tags: ["tests-variant"] tasks: @@ -2344,6 +2520,12 @@ buildvariants: - name: test-gssapi-netstandard21 - name: test-gssapi-net60 +- matrix_name: "x509-tests" + matrix_spec: { os: ["ubuntu-2004", "macos-14", "windows-64"], ssl: ["ssl"], version: ["latest"], topology: ["standalone"] } + display_name: "X509 tests ${version} ${os}" + tasks: + - name: x509-auth-tests + # Load balancer tests - matrix_name: load-balancer-tests matrix_spec: { version: ["5.0", "6.0", "7.0", "8.0", "rapid", "latest"], auth: "noauth", ssl: "nossl", topology: "sharded-cluster", os: "ubuntu-2004" } @@ -2359,21 +2541,6 @@ buildvariants: - name: "test-load-balancer-netstandard21" - name: "test-load-balancer-net60" -# Serverless tests -- matrix_name: serverless-tests-windows - matrix_spec: { auth: "auth", ssl: "ssl", compressor: "zlib", os: "windows-64", target_framework: ["net472", "netstandard21", "net60"], serverless: "*" } - display_name: "${serverless} ${compressor} ${auth} ${ssl} ${os} 
${target_framework}" - batchtime: 10080 # 7 days - tasks: - - name: serverless-task-group - -- matrix_name: serverless-tests-ubuntu - matrix_spec: { auth: "auth", ssl: "ssl", compressor: "zlib", os: "ubuntu-2004", target_framework: ["netstandard21", "net60"], serverless: "*" } - display_name: "${serverless} ${compressor} ${auth} ${ssl} ${os} ${target_framework}" - batchtime: 10080 # 7 days - tasks: - - name: serverless-task-group - # Performance tests - name: driver-performance-tests display_name: "Driver Performance Tests" @@ -2398,13 +2565,6 @@ buildvariants: tasks: - name: atlas-connectivity-tests -- name: atlas-data-lake-test - display_name: "Atlas Data Lake Tests" - run_on: - - ubuntu2004-large - tasks: - - name: atlas-data-lake-test - - name: atlas-search-test display_name: "Atlas Search Tests" run_on: @@ -2419,6 +2579,29 @@ buildvariants: tasks: - name: atlas-search-index-helpers-task-group +# Socks5 Proxy tests +- matrix_name: "socks5-proxy-tests-linux" + matrix_spec: { os: "ubuntu-2004", ssl: ["nossl", "ssl"], version: ["latest"], topology: ["replicaset"] } + display_name: "Socks5 Proxy ${version} ${os} ${ssl}" + tasks: + - name: test-socks5-proxy-netstandard21 + - name: test-socks5-proxy-net60 + +- matrix_name: "socks5-proxy-tests-windows" + matrix_spec: { os: "windows-64", ssl: ["nossl", "ssl"], version: ["latest"], topology: ["replicaset"] } + display_name: "Socks5 Proxy ${version} ${os} ${ssl}" + tasks: + - name: test-socks5-proxy-net472 + - name: test-socks5-proxy-netstandard21 + - name: test-socks5-proxy-net60 + +- matrix_name: "socks5-proxy-tests-macos" + matrix_spec: { os: "macos-14", ssl: ["nossl", "ssl"], version: ["latest"], topology: ["replicaset"] } + display_name: "Socks5 Proxy ${version} ${os} ${ssl}" + tasks: + - name: test-socks5-proxy-netstandard21 + - name: test-socks5-proxy-net60 + # CSFLE tests - matrix_name: "csfle-with-mocked-kms-tests-windows" matrix_spec: { os: "windows-64", ssl: "nossl", version: ["4.2", "4.4", "5.0", "6.0", 
"7.0", "8.0", "rapid", "latest"], topology: ["replicaset"] } diff --git a/evergreen/install-dotnet.sh b/evergreen/install-dotnet.sh index 230eb2ab5bd..2798a24c15f 100644 --- a/evergreen/install-dotnet.sh +++ b/evergreen/install-dotnet.sh @@ -4,14 +4,34 @@ set -o errexit # Exit the script with error if any of the commands fail DOTNET_SDK_PATH="${DOTNET_SDK_PATH:-./.dotnet}" DOTNET_SDK_VERSION="${DOTNET_SDK_VERSION:-8.0}" +echo "runtime: $FRAMEWORK" + +if [ -n "$FRAMEWORK" ]; then + if [ "$FRAMEWORK" = "net6.0" ]; then + RUNTIME_VERSION="6.0" + elif [ "$FRAMEWORK" = "net8.0" ]; then + RUNTIME_VERSION="8.0" + elif [ "$FRAMEWORK" = "netstandard2.1" ]; then + RUNTIME_VERSION="3.1" + fi +fi + if [[ $OS =~ [Ww]indows.* ]]; then echo "Downloading Windows .NET SDK installer into $DOTNET_SDK_PATH folder..." curl -Lfo ./dotnet-install.ps1 https://builds.dotnet.microsoft.com/dotnet/scripts/v1/dotnet-install.ps1 - echo "Installing .NET 8.0 SDK..." + echo "Installing .NET ${DOTNET_SDK_VERSION} SDK..." powershell.exe ./dotnet-install.ps1 -Channel "$DOTNET_SDK_VERSION" -InstallDir "$DOTNET_SDK_PATH" -NoPath + if [ -n "$RUNTIME_VERSION" ]; then + echo "Installing .NET ${RUNTIME_VERSION} runtime..." + powershell.exe ./dotnet-install.ps1 -Channel "$RUNTIME_VERSION" -Runtime dotnet -InstallDir "$DOTNET_SDK_PATH" -NoPath + fi else echo "Downloading .NET SDK installer into $DOTNET_SDK_PATH folder..." curl -Lfo ./dotnet-install.sh https://builds.dotnet.microsoft.com/dotnet/scripts/v1/dotnet-install.sh - echo "Installing .NET 8.0 SDK..." + echo "Installing .NET ${DOTNET_SDK_VERSION} SDK..." bash ./dotnet-install.sh --channel "$DOTNET_SDK_VERSION" --install-dir "$DOTNET_SDK_PATH" --no-path + if [ -n "$RUNTIME_VERSION" ]; then + echo "Installing .NET ${RUNTIME_VERSION} runtime..." 
+ bash ./dotnet-install.sh --channel "$RUNTIME_VERSION" --runtime dotnet --install-dir "$DOTNET_SDK_PATH" --no-path + fi fi diff --git a/evergreen/release-notes.py b/evergreen/release-notes.py index 5d5d5367a10..ff388d8da5d 100644 --- a/evergreen/release-notes.py +++ b/evergreen/release-notes.py @@ -37,7 +37,7 @@ def load_config(opts): def mapPullRequest(pullRequest, opts): - title = pullRequest["title"] + title = pullRequest["title"].encode('ascii', 'backslashreplace').decode().replace("<", "\<") for regex in opts.template["autoformat"]: title = re.sub(regex["match"], regex["replace"], title) @@ -91,7 +91,9 @@ def load_pull_requests(opts): github_api_base_url=opts.github_api_base_url, repo=opts.repo, commit_sha=commit["sha"]) - pullrequests = requests.get(pullrequests_url, headers=opts.github_headers).json() + pullrequests_response = requests.get(pullrequests_url, headers=opts.github_headers) + pullrequests_response.raise_for_status() + pullrequests = pullrequests_response.json() for pullrequest in pullrequests: mapped = mapPullRequest(pullrequest, opts) if is_in_section(mapped, ignore_section): @@ -140,7 +142,6 @@ def publish_release_notes(opts, title, content): print("Publishing release notes...") url = '{github_api_base_url}{repo}/releases/tags/{tag}'.format(github_api_base_url=opts.github_api_base_url, repo=opts.repo, tag=opts.version_tag) response = requests.get(url, headers=opts.github_headers) - response.raise_for_status() if response.status_code != 404: raise SystemExit("Release with the tag already exists") diff --git a/evergreen/run-atlas-connectivity-tests.sh b/evergreen/run-atlas-connectivity-tests.sh index cc0fc5e842a..3ddcfc06643 100755 --- a/evergreen/run-atlas-connectivity-tests.sh +++ b/evergreen/run-atlas-connectivity-tests.sh @@ -1,6 +1,5 @@ #!/usr/bin/env bash -# DO NOT set xtrace set -o errexit # Exit the script with error if any of the commands fail ############################################ diff --git 
a/evergreen/run-atlas-data-lake-test.sh b/evergreen/run-atlas-data-lake-test.sh deleted file mode 100755 index 7aabe67e019..00000000000 --- a/evergreen/run-atlas-data-lake-test.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env bash - -set -o xtrace -set -o errexit # Exit the script with error if any of the commands fail - -############################################ -# Main Program # -############################################ - -echo "Running Atlas Data Lake driver tests" - -export MONGODB_URI="mongodb://mhuser:pencil@localhost" -export ATLAS_DATA_LAKE_TESTS_ENABLED=true - -./build.sh --target=TestAtlasDataLake diff --git a/evergreen/run-atlas-search-index-helpers-test.sh b/evergreen/run-atlas-search-index-helpers-test.sh index aacc74fb56e..24bb944bcc8 100644 --- a/evergreen/run-atlas-search-index-helpers-test.sh +++ b/evergreen/run-atlas-search-index-helpers-test.sh @@ -1,6 +1,5 @@ #!/usr/bin/env bash -set -o xtrace set -o errexit # Exit the script with error if any of the commands fail # Environment variables produced as output diff --git a/evergreen/run-atlas-search-test.sh b/evergreen/run-atlas-search-test.sh index 88b9b151a85..41bd7822ec3 100644 --- a/evergreen/run-atlas-search-test.sh +++ b/evergreen/run-atlas-search-test.sh @@ -1,6 +1,5 @@ #!/usr/bin/env bash -set -o xtrace set -o errexit # Exit the script with error if any of the commands fail # Environment variables produced as output diff --git a/evergreen/run-csfle-azure-tests.sh b/evergreen/run-csfle-azure-tests.sh index 52556db44b9..58614af1b56 100644 --- a/evergreen/run-csfle-azure-tests.sh +++ b/evergreen/run-csfle-azure-tests.sh @@ -1,6 +1,5 @@ #!/bin/bash -set -o xtrace set -o errexit # Exit the script with error if any of the commands fail # Environment variables used as input: diff --git a/evergreen/run-csfle-gcp-tests.sh b/evergreen/run-csfle-gcp-tests.sh index 7f4819ea4b9..6eb56d87e10 100644 --- a/evergreen/run-csfle-gcp-tests.sh +++ b/evergreen/run-csfle-gcp-tests.sh @@ -1,6 +1,5 @@ 
#!/bin/bash -set -o xtrace set -o errexit # Exit the script with error if any of the commands fail # Supported/used environment variables: diff --git a/evergreen/run-deployed-lambda-aws-tests.sh b/evergreen/run-deployed-lambda-aws-tests.sh index 5a573dbc5eb..75cb935cb7a 100755 --- a/evergreen/run-deployed-lambda-aws-tests.sh +++ b/evergreen/run-deployed-lambda-aws-tests.sh @@ -1,6 +1,5 @@ #!/usr/bin/env bash -set -o xtrace # Write all commands first to stderr set -o errexit # Exit the script with error if any of the commands fail # SAM CLI installs the "Amazon.Lambda.Tools" tool in this location so we need to add it to our PATH diff --git a/evergreen/run-external-script.sh b/evergreen/run-external-script.sh index 9f5aea52270..fe635425286 100644 --- a/evergreen/run-external-script.sh +++ b/evergreen/run-external-script.sh @@ -1,6 +1,5 @@ #!/usr/bin/env bash -set -o xtrace # Write all commands first to stderr set -o errexit # Exit the script with error if any of the commands fail # clone the repo diff --git a/evergreen/run-load-balancer-tests.sh b/evergreen/run-load-balancer-tests.sh index 446659eeb84..7935521eb86 100644 --- a/evergreen/run-load-balancer-tests.sh +++ b/evergreen/run-load-balancer-tests.sh @@ -1,6 +1,5 @@ #!/usr/bin/env bash -set -o xtrace # Write all commands first to stderr set -o errexit # Exit the script with error if any of the commands fail # Supported/used environment variables: diff --git a/evergreen/run-mongodb-aws-ecs-test.sh b/evergreen/run-mongodb-aws-ecs-test.sh index cbf6390c6eb..0c0aafb2b0d 100644 --- a/evergreen/run-mongodb-aws-ecs-test.sh +++ b/evergreen/run-mongodb-aws-ecs-test.sh @@ -25,8 +25,6 @@ if echo "$MONGODB_URI" | grep -q "@"; then echo "MONGODB_URI unexpectedly contains user credentials in ECS test!"; exit 1 fi -# Now we can safely enable xtrace -set -o xtrace export AWS_TESTS_ENABLED=true export AWS_ECS_ENABLED=true diff --git a/evergreen/run-mongodb-aws-test.sh b/evergreen/run-mongodb-aws-test.sh index 
b78af9c5fe9..c1c9beb32df 100755 --- a/evergreen/run-mongodb-aws-test.sh +++ b/evergreen/run-mongodb-aws-test.sh @@ -1,6 +1,5 @@ #!/usr/bin/env bash -set -o xtrace set -o errexit # Exit the script with error if any of the commands fail # Supported/used environment variables: diff --git a/evergreen/run-serverless-tests.sh b/evergreen/run-serverless-tests.sh deleted file mode 100644 index f9831386709..00000000000 --- a/evergreen/run-serverless-tests.sh +++ /dev/null @@ -1,62 +0,0 @@ -#!/usr/bin/env bash - -# Don't trace since the URI contains a password that shouldn't show up in the logs -set -o errexit # Exit the script with error if any of the commands fail - -# Supported/used environment variables: -# AUTH Authentication flag, must be "auth" -# FRAMEWORK Used in build.cake "TestServerless" task, must be set -# OS Operating system, must be set -# SSL TLS connection flag, must be "ssl" -# CRYPT_SHARED_LIB_PATH The path to crypt_shared library -# Modified/exported environment variables: -# MONGODB_URI MONGODB_URI for single host with auth details and TLS and compressor parameters -# MONGODB_URI_WITH_MULTIPLE_MONGOSES MONGODB_URI with auth details and TLS and compressor parameters -# SERVERLESS Flag for the tests, since there's no other way to determine if running serverless - -############################################ -# Main Program # -############################################ - -echo "CRYPT_SHARED_LIB_PATH: ${CRYPT_SHARED_LIB_PATH}" - -if [[ "$AUTH" != "auth" ]]; then - echo "Serverless tests require AUTH to be enabled" - exit 1 -fi - -if [ -z "$FRAMEWORK" ]; then - echo "Serverless tests require FRAMEWORK to be configured" - exit 1 -fi - -if [[ "$SSL" != "ssl" ]]; then - echo "Serverless tests require SSL to be enabled" - exit 1 -fi - -if [ "$OS" = "Windows_NT" ]; then - for var in TMP TEMP NUGET_PACKAGES NUGET_HTTP_CACHE_PATH APPDATA; do - setx $var z:\\data\\tmp - export $var=z:\\data\\tmp - done -else - for var in TMP TEMP NUGET_PACKAGES 
NUGET_HTTP_CACHE_PATH APPDATA; do - export $var=/data/tmp; - done -fi - -source ${DRIVERS_TOOLS}/.evergreen/serverless/secrets-export.sh -if [ -f "$DRIVERS_TOOLS/.evergreen/csfle/secrets-export.sh" ]; then - source $DRIVERS_TOOLS/.evergreen/csfle/secrets-export.sh -fi - -# Assume "mongodb+srv" protocol -export MONGODB_URI="mongodb+srv://${SERVERLESS_ATLAS_USER}:${SERVERLESS_ATLAS_PASSWORD}@${SERVERLESS_URI:14}" -export SERVERLESS="true" - -if [ "Windows_NT" = "$OS" ]; then - powershell.exe .\\build.ps1 --target "TestServerless${FRAMEWORK}" -else - ./build.sh --target="TestServerless${FRAMEWORK}" -fi diff --git a/evergreen/run-tests.sh b/evergreen/run-tests.sh index 69a6da9577f..6ec9620ff3b 100755 --- a/evergreen/run-tests.sh +++ b/evergreen/run-tests.sh @@ -1,6 +1,5 @@ #!/usr/bin/env bash -set -o xtrace # Write all commands first to stderr set -o errexit # Exit the script with error if any of the commands fail # Environment variables used as input: diff --git a/evergreen/run-unit-tests.sh b/evergreen/run-unit-tests.sh new file mode 100644 index 00000000000..397d5bb6701 --- /dev/null +++ b/evergreen/run-unit-tests.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash +set -o errexit # Exit the script with error if any of the commands fail + +FRAMEWORK=${FRAMEWORK:-net6.0} + +if [ "$FRAMEWORK" = "netstandard2.1" ]; then + FRAMEWORK="netcoreapp3.1" +fi + +dotnet build +dotnet test --no-build --filter "Category!=Integration" -f "$FRAMEWORK" --results-directory ./build/test-results --logger "junit;verbosity=detailed;LogFileName=TEST-{assembly}.xml;FailureBodyFormat=Verbose" --logger "console;verbosity=detailed" diff --git a/purls.txt b/purls.txt index da8e9f8aa14..9fd699321e8 100644 --- a/purls.txt +++ b/purls.txt @@ -1 +1 @@ -pkg:github/mongodb/libmongocrypt@1.13.0 +pkg:github/mongodb/libmongocrypt@1.15.1 diff --git a/sbom.json b/sbom.json index c0cab4adeb1..ad8e295c265 100644 --- a/sbom.json +++ b/sbom.json @@ -1,31 +1,31 @@ { "components": [ { - "bom-ref": 
"pkg:github/mongodb/libmongocrypt@1.13.0", + "bom-ref": "pkg:github/mongodb/libmongocrypt@1.15.1", "externalReferences": [ { "type": "distribution", - "url": "https://github.com/mongodb/libmongocrypt/archive/refs/tags/1.13.0.tar.gz" + "url": "https://github.com/mongodb/libmongocrypt/archive/1.15.1.tar.gz" }, { "type": "website", - "url": "https://github.com/mongodb/libmongocrypt/tree/1.13.0" + "url": "https://github.com/mongodb/libmongocrypt/tree/1.15.1" } ], "group": "mongodb", "name": "libmongocrypt", - "purl": "pkg:github/mongodb/libmongocrypt@1.13.0", + "purl": "pkg:github/mongodb/libmongocrypt@1.15.1", "type": "library", - "version": "1.13.0" + "version": "1.15.1" } ], "dependencies": [ { - "ref": "pkg:github/mongodb/libmongocrypt@1.13.0" + "ref": "pkg:github/mongodb/libmongocrypt@1.15.1" } ], "metadata": { - "timestamp": "2025-02-27T17:40:47.367492+00:00", + "timestamp": "2025-08-27T19:31:06.459222+00:00", "tools": [ { "externalReferences": [ @@ -68,7 +68,7 @@ } ] }, - "serialNumber": "urn:uuid:03b0f88b-8efc-4e12-9853-00b138b64e67", + "serialNumber": "urn:uuid:a24dfa6a-26a8-44d8-94bd-a2488e01185b", "version": 1, "$schema": "http://cyclonedx.org/schema/bom-1.5.schema.json", "bomFormat": "CycloneDX", diff --git a/specifications/atlas-data-lake-testing/tests/README.md b/specifications/atlas-data-lake-testing/tests/README.md deleted file mode 100644 index 6653fc66a93..00000000000 --- a/specifications/atlas-data-lake-testing/tests/README.md +++ /dev/null @@ -1,70 +0,0 @@ -# Atlas Data Lake Tests - -## Introduction - -The YAML and JSON files in this directory are platform-independent tests that drivers can use to assert compatibility -with [Atlas Data Lake](https://www.mongodb.com/docs/datalake/). 
These tests utilize the -[Unified Test Format](../../unified-test-format/unified-test-format.md). - -Several prose tests, which are not easily expressed in YAML, are also presented in this file. Those tests will need to -be manually implemented by each driver. - -## Test Considerations - -Running these integration tests will require a running `mongohoused` with data available in its `test.driverdata` -collection. See the -[ADL directory in drivers-evergreen-tools](https://github.com/mongodb-labs/drivers-evergreen-tools/tree/master/.evergreen/atlas_data_lake) -and [10gen/mongohouse README](https://github.com/10gen/mongohouse/blob/master/README.md) for more information. - -The test runner for Atlas Data Lake testing MUST NOT drop the collection and/or database under test. In contrast to most -other tests, which insert their own data fixtures into an empty collection, the data for these tests is specified in the -`mongohoused` configuration file. - -Additionally, the test runner MUST NOT execute `killAllSessions` (see: -[Terminating Open Transactions](../../unified-test-format/unified-test-format.md#terminating-open-transactions)) when -connected to Atlas Data Lake. - -## Prose Tests - -The following tests MUST be implemented to fully test compatibility with Atlas Data Lake. - -### 1. Support for `killCursors` command - -Test that the driver properly constructs and issues a -[killCursors](https://www.mongodb.com/docs/manual/reference/command/killCursors/) command to Atlas Data Lake. For this -test, configure an APM listener on a client and execute a query on the `test.driverdata` collection that will leave a -cursor open on the server (e.g. specify `batchSize=2` for a query that would match 3+ documents). Drivers MAY iterate -the cursor if necessary to execute the initial `find` command but MUST NOT iterate further to avoid executing a -`getMore`. 
- -Observe the CommandSucceededEvent event for the `find` command and extract the cursor's ID and namespace from the -response document's `cursor.id` and `cursor.ns` fields, respectively. Destroy the cursor object and observe a -CommandStartedEvent and CommandSucceededEvent for the `killCursors` command. Assert that the cursor ID and target -namespace in the outgoing command match the values from the `find` command's CommandSucceededEvent. When matching the -namespace, note that the `killCursors` field will contain the collection name and the database may be inferred from -either the `$db` field or accessed via the CommandStartedEvent directly. Finally, assert that the `killCursors` -CommandSucceededEvent indicates that the expected cursor was killed in the `cursorsKilled` field. - -Note: this test assumes that drivers only issue a `killCursors` command internally when destroying a cursor that may -still exist on the server. If a driver constructs and issues `killCursors` commands in other ways (e.g. public API), -this test MUST be adapted to test all such code paths. - -### 2. Connect without authentication - -Test that the driver can establish a connection with Atlas Data Lake without authentication. For these tests, create a -MongoClient using a valid connection string without auth credentials and execute a ping command. - -### 3. Connect with authentication - -Test that the driver can establish a connection with Atlas Data Lake with authentication. For these tests, create a -MongoClient using a valid connection string with SCRAM-SHA-1 and credentials from the drivers-evergreen-tools ADL -configuration and execute a ping command. Repeat this test using SCRAM-SHA-256. - -## Changelog - -- 2024-03-08: Convert legacy ADL tests to unified format. Convert test README from reStructuredText to Markdown. 
- -- 2022-10-05: Add spec front matter - -- 2020-07-15: Link to CRUD test runner implementation and note that the collection under test must not be dropped before - each test. diff --git a/specifications/atlas-data-lake-testing/tests/unified/aggregate.json b/specifications/atlas-data-lake-testing/tests/unified/aggregate.json deleted file mode 100644 index 68a3467c71c..00000000000 --- a/specifications/atlas-data-lake-testing/tests/unified/aggregate.json +++ /dev/null @@ -1,84 +0,0 @@ -{ - "description": "aggregate", - "schemaVersion": "1.0", - "createEntities": [ - { - "client": { - "id": "client0", - "observeEvents": [ - "commandStartedEvent" - ] - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "test" - } - }, - { - "collection": { - "id": "collection0", - "database": "database0", - "collectionName": "driverdata" - } - } - ], - "tests": [ - { - "description": "Aggregate with pipeline (project, sort, limit)", - "operations": [ - { - "object": "collection0", - "name": "aggregate", - "arguments": { - "pipeline": [ - { - "$project": { - "_id": 0 - } - }, - { - "$sort": { - "a": 1 - } - }, - { - "$limit": 2 - } - ] - }, - "expectResult": [ - { - "a": 1, - "b": 2, - "c": 3 - }, - { - "a": 2, - "b": 3, - "c": 4 - } - ] - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandStartedEvent": { - "command": { - "aggregate": "driverdata" - }, - "commandName": "aggregate", - "databaseName": "test" - } - } - ] - } - ] - } - ] -} diff --git a/specifications/atlas-data-lake-testing/tests/unified/aggregate.yml b/specifications/atlas-data-lake-testing/tests/unified/aggregate.yml deleted file mode 100644 index 54bad5aeaae..00000000000 --- a/specifications/atlas-data-lake-testing/tests/unified/aggregate.yml +++ /dev/null @@ -1,38 +0,0 @@ -description: "aggregate" - -schemaVersion: "1.0" - -createEntities: - - client: - id: &client0 client0 - observeEvents: [ commandStartedEvent ] - - database: - id: &database0 
database0 - client: *client0 - databaseName: &database0Name test - - collection: - id: &collection0 collection0 - database: *database0 - collectionName: &collection0Name driverdata - -tests: - - description: "Aggregate with pipeline (project, sort, limit)" - operations: - - object: *collection0 - name: aggregate - arguments: - pipeline: - - $project: { _id: 0 } - - $sort: { a: 1 } - - $limit: 2 - expectResult: - - { a: 1, b: 2, c: 3 } - - { a: 2, b: 3, c: 4 } - expectEvents: - - client: *client0 - events: - - commandStartedEvent: - command: - aggregate: *collection0Name - commandName: aggregate - databaseName: *database0Name diff --git a/specifications/atlas-data-lake-testing/tests/unified/estimatedDocumentCount.json b/specifications/atlas-data-lake-testing/tests/unified/estimatedDocumentCount.json deleted file mode 100644 index b7515a44182..00000000000 --- a/specifications/atlas-data-lake-testing/tests/unified/estimatedDocumentCount.json +++ /dev/null @@ -1,56 +0,0 @@ -{ - "description": "estimatedDocumentCount", - "schemaVersion": "1.0", - "createEntities": [ - { - "client": { - "id": "client0", - "observeEvents": [ - "commandStartedEvent" - ] - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "test" - } - }, - { - "collection": { - "id": "collection0", - "database": "database0", - "collectionName": "driverdata" - } - } - ], - "tests": [ - { - "description": "estimatedDocumentCount succeeds", - "operations": [ - { - "object": "collection0", - "name": "estimatedDocumentCount", - "expectResult": 15 - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandStartedEvent": { - "command": { - "count": "driverdata" - }, - "commandName": "count", - "databaseName": "test" - } - } - ] - } - ] - } - ] -} diff --git a/specifications/atlas-data-lake-testing/tests/unified/estimatedDocumentCount.yml b/specifications/atlas-data-lake-testing/tests/unified/estimatedDocumentCount.yml deleted file mode 100644 index 
fe3c353879c..00000000000 --- a/specifications/atlas-data-lake-testing/tests/unified/estimatedDocumentCount.yml +++ /dev/null @@ -1,31 +0,0 @@ -description: "estimatedDocumentCount" - -schemaVersion: "1.0" - -createEntities: - - client: - id: &client0 client0 - observeEvents: [ commandStartedEvent ] - - database: - id: &database0 database0 - client: *client0 - databaseName: &database0Name test - - collection: - id: &collection0 collection0 - database: *database0 - collectionName: &collection0Name driverdata - -tests: - - description: "estimatedDocumentCount succeeds" - operations: - - object: *collection0 - name: estimatedDocumentCount - expectResult: 15 - expectEvents: - - client: *client0 - events: - - commandStartedEvent: - command: - count: *collection0Name - commandName: count - databaseName: *database0Name diff --git a/specifications/atlas-data-lake-testing/tests/unified/find.json b/specifications/atlas-data-lake-testing/tests/unified/find.json deleted file mode 100644 index d0652dc720f..00000000000 --- a/specifications/atlas-data-lake-testing/tests/unified/find.json +++ /dev/null @@ -1,96 +0,0 @@ -{ - "description": "find", - "schemaVersion": "1.0", - "createEntities": [ - { - "client": { - "id": "client0", - "observeEvents": [ - "commandStartedEvent" - ] - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "test" - } - }, - { - "collection": { - "id": "collection0", - "database": "database0", - "collectionName": "driverdata" - } - } - ], - "tests": [ - { - "description": "Find with projection and sort", - "operations": [ - { - "object": "collection0", - "name": "find", - "arguments": { - "filter": { - "b": { - "$gt": 5 - } - }, - "projection": { - "_id": 0 - }, - "sort": { - "a": 1 - }, - "limit": 5 - }, - "expectResult": [ - { - "a": 5, - "b": 6, - "c": 7 - }, - { - "a": 6, - "b": 7, - "c": 8 - }, - { - "a": 7, - "b": 8, - "c": 9 - }, - { - "a": 8, - "b": 9, - "c": 10 - }, - { - "a": 9, - "b": 10, - "c": 11 - } - ] - } 
- ], - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandStartedEvent": { - "command": { - "find": "driverdata" - }, - "commandName": "find", - "databaseName": "test" - } - } - ] - } - ] - } - ] -} diff --git a/specifications/atlas-data-lake-testing/tests/unified/find.yml b/specifications/atlas-data-lake-testing/tests/unified/find.yml deleted file mode 100644 index 10384f78f4c..00000000000 --- a/specifications/atlas-data-lake-testing/tests/unified/find.yml +++ /dev/null @@ -1,41 +0,0 @@ -description: "find" - -schemaVersion: "1.0" - -createEntities: - - client: - id: &client0 client0 - observeEvents: [ commandStartedEvent ] - - database: - id: &database0 database0 - client: *client0 - databaseName: &database0Name test - - collection: - id: &collection0 collection0 - database: *database0 - collectionName: &collection0Name driverdata - -tests: - - description: "Find with projection and sort" - operations: - - object: *collection0 - name: find - arguments: - filter: { b: { $gt: 5 } } - projection: { _id: 0 } - sort: { a: 1 } - limit: 5 - expectResult: - - { a: 5, b: 6, c: 7 } - - { a: 6, b: 7, c: 8 } - - { a: 7, b: 8, c: 9 } - - { a: 8, b: 9, c: 10 } - - { a: 9, b: 10, c: 11 } - expectEvents: - - client: *client0 - events: - - commandStartedEvent: - command: - find: *collection0Name - commandName: find - databaseName: *database0Name diff --git a/specifications/atlas-data-lake-testing/tests/unified/getMore.json b/specifications/atlas-data-lake-testing/tests/unified/getMore.json deleted file mode 100644 index 109b6d3d8e7..00000000000 --- a/specifications/atlas-data-lake-testing/tests/unified/getMore.json +++ /dev/null @@ -1,95 +0,0 @@ -{ - "description": "getMore", - "schemaVersion": "1.0", - "createEntities": [ - { - "client": { - "id": "client0", - "observeEvents": [ - "commandStartedEvent" - ] - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "test" - } - }, - { - "collection": { - "id": "collection0", - 
"database": "database0", - "collectionName": "driverdata" - } - } - ], - "tests": [ - { - "description": "A successful find event with getMore", - "operations": [ - { - "object": "collection0", - "name": "find", - "arguments": { - "filter": { - "a": { - "$gte": 2 - } - }, - "sort": { - "a": 1 - }, - "batchSize": 3, - "limit": 4 - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandStartedEvent": { - "command": { - "find": "driverdata", - "filter": { - "a": { - "$gte": 2 - } - }, - "sort": { - "a": 1 - }, - "batchSize": 3, - "limit": 4 - }, - "commandName": "find", - "databaseName": "test" - } - }, - { - "commandStartedEvent": { - "command": { - "getMore": { - "$$type": [ - "int", - "long" - ] - }, - "collection": { - "$$type": "string" - }, - "batchSize": 1 - }, - "commandName": "getMore", - "databaseName": "cursors" - } - } - ] - } - ] - } - ] -} diff --git a/specifications/atlas-data-lake-testing/tests/unified/getMore.yml b/specifications/atlas-data-lake-testing/tests/unified/getMore.yml deleted file mode 100644 index f02fbfe9a18..00000000000 --- a/specifications/atlas-data-lake-testing/tests/unified/getMore.yml +++ /dev/null @@ -1,48 +0,0 @@ -description: "getMore" - -schemaVersion: "1.0" - -createEntities: - - client: - id: &client0 client0 - observeEvents: [ commandStartedEvent ] - - database: - id: &database0 database0 - client: *client0 - databaseName: &database0Name test - - collection: - id: &collection0 collection0 - database: *database0 - collectionName: &collection0Name driverdata - -tests: - - description: "A successful find event with getMore" - operations: - - object: *collection0 - name: find - arguments: - filter: { a: { $gte: 2 } } - sort: { a: 1 } - batchSize: 3 - limit: 4 - expectEvents: - - client: *client0 - events: - - commandStartedEvent: - command: - find: *collection0Name - filter: { a: { $gte : 2 } } - sort: { a: 1 } - batchSize: 3 - limit: 4 - commandName: find - databaseName: *database0Name - - 
commandStartedEvent: - command: - getMore: { $$type: [ int, long ] } - # collection name will be an internal identifier - collection: { $$type: string } - batchSize: 1 - commandName: getMore - # mongohoused always expects getMores on the "cursors" database - databaseName: cursors diff --git a/specifications/atlas-data-lake-testing/tests/unified/listCollections.json b/specifications/atlas-data-lake-testing/tests/unified/listCollections.json deleted file mode 100644 index 642e7ed328a..00000000000 --- a/specifications/atlas-data-lake-testing/tests/unified/listCollections.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "description": "listCollections", - "schemaVersion": "1.0", - "createEntities": [ - { - "client": { - "id": "client0", - "observeEvents": [ - "commandStartedEvent" - ] - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "test" - } - } - ], - "tests": [ - { - "description": "ListCollections succeeds", - "operations": [ - { - "object": "database0", - "name": "listCollections" - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandStartedEvent": { - "command": { - "listCollections": 1 - }, - "commandName": "listCollections", - "databaseName": "test" - } - } - ] - } - ] - } - ] -} diff --git a/specifications/atlas-data-lake-testing/tests/unified/listCollections.yml b/specifications/atlas-data-lake-testing/tests/unified/listCollections.yml deleted file mode 100644 index 2b9cd1cc94f..00000000000 --- a/specifications/atlas-data-lake-testing/tests/unified/listCollections.yml +++ /dev/null @@ -1,26 +0,0 @@ -description: "listCollections" - -schemaVersion: "1.0" - -createEntities: - - client: - id: &client0 client0 - observeEvents: [ commandStartedEvent ] - - database: - id: &database0 database0 - client: *client0 - databaseName: &database0Name test - -tests: - - description: "ListCollections succeeds" - operations: - - object: *database0 - name: listCollections - expectEvents: - - client: *client0 - 
events: - - commandStartedEvent: - command: - listCollections: 1 - commandName: listCollections - databaseName: *database0Name diff --git a/specifications/atlas-data-lake-testing/tests/unified/listDatabases.json b/specifications/atlas-data-lake-testing/tests/unified/listDatabases.json deleted file mode 100644 index 64506ee54e4..00000000000 --- a/specifications/atlas-data-lake-testing/tests/unified/listDatabases.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "description": "listDatabases", - "schemaVersion": "1.0", - "createEntities": [ - { - "client": { - "id": "client0", - "observeEvents": [ - "commandStartedEvent" - ] - } - } - ], - "tests": [ - { - "description": "ListCollections succeeds", - "operations": [ - { - "object": "client0", - "name": "listDatabases" - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandStartedEvent": { - "command": { - "listDatabases": 1 - }, - "commandName": "listDatabases", - "databaseName": "admin" - } - } - ] - } - ] - } - ] -} diff --git a/specifications/atlas-data-lake-testing/tests/unified/listDatabases.yml b/specifications/atlas-data-lake-testing/tests/unified/listDatabases.yml deleted file mode 100644 index cb70ee6c73f..00000000000 --- a/specifications/atlas-data-lake-testing/tests/unified/listDatabases.yml +++ /dev/null @@ -1,22 +0,0 @@ -description: "listDatabases" - -schemaVersion: "1.0" - -createEntities: - - client: - id: &client0 client0 - observeEvents: [ commandStartedEvent ] - -tests: - - description: "ListCollections succeeds" - operations: - - object: *client0 - name: listDatabases - expectEvents: - - client: *client0 - events: - - commandStartedEvent: - command: - listDatabases: 1 - commandName: listDatabases - databaseName: admin diff --git a/specifications/atlas-data-lake-testing/tests/unified/runCommand.json b/specifications/atlas-data-lake-testing/tests/unified/runCommand.json deleted file mode 100644 index 325b6b3f30a..00000000000 --- 
a/specifications/atlas-data-lake-testing/tests/unified/runCommand.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "description": "runCommand", - "schemaVersion": "1.0", - "createEntities": [ - { - "client": { - "id": "client0", - "observeEvents": [ - "commandStartedEvent" - ] - } - }, - { - "database": { - "id": "database0", - "client": "client0", - "databaseName": "test" - } - } - ], - "tests": [ - { - "description": "ping succeeds using runCommand", - "operations": [ - { - "object": "database0", - "name": "runCommand", - "arguments": { - "command": { - "ping": 1 - }, - "commandName": "ping" - } - } - ], - "expectEvents": [ - { - "client": "client0", - "events": [ - { - "commandStartedEvent": { - "command": { - "ping": 1 - }, - "commandName": "ping", - "databaseName": "test" - } - } - ] - } - ] - } - ] -} diff --git a/specifications/atlas-data-lake-testing/tests/unified/runCommand.yml b/specifications/atlas-data-lake-testing/tests/unified/runCommand.yml deleted file mode 100644 index 8ab0d7b478d..00000000000 --- a/specifications/atlas-data-lake-testing/tests/unified/runCommand.yml +++ /dev/null @@ -1,29 +0,0 @@ -description: "runCommand" - -schemaVersion: "1.0" - -createEntities: - - client: - id: &client0 client0 - observeEvents: [ commandStartedEvent ] - - database: - id: &database0 database0 - client: *client0 - databaseName: &database0Name test - -tests: - - description: "ping succeeds using runCommand" - operations: - - object: *database0 - name: runCommand - arguments: - command: { ping: 1 } - commandName: ping - expectEvents: - - client: *client0 - events: - - commandStartedEvent: - command: - ping: 1 - commandName: ping - databaseName: *database0Name diff --git a/specifications/client-side-encryption/prose-tests/etc/data/encryptedFields-prefix-suffix.json b/specifications/client-side-encryption/prose-tests/etc/data/encryptedFields-prefix-suffix.json new file mode 100644 index 00000000000..ec4489fa09b --- /dev/null +++ 
b/specifications/client-side-encryption/prose-tests/etc/data/encryptedFields-prefix-suffix.json @@ -0,0 +1,38 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedText", + "bsonType": "string", + "queries": [ + { + "queryType": "prefixPreview", + "strMinQueryLength": { + "$numberInt": "2" + }, + "strMaxQueryLength": { + "$numberInt": "10" + }, + "caseSensitive": true, + "diacriticSensitive": true + }, + { + "queryType": "suffixPreview", + "strMinQueryLength": { + "$numberInt": "2" + }, + "strMaxQueryLength": { + "$numberInt": "10" + }, + "caseSensitive": true, + "diacriticSensitive": true + } + ] + } + ] +} diff --git a/specifications/client-side-encryption/prose-tests/etc/data/encryptedFields-substring.json b/specifications/client-side-encryption/prose-tests/etc/data/encryptedFields-substring.json new file mode 100644 index 00000000000..ee22def77b8 --- /dev/null +++ b/specifications/client-side-encryption/prose-tests/etc/data/encryptedFields-substring.json @@ -0,0 +1,30 @@ +{ + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedText", + "bsonType": "string", + "queries": [ + { + "queryType": "substringPreview", + "strMaxLength": { + "$numberInt": "10" + }, + "strMinQueryLength": { + "$numberInt": "2" + }, + "strMaxQueryLength": { + "$numberInt": "10" + }, + "caseSensitive": true, + "diacriticSensitive": true + } + ] + } + ] +} diff --git a/specifications/client-side-encryption/tests/unified/QE-Text-cleanupStructuredEncryptionData.json b/specifications/client-side-encryption/tests/unified/QE-Text-cleanupStructuredEncryptionData.json new file mode 100644 index 00000000000..24f33ab3ecb --- /dev/null +++ b/specifications/client-side-encryption/tests/unified/QE-Text-cleanupStructuredEncryptionData.json @@ -0,0 +1,219 @@ +{ + "description": "QE-Text-cleanupStructuredEncryptionData", + "schemaVersion": 
"1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.2.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.0" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "db" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + }, + { + "databaseName": "db", + "collectionName": "coll", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedText", + "bsonType": "string", + "queries": [ + { + "queryType": "suffixPreview", + "contention": { + "$numberLong": "0" + }, + "strMinQueryLength": { + "$numberLong": "3" + }, + "strMaxQueryLength": { + "$numberLong": "30" + }, + "caseSensitive": true, + 
"diacriticSensitive": true + } + ] + } + ] + } + } + } + ], + "tests": [ + { + "description": "QE Text cleanupStructuredEncryptionData works", + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "command": { + "cleanupStructuredEncryptionData": "coll" + }, + "commandName": "cleanupStructuredEncryptionData" + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "coll" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "cleanupStructuredEncryptionData": "coll", + "cleanupTokens": { + "encryptedText": { + "ecoc": { + "$binary": { + "base64": "SWO8WEoZ2r2Kx/muQKb7+COizy85nIIUFiHh4K9kcvA=", + "subType": "00" + } + }, + "anchorPaddingToken": { + "$binary": { + "base64": "YAiF7Iwhqq1UyfxPvm70xfQJtrIRPrjfD2yRLG1+saQ=", + "subType": "00" + } + } + } + } + }, + "commandName": "cleanupStructuredEncryptionData" + } + } + ] + } + ] + } + ] +} diff --git a/specifications/client-side-encryption/tests/unified/QE-Text-cleanupStructuredEncryptionData.yml b/specifications/client-side-encryption/tests/unified/QE-Text-cleanupStructuredEncryptionData.yml new file mode 100644 index 00000000000..a326cca63db --- /dev/null +++ b/specifications/client-side-encryption/tests/unified/QE-Text-cleanupStructuredEncryptionData.yml @@ -0,0 +1,128 @@ +description: QE-Text-cleanupStructuredEncryptionData +schemaVersion: "1.25" +runOnRequirements: + - minServerVersion: "8.2.0" # Server 8.2.0 adds preview support 
for QE text queries. + topologies: ["replicaset", "sharded", "load-balanced"] # QE does not support standalone. + csfle: + minLibmongocryptVersion: 1.15.0 # For SPM-4158. +createEntities: + - client: + id: &client "client" + autoEncryptOpts: + keyVaultNamespace: keyvault.datakeys + kmsProviders: + local: + key: Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk + observeEvents: + - commandStartedEvent + - database: + id: &db "db" + client: *client + databaseName: *db + - collection: + id: &coll "coll" + database: *db + collectionName: *coll +initialData: + # Insert data encryption key: + - databaseName: keyvault + collectionName: datakeys + documents: + [ + { + "_id": &keyid { "$binary": { "base64": "q83vqxI0mHYSNBI0VniQEg==", "subType": "04" } }, + "keyMaterial": + { + "$binary": + { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00", + }, + }, + "creationDate": { "$date": { "$numberLong": "1648914851981" } }, + "updateDate": { "$date": { "$numberLong": "1648914851981" } }, + "status": { "$numberInt": "0" }, + "masterKey": { "provider": "local" }, + }, + ] + # Create encrypted collection: + - databaseName: *db + collectionName: *coll + documents: [] + createOptions: + encryptedFields: + { + "fields": + [ + { + "keyId": *keyid, + "path": "encryptedText", + "bsonType": "string", + "queries": [ + { + "queryType": "suffixPreview", + "contention": { "$numberLong": "0" }, + "strMinQueryLength": { "$numberLong": "3" }, + "strMaxQueryLength": { "$numberLong": "30" }, + "caseSensitive": true, + "diacriticSensitive": true, + }, + ], + }, + ], + } +tests: + - description: "QE Text cleanupStructuredEncryptionData works" + operations: + - name: runCommand + object: *db + arguments: + command: + 
cleanupStructuredEncryptionData: *coll + commandName: cleanupStructuredEncryptionData + expectResult: { ok: 1 } + expectEvents: + - client: *client + events: + - commandStartedEvent: + command: + listCollections: 1 + filter: + name: *coll + commandName: listCollections + - commandStartedEvent: + command: + find: datakeys + filter: + { + "$or": + [ + "_id": { "$in": [ *keyid ] }, + "keyAltNames": { "$in": [] }, + ], + } + $db: keyvault + readConcern: { level: "majority" } + commandName: find + - commandStartedEvent: + command: + { + "cleanupStructuredEncryptionData": *coll, + "cleanupTokens": { + "encryptedText": { + "ecoc": { + "$binary": { + "base64": "SWO8WEoZ2r2Kx/muQKb7+COizy85nIIUFiHh4K9kcvA=", + "subType": "00" + } + }, + "anchorPaddingToken": { + "$binary": { + "base64": "YAiF7Iwhqq1UyfxPvm70xfQJtrIRPrjfD2yRLG1+saQ=", + "subType": "00" + } + } + } + } + } + commandName: cleanupStructuredEncryptionData diff --git a/specifications/client-side-encryption/tests/unified/QE-Text-compactStructuredEncryptionData.json b/specifications/client-side-encryption/tests/unified/QE-Text-compactStructuredEncryptionData.json new file mode 100644 index 00000000000..c7abfe2d4bc --- /dev/null +++ b/specifications/client-side-encryption/tests/unified/QE-Text-compactStructuredEncryptionData.json @@ -0,0 +1,261 @@ +{ + "description": "QE-Text-compactStructuredEncryptionData", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.2.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.0" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": 
"db", + "client": "client", + "databaseName": "db" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + }, + { + "databaseName": "db", + "collectionName": "coll", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedText", + "bsonType": "string", + "queries": [ + { + "queryType": "suffixPreview", + "contention": { + "$numberLong": "0" + }, + "strMinQueryLength": { + "$numberLong": "3" + }, + "strMaxQueryLength": { + "$numberLong": "30" + }, + "caseSensitive": true, + "diacriticSensitive": true + } + ] + } + ] + } + } + } + ], + "tests": [ + { + "description": "QE Text compactStructuredEncryptionData works", + "operations": [ + { + "name": "runCommand", + "object": "db", + "arguments": { + "command": { + "compactStructuredEncryptionData": "coll" + }, + "commandName": "compactStructuredEncryptionData" + }, + "expectResult": { + "ok": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "coll" + } + }, + "commandName": "listCollections" + } + 
}, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "compactStructuredEncryptionData": "coll", + "encryptionInformation": { + "type": { + "$numberInt": "1" + }, + "schema": { + "db.coll": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedText", + "bsonType": "string", + "queries": [ + { + "queryType": "suffixPreview", + "contention": { + "$numberLong": "0" + }, + "strMinQueryLength": { + "$numberLong": "3" + }, + "strMaxQueryLength": { + "$numberLong": "30" + }, + "caseSensitive": true, + "diacriticSensitive": true + } + ] + } + ], + "strEncodeVersion": { + "$numberInt": "1" + }, + "escCollection": "enxcol_.coll.esc", + "ecocCollection": "enxcol_.coll.ecoc" + } + } + }, + "compactionTokens": { + "encryptedText": { + "ecoc": { + "$binary": { + "base64": "SWO8WEoZ2r2Kx/muQKb7+COizy85nIIUFiHh4K9kcvA=", + "subType": "00" + } + }, + "anchorPaddingToken": { + "$binary": { + "base64": "YAiF7Iwhqq1UyfxPvm70xfQJtrIRPrjfD2yRLG1+saQ=", + "subType": "00" + } + } + } + } + }, + "commandName": "compactStructuredEncryptionData" + } + } + ] + } + ] + } + ] +} diff --git a/specifications/client-side-encryption/tests/unified/QE-Text-compactStructuredEncryptionData.yml b/specifications/client-side-encryption/tests/unified/QE-Text-compactStructuredEncryptionData.yml new file mode 100644 index 00000000000..994e6209eab --- /dev/null +++ b/specifications/client-side-encryption/tests/unified/QE-Text-compactStructuredEncryptionData.yml @@ -0,0 +1,137 @@ +description: QE-Text-compactStructuredEncryptionData +schemaVersion: "1.25" 
+runOnRequirements: + - minServerVersion: "8.2.0" # Server 8.2.0 adds preview support for QE text queries. + topologies: ["replicaset", "sharded", "load-balanced"] # QE does not support standalone. + csfle: + minLibmongocryptVersion: 1.15.0 # For SPM-4158. +createEntities: + - client: + id: &client "client" + autoEncryptOpts: + keyVaultNamespace: keyvault.datakeys + kmsProviders: + local: + key: Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk + observeEvents: + - commandStartedEvent + - database: + id: &db "db" + client: *client + databaseName: *db + - collection: + id: &coll "coll" + database: *db + collectionName: *coll +initialData: + # Insert data encryption key: + - databaseName: keyvault + collectionName: datakeys + documents: + [ + { + "_id": &keyid { "$binary": { "base64": "q83vqxI0mHYSNBI0VniQEg==", "subType": "04" } }, + "keyMaterial": + { + "$binary": + { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00", + }, + }, + "creationDate": { "$date": { "$numberLong": "1648914851981" } }, + "updateDate": { "$date": { "$numberLong": "1648914851981" } }, + "status": { "$numberInt": "0" }, + "masterKey": { "provider": "local" }, + }, + ] + # Create encrypted collection: + - databaseName: *db + collectionName: *coll + documents: [] + createOptions: + encryptedFields: &encryptedFields + { + "fields": + [ + { + "keyId": *keyid, + "path": "encryptedText", + "bsonType": "string", + "queries": [ + { + "queryType": "suffixPreview", + "contention": { "$numberLong": "0" }, + "strMinQueryLength": { "$numberLong": "3" }, + "strMaxQueryLength": { "$numberLong": "30" }, + "caseSensitive": true, + "diacriticSensitive": true, + }, + ], + }, + ], + } +tests: + - description: "QE Text 
compactStructuredEncryptionData works" + operations: + - name: runCommand + object: *db + arguments: + command: + compactStructuredEncryptionData: *coll + commandName: compactStructuredEncryptionData + expectResult: { ok: 1 } + expectEvents: + - client: *client + events: + - commandStartedEvent: + command: + listCollections: 1 + filter: + name: *coll + commandName: listCollections + - commandStartedEvent: + command: + find: datakeys + filter: + { + "$or": + [ + "_id": { "$in": [ *keyid ] }, + "keyAltNames": { "$in": [] }, + ], + } + $db: keyvault + readConcern: { level: "majority" } + commandName: find + - commandStartedEvent: + command: + compactStructuredEncryptionData: *coll + encryptionInformation: + type: { "$numberInt": "1" } + schema: + db.coll: + <<: *encryptedFields + # libmongocrypt applies strEncodeVersion, escCollection, and ecocCollection: + strEncodeVersion: { "$numberInt": "1" } + escCollection: "enxcol_.coll.esc" + ecocCollection: "enxcol_.coll.ecoc" + compactionTokens: + encryptedText: + ecoc: + { + "$binary": + { + "base64": "SWO8WEoZ2r2Kx/muQKb7+COizy85nIIUFiHh4K9kcvA=", + "subType": "00", + }, + } + anchorPaddingToken: + { + "$binary": + { + "base64": "YAiF7Iwhqq1UyfxPvm70xfQJtrIRPrjfD2yRLG1+saQ=", + "subType": "00", + }, + } + commandName: compactStructuredEncryptionData diff --git a/specifications/client-side-encryption/tests/unified/QE-Text-prefixPreview.json b/specifications/client-side-encryption/tests/unified/QE-Text-prefixPreview.json new file mode 100644 index 00000000000..7279385743f --- /dev/null +++ b/specifications/client-side-encryption/tests/unified/QE-Text-prefixPreview.json @@ -0,0 +1,338 @@ +{ + "description": "QE-Text-prefixPreview", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.2.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.0" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "autoEncryptOpts": 
{ + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "db" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + }, + { + "databaseName": "db", + "collectionName": "coll", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedText", + "bsonType": "string", + "queries": [ + { + "queryType": "prefixPreview", + "contention": { + "$numberLong": "0" + }, + "strMinQueryLength": { + "$numberLong": "3" + }, + "strMaxQueryLength": { + "$numberLong": "30" + }, + "caseSensitive": true, + "diacriticSensitive": true + } + ] + } + ] + } + } + } + ], + "tests": [ + { + "description": "Insert QE prefixPreview", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedText": "foobar" + } + }, + "object": "coll" + } + ], + 
"expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "coll" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 1, + "encryptedText": { + "$$type": "binData" + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "Query with matching $encStrStartsWith", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedText": "foobar" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "$expr": { + "$encStrStartsWith": { + "input": "$encryptedText", + "prefix": "foo" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": { + "$numberInt": "1" + }, + "encryptedText": "foobar", + "__safeContent__": [ + { + "$binary": { + "base64": "wpaMBVDjL4bHf9EtSP52PJFzyNn1R19+iNI/hWtvzdk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "fmUMXTMV/XRiN0IL3VXxSEn6SQG9E6Po30kJKB8JJlQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vZIDMiFDgjmLNYVrrbnq1zT4hg7sGpe/PMtighSsnRc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "26Z5G+sHTzV3D7F8Y0m08389USZ2afinyFV3ez9UEBQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "q/JEq8of7bE0QE5Id0XuOsNQ4qVpANYymcPQDUL2Ywk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Uvvv46LkfbgLoPqZ6xTBzpgoYRTM6FUgRdqZ9eaVojI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": 
"nMxdq2lladuBJA3lv3JC2MumIUtRJBNJVLp3PVE6nQk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hS3V0qq5CF/SkTl3ZWWWgXcAJ8G5yGtkY2RwcHNc5Oc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "McgwYUxfKj5+4D0vskZymy4KA82s71MR25iV/Enutww=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "Ciqdk1b+t+Vrr6oIlFFk0Zdym5BPmwN3glQ0/VcsVdM=", + "subType": "00" + } + } + ] + } + ] + } + ] + }, + { + "description": "Query with non-matching $encStrStartsWith", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedText": "foobar" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "$expr": { + "$encStrStartsWith": { + "input": "$encryptedText", + "prefix": "bar" + } + } + } + }, + "object": "coll", + "expectResult": [] + } + ] + } + ] +} diff --git a/specifications/client-side-encryption/tests/unified/QE-Text-prefixPreview.yml b/specifications/client-side-encryption/tests/unified/QE-Text-prefixPreview.yml new file mode 100644 index 00000000000..6f228e2d708 --- /dev/null +++ b/specifications/client-side-encryption/tests/unified/QE-Text-prefixPreview.yml @@ -0,0 +1,225 @@ +description: QE-Text-prefixPreview +schemaVersion: "1.25" +runOnRequirements: + - minServerVersion: "8.2.0" # Server 8.2.0 adds preview support for QE text queries. + topologies: ["replicaset", "sharded", "load-balanced"] # QE does not support standalone. + csfle: + minLibmongocryptVersion: 1.15.0 # For SPM-4158. 
+createEntities: + - client: + id: &client "client" + autoEncryptOpts: + keyVaultNamespace: keyvault.datakeys + kmsProviders: + local: + key: Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk + observeEvents: + - commandStartedEvent + - database: + id: &db "db" + client: *client + databaseName: *db + - collection: + id: &coll "coll" + database: *db + collectionName: *coll +initialData: + # Insert data encryption key: + - databaseName: keyvault + collectionName: datakeys + documents: + [ + { + "_id": &keyid { "$binary": { "base64": "q83vqxI0mHYSNBI0VniQEg==", "subType": "04" } }, + "keyMaterial": + { + "$binary": + { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00", + }, + }, + "creationDate": { "$date": { "$numberLong": "1648914851981" } }, + "updateDate": { "$date": { "$numberLong": "1648914851981" } }, + "status": { "$numberInt": "0" }, + "masterKey": { "provider": "local" }, + }, + ] + # Create encrypted collection: + - databaseName: *db + collectionName: *coll + documents: [] + createOptions: + encryptedFields: + { + "fields": + [ + { + "keyId": *keyid, + "path": "encryptedText", + "bsonType": "string", + "queries": [ + # Use zero contention for deterministic __safeContent__: + { + "queryType": "prefixPreview", + "contention": { "$numberLong": "0" }, + "strMinQueryLength": { "$numberLong": "3" }, + "strMaxQueryLength": { "$numberLong": "30" }, + "caseSensitive": true, + "diacriticSensitive": true, + }, + ], + }, + ], + } +tests: + - description: "Insert QE prefixPreview" + operations: + - name: insertOne + arguments: + document: { _id: 1, encryptedText: "foobar" } + object: *coll + expectEvents: + - client: "client" + events: + - commandStartedEvent: + command: + listCollections: 1 + 
filter: + name: *coll + commandName: listCollections + - commandStartedEvent: + command: + find: datakeys + filter: + { + "$or": + [ + "_id": { "$in": [ *keyid ] }, + "keyAltNames": { "$in": [] }, + ], + } + $db: keyvault + readConcern: { level: "majority" } + commandName: find + - commandStartedEvent: + command: + insert: *coll + documents: + - { "_id": 1, "encryptedText": { $$type: "binData" } } # Sends encrypted payload + ordered: true + commandName: insert + - description: "Query with matching $encStrStartsWith" + operations: + - name: insertOne + arguments: + document: { _id: 1, encryptedText: "foobar" } + object: *coll + - name: find + arguments: + filter: + { + $expr: + { + $encStrStartsWith: { input: "$encryptedText", prefix: "foo" }, + }, + } + object: *coll + expectResult: + [ + { + "_id": { "$numberInt": "1" }, + "encryptedText": "foobar", + "__safeContent__": + [ + { + "$binary": + { + "base64": "wpaMBVDjL4bHf9EtSP52PJFzyNn1R19+iNI/hWtvzdk=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "fmUMXTMV/XRiN0IL3VXxSEn6SQG9E6Po30kJKB8JJlQ=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "vZIDMiFDgjmLNYVrrbnq1zT4hg7sGpe/PMtighSsnRc=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "26Z5G+sHTzV3D7F8Y0m08389USZ2afinyFV3ez9UEBQ=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "q/JEq8of7bE0QE5Id0XuOsNQ4qVpANYymcPQDUL2Ywk=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "Uvvv46LkfbgLoPqZ6xTBzpgoYRTM6FUgRdqZ9eaVojI=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "nMxdq2lladuBJA3lv3JC2MumIUtRJBNJVLp3PVE6nQk=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "hS3V0qq5CF/SkTl3ZWWWgXcAJ8G5yGtkY2RwcHNc5Oc=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "McgwYUxfKj5+4D0vskZymy4KA82s71MR25iV/Enutww=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "Ciqdk1b+t+Vrr6oIlFFk0Zdym5BPmwN3glQ0/VcsVdM=", + "subType": "00", + }, + }, + ], + }, + 
] + + - description: "Query with non-matching $encStrStartsWith" + operations: + - name: insertOne + arguments: + document: { _id: 1, encryptedText: "foobar" } + object: *coll + - name: find + arguments: + filter: + { + $expr: + { + $encStrStartsWith: { input: "$encryptedText", prefix: "bar" }, + }, + } + object: *coll + expectResult: [] diff --git a/specifications/client-side-encryption/tests/unified/QE-Text-substringPreview.json b/specifications/client-side-encryption/tests/unified/QE-Text-substringPreview.json new file mode 100644 index 00000000000..6a8f133eac5 --- /dev/null +++ b/specifications/client-side-encryption/tests/unified/QE-Text-substringPreview.json @@ -0,0 +1,551 @@ +{ + "description": "QE-Text-substringPreview", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.2.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.0" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "db" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + 
} + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + }, + { + "databaseName": "db", + "collectionName": "coll", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedText", + "bsonType": "string", + "queries": [ + { + "queryType": "substringPreview", + "contention": { + "$numberLong": "0" + }, + "strMinQueryLength": { + "$numberLong": "3" + }, + "strMaxQueryLength": { + "$numberLong": "10" + }, + "strMaxLength": { + "$numberLong": "20" + }, + "caseSensitive": true, + "diacriticSensitive": true + } + ] + } + ] + } + } + } + ], + "tests": [ + { + "description": "Insert QE suffixPreview", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedText": "foobar" + } + }, + "object": "coll" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "coll" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 1, + "encryptedText": { + "$$type": "binData" + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "Query with matching $encStrContains", + "operations": [ + { + "name": 
"insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedText": "foobar" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "$expr": { + "$encStrContains": { + "input": "$encryptedText", + "substring": "oba" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": { + "$numberInt": "1" + }, + "encryptedText": "foobar", + "__safeContent__": [ + { + "$binary": { + "base64": "wpaMBVDjL4bHf9EtSP52PJFzyNn1R19+iNI/hWtvzdk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IpY3x/jjm8j/74jAdUhgxdM5hk68zR0zv/lTKm/72Vg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "G+ky260C6QiOfIxKz14FmaMbAxvui1BKJO/TnLOHlGk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "7dv3gAKe9vwJMZmpB40pRCwRTmc7ds9UkGhxH8j084E=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "o0V+Efn6x8XQdE80F1tztNaT3qxHjcsd9DOQ47BtmQk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "sJvrCjyVot7PIZFsdRehWFANKAj6fmBaj3FLbz/dZLE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "e98auxFmu02h5MfBIARk29MI7hSmvN3F9DaQ0xjqoEM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "US83krGNov/ezL6IhsY5eEOCxv1xUPDIEL/nmY0IKi0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "P2Aq5+OHZPG0CWIdmZvWq9c/18ZKVYW3vbxd+WU/TXU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "8AdPRPnSzcd5uhq4TZfNvNeF0XjLNVwAsJJMTtktw84=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "9O6u/G51I4ZHFLhL4ZLuudbr0s202A2QnPfThmOXPhI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "N7AjYVyVlv6+lVSTM+cIxRL3SMgs3G5LgxSs+jrgDkI=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "RbGF7dQbPGYQFd9DDO1hPz1UlLOJ77FAC6NsjGwJeos=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "m7srHMgKm6kZwsNx8rc45pmw0/9Qro6xuQ8lZS3+RYk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "K75CNU3JyKFqZWPiIsVi4+n7DhYmcPl/nEhQ3d88mVI=", + "subType": "00" + } + }, + 
{ + "$binary": { + "base64": "c7bwGpUZc/7JzEnMS7qQ/TPuXZyrmMihFaAV6zIqbZc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "rDvEdUgEk8u4Srt3ETokWs2FXcnyJaRGQ+NbkFwi2rQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "VcdZj9zfveRBRlpCR2OYWau2+GokOFb73TE3gpElNiU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "eOa9o2xfA6OgkbYUxd6wQJicaeN6guhy2V66W3ALsaA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "1xGkJh+um70XiRd8lKLDtyHgDqrf7/59Mg7X0+KZh8k=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OSvllqHxycbcZN4phR6NDujY3ttA59o7nQJ6V9eJpX0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "ZTX1pyk8Vdw0BSbJx7GeJNcQf3tGKxbrrNSTqBqUWkg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "cn7V05zb5iXwYrePGMHztC+GRq+Tj8IMpRDraauPhSE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "E9bV9KyrZxHJSUmMg0HrDK4gGN+75ruelAnrM6hXQgY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "WrssTNmdgXoTGpbaF0JLRCGH6cDQuz1XEFNTy98nrb0=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "jZmyOJP35dsxQ/OY5U4ISpVRIYr8iedNfcwZiKt29Qc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "d2mocORMbX9MX+/itAW8r1kxVw2/uii4vzXtc+2CIRQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "JBnJy58eRPhDo3DuZvsHbvQDiHXxdtAx1Eif66k5SfA=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "OjbDulC8s62v0pgweBSsQqtJjJBwH5JinfJpj7nVr+A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "85i7KT2GP9nSda3Gsil5LKubhq0LDtc22pxBxHpR+nE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "u9Fvsclwrs9lwIcMPV/fMZD7L3d5anSfJQVjQb9mgLg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "LZ32ttmLJGOIw9oFaUCn3Sx5uHPTYJPSFpeGRWNqlUc=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "mMsZvGEePTqtl0FJAL/jAdyWNQIlpwN61YIlZsSIZ6s=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "XZcu1a/ZGsIzAl3j4MXQlLo4v2p7kvIqRHtIQYFmL6k=", + "subType": 
"00" + } + }, + { + "$binary": { + "base64": "Zse27LinlYCEnX6iTmJceI33mEJxFb0LdPxp0RiMOaQ=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vOv2Hgb2/sBpnX9XwFbIN6yDxhjchwlmczUf82W2tp4=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oQxZ9A6j3x5j6x1Jqw/N9tpP4rfWMjcV3y+a3PkrL7c=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "/D7ew3EijyUnmT22awVFspcuyo3JChJcDeCPwpljzVM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "BEmmwqyamt9X3bcWDld61P01zquy8fBHAXq3SHAPP0M=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "wygD9/kAo1KsRvtr1v+9/lvqoWdKwgh6gDHvAQfXPPk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "pRTKgF/uksrF1c1AcfSTY6ZhqBKVud1vIztQ4/36SLs=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "C4iUo8oNJsjJ37BqnBgIgSQpf99X2Bb4W5MZEAmakHU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "icoE53jIq6Fu/YGKUiSUTYyZ8xdiTQY9jJiGxVJObpw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oubCwk0V6G2RFWtcOnYDU4uUBoXBrhBRi4nZgrYj9JY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "IyqhQ9nGhzEi5YW2W6v1kGU5DY2u2qSqbM/qXdLdWVU=", + "subType": "00" + } + } + ] + } + ] + } + ] + }, + { + "description": "Query with non-matching $encStrContains", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedText": "foobar" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "$expr": { + "$encStrContains": { + "input": "$encryptedText", + "substring": "blah" + } + } + } + }, + "object": "coll", + "expectResult": [] + } + ] + } + ] +} diff --git a/specifications/client-side-encryption/tests/unified/QE-Text-substringPreview.yml b/specifications/client-side-encryption/tests/unified/QE-Text-substringPreview.yml new file mode 100644 index 00000000000..cee6a9f7ca4 --- /dev/null +++ b/specifications/client-side-encryption/tests/unified/QE-Text-substringPreview.yml @@ -0,0 +1,472 @@ +description: 
QE-Text-substringPreview +schemaVersion: "1.25" +runOnRequirements: + - minServerVersion: "8.2.0" # Server 8.2.0 adds preview support for QE text queries. + topologies: ["replicaset", "sharded", "load-balanced"] # QE does not support standalone. + csfle: + minLibmongocryptVersion: 1.15.0 # For SPM-4158. +createEntities: + - client: + id: &client "client" + autoEncryptOpts: + keyVaultNamespace: keyvault.datakeys + kmsProviders: + local: + key: Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk + observeEvents: + - commandStartedEvent + - database: + id: &db "db" + client: *client + databaseName: *db + - collection: + id: &coll "coll" + database: *db + collectionName: *coll +initialData: + # Insert data encryption key: + - databaseName: keyvault + collectionName: datakeys + documents: + [ + { + "_id": &keyid { "$binary": { "base64": "q83vqxI0mHYSNBI0VniQEg==", "subType": "04" } }, + "keyMaterial": + { + "$binary": + { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00", + }, + }, + "creationDate": { "$date": { "$numberLong": "1648914851981" } }, + "updateDate": { "$date": { "$numberLong": "1648914851981" } }, + "status": { "$numberInt": "0" }, + "masterKey": { "provider": "local" }, + }, + ] + # Create encrypted collection: + - databaseName: *db + collectionName: *coll + documents: [] + createOptions: + encryptedFields: + { + "fields": + [ + { + "keyId": *keyid, + "path": "encryptedText", + "bsonType": "string", + "queries": [ + # Use zero contention for deterministic __safeContent__: + { + "queryType": "substringPreview", + "contention": { "$numberLong": "0" }, + "strMinQueryLength": { "$numberLong": "3" }, + "strMaxQueryLength": { "$numberLong": "10" }, + "strMaxLength": { "$numberLong": "20" 
}, + "caseSensitive": true, + "diacriticSensitive": true, + }, + ], + }, + ], + } +tests: + - description: "Insert QE suffixPreview" + operations: + - name: insertOne + arguments: + document: { _id: 1, encryptedText: "foobar" } + object: *coll + expectEvents: + - client: "client" + events: + - commandStartedEvent: + command: + listCollections: 1 + filter: + name: *coll + commandName: listCollections + - commandStartedEvent: + command: + find: datakeys + filter: + { + "$or": + [ + "_id": { "$in": [ *keyid ] }, + "keyAltNames": { "$in": [] }, + ], + } + $db: keyvault + readConcern: { level: "majority" } + commandName: find + - commandStartedEvent: + command: + insert: *coll + documents: + - { "_id": 1, "encryptedText": { $$type: "binData" } } # Sends encrypted payload + ordered: true + commandName: insert + - description: "Query with matching $encStrContains" + operations: + - name: insertOne + arguments: + document: { _id: 1, encryptedText: "foobar" } + object: *coll + - name: find + arguments: + filter: + { + $expr: + { + $encStrContains: + { input: "$encryptedText", substring: "oba" }, + }, + } + object: *coll + expectResult: + [ + { + "_id": { "$numberInt": "1" }, + "encryptedText": "foobar", + "__safeContent__": + [ + { + "$binary": + { + "base64": "wpaMBVDjL4bHf9EtSP52PJFzyNn1R19+iNI/hWtvzdk=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "IpY3x/jjm8j/74jAdUhgxdM5hk68zR0zv/lTKm/72Vg=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "G+ky260C6QiOfIxKz14FmaMbAxvui1BKJO/TnLOHlGk=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "7dv3gAKe9vwJMZmpB40pRCwRTmc7ds9UkGhxH8j084E=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "o0V+Efn6x8XQdE80F1tztNaT3qxHjcsd9DOQ47BtmQk=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "sJvrCjyVot7PIZFsdRehWFANKAj6fmBaj3FLbz/dZLE=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "e98auxFmu02h5MfBIARk29MI7hSmvN3F9DaQ0xjqoEM=", + "subType": "00", 
+ }, + }, + { + "$binary": + { + "base64": "US83krGNov/ezL6IhsY5eEOCxv1xUPDIEL/nmY0IKi0=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "P2Aq5+OHZPG0CWIdmZvWq9c/18ZKVYW3vbxd+WU/TXU=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "8AdPRPnSzcd5uhq4TZfNvNeF0XjLNVwAsJJMTtktw84=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "9O6u/G51I4ZHFLhL4ZLuudbr0s202A2QnPfThmOXPhI=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "N7AjYVyVlv6+lVSTM+cIxRL3SMgs3G5LgxSs+jrgDkI=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "RbGF7dQbPGYQFd9DDO1hPz1UlLOJ77FAC6NsjGwJeos=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "m7srHMgKm6kZwsNx8rc45pmw0/9Qro6xuQ8lZS3+RYk=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "K75CNU3JyKFqZWPiIsVi4+n7DhYmcPl/nEhQ3d88mVI=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "c7bwGpUZc/7JzEnMS7qQ/TPuXZyrmMihFaAV6zIqbZc=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "rDvEdUgEk8u4Srt3ETokWs2FXcnyJaRGQ+NbkFwi2rQ=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "VcdZj9zfveRBRlpCR2OYWau2+GokOFb73TE3gpElNiU=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "eOa9o2xfA6OgkbYUxd6wQJicaeN6guhy2V66W3ALsaA=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "1xGkJh+um70XiRd8lKLDtyHgDqrf7/59Mg7X0+KZh8k=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "OSvllqHxycbcZN4phR6NDujY3ttA59o7nQJ6V9eJpX0=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "ZTX1pyk8Vdw0BSbJx7GeJNcQf3tGKxbrrNSTqBqUWkg=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "cn7V05zb5iXwYrePGMHztC+GRq+Tj8IMpRDraauPhSE=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "E9bV9KyrZxHJSUmMg0HrDK4gGN+75ruelAnrM6hXQgY=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "WrssTNmdgXoTGpbaF0JLRCGH6cDQuz1XEFNTy98nrb0=", + "subType": "00", + }, + }, + { + 
"$binary": + { + "base64": "jZmyOJP35dsxQ/OY5U4ISpVRIYr8iedNfcwZiKt29Qc=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "d2mocORMbX9MX+/itAW8r1kxVw2/uii4vzXtc+2CIRQ=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "JBnJy58eRPhDo3DuZvsHbvQDiHXxdtAx1Eif66k5SfA=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "OjbDulC8s62v0pgweBSsQqtJjJBwH5JinfJpj7nVr+A=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "85i7KT2GP9nSda3Gsil5LKubhq0LDtc22pxBxHpR+nE=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "u9Fvsclwrs9lwIcMPV/fMZD7L3d5anSfJQVjQb9mgLg=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "LZ32ttmLJGOIw9oFaUCn3Sx5uHPTYJPSFpeGRWNqlUc=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "mMsZvGEePTqtl0FJAL/jAdyWNQIlpwN61YIlZsSIZ6s=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "XZcu1a/ZGsIzAl3j4MXQlLo4v2p7kvIqRHtIQYFmL6k=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "Zse27LinlYCEnX6iTmJceI33mEJxFb0LdPxp0RiMOaQ=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "vOv2Hgb2/sBpnX9XwFbIN6yDxhjchwlmczUf82W2tp4=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "oQxZ9A6j3x5j6x1Jqw/N9tpP4rfWMjcV3y+a3PkrL7c=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "/D7ew3EijyUnmT22awVFspcuyo3JChJcDeCPwpljzVM=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "BEmmwqyamt9X3bcWDld61P01zquy8fBHAXq3SHAPP0M=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "wygD9/kAo1KsRvtr1v+9/lvqoWdKwgh6gDHvAQfXPPk=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "pRTKgF/uksrF1c1AcfSTY6ZhqBKVud1vIztQ4/36SLs=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "C4iUo8oNJsjJ37BqnBgIgSQpf99X2Bb4W5MZEAmakHU=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "icoE53jIq6Fu/YGKUiSUTYyZ8xdiTQY9jJiGxVJObpw=", + "subType": "00", + }, + }, + { + "$binary": + { + 
"base64": "oubCwk0V6G2RFWtcOnYDU4uUBoXBrhBRi4nZgrYj9JY=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "IyqhQ9nGhzEi5YW2W6v1kGU5DY2u2qSqbM/qXdLdWVU=", + "subType": "00", + }, + }, + ], + }, + ] + + - description: "Query with non-matching $encStrContains" + operations: + - name: insertOne + arguments: + document: { _id: 1, encryptedText: "foobar" } + object: *coll + - name: find + arguments: + filter: + { + $expr: + { + $encStrContains: { input: "$encryptedText", substring: "blah" }, + }, + } + object: *coll + expectResult: [] diff --git a/specifications/client-side-encryption/tests/unified/QE-Text-suffixPreview.json b/specifications/client-side-encryption/tests/unified/QE-Text-suffixPreview.json new file mode 100644 index 00000000000..deec5e63b07 --- /dev/null +++ b/specifications/client-side-encryption/tests/unified/QE-Text-suffixPreview.json @@ -0,0 +1,338 @@ +{ + "description": "QE-Text-suffixPreview", + "schemaVersion": "1.25", + "runOnRequirements": [ + { + "minServerVersion": "8.2.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "csfle": { + "minLibmongocryptVersion": "1.15.0" + } + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "db", + "client": "client", + "databaseName": "db" + } + }, + { + "collection": { + "id": "coll", + "database": "db", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": 
"HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + }, + { + "databaseName": "db", + "collectionName": "coll", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedText", + "bsonType": "string", + "queries": [ + { + "queryType": "suffixPreview", + "contention": { + "$numberLong": "0" + }, + "strMinQueryLength": { + "$numberLong": "3" + }, + "strMaxQueryLength": { + "$numberLong": "30" + }, + "caseSensitive": true, + "diacriticSensitive": true + } + ] + } + ] + } + } + } + ], + "tests": [ + { + "description": "Insert QE suffixPreview", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedText": "foobar" + } + }, + "object": "coll" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "coll" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 1, + "encryptedText": { + "$$type": 
"binData" + } + } + ], + "ordered": true + }, + "commandName": "insert" + } + } + ] + } + ] + }, + { + "description": "Query with matching $encStrStartsWith", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedText": "foobar" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "$expr": { + "$encStrEndsWith": { + "input": "$encryptedText", + "suffix": "bar" + } + } + } + }, + "object": "coll", + "expectResult": [ + { + "_id": { + "$numberInt": "1" + }, + "encryptedText": "foobar", + "__safeContent__": [ + { + "$binary": { + "base64": "wpaMBVDjL4bHf9EtSP52PJFzyNn1R19+iNI/hWtvzdk=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "uDCWsucUsJemUP7pmeb+Kd8B9qupVzI8wnLFqX1rkiU=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "W3E1x4bHZ8SEHFz4zwXM0G5Z5WSwBhnxE8x5/qdP6JM=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "6g/TXVDDf6z+ntResIvTKWdmIy4ajQ1rhwdNZIiEG7A=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "hU+u/T3D6dHDpT3d/v5AlgtRoAufCXCAyO2jQlgsnCw=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "vrPnq0AtBIURNgNGA6HJL+5/p5SBWe+qz8505TRo/dE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "W5pylBxdv2soY2NcBfPiHDVLTS6tx+0ULkI8gysBeFY=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "oWO3xX3x0bYUJGK2S1aPAmlU3Xtfsgb9lTZ6flGAlsg=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "SjZGucTEUbdpd86O8yj1pyMyBOOKxvAQ9C8ngZ9C5UE=", + "subType": "00" + } + }, + { + "$binary": { + "base64": "CEaMZkxVDVbnXr+To0DOyvsva04UQkIYP3KtgYVVwf8=", + "subType": "00" + } + } + ] + } + ] + } + ] + }, + { + "description": "Query with non-matching $encStrEndsWith", + "operations": [ + { + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedText": "foobar" + } + }, + "object": "coll" + }, + { + "name": "find", + "arguments": { + "filter": { + "$expr": { + "$encStrEndsWith": { + "input": 
"$encryptedText", + "suffix": "foo" + } + } + } + }, + "object": "coll", + "expectResult": [] + } + ] + } + ] +} diff --git a/specifications/client-side-encryption/tests/unified/QE-Text-suffixPreview.yml b/specifications/client-side-encryption/tests/unified/QE-Text-suffixPreview.yml new file mode 100644 index 00000000000..9a6925a2a1c --- /dev/null +++ b/specifications/client-side-encryption/tests/unified/QE-Text-suffixPreview.yml @@ -0,0 +1,221 @@ +description: QE-Text-suffixPreview +schemaVersion: "1.25" +runOnRequirements: + - minServerVersion: "8.2.0" # Server 8.2.0 adds preview support for QE text queries. + topologies: ["replicaset", "sharded", "load-balanced"] # QE does not support standalone. + csfle: + minLibmongocryptVersion: 1.15.0 # For SPM-4158. +createEntities: + - client: + id: &client "client" + autoEncryptOpts: + keyVaultNamespace: keyvault.datakeys + kmsProviders: + local: + key: Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk + observeEvents: + - commandStartedEvent + - database: + id: &db "db" + client: *client + databaseName: *db + - collection: + id: &coll "coll" + database: *db + collectionName: *coll +initialData: + # Insert data encryption key: + - databaseName: keyvault + collectionName: datakeys + documents: + [ + { + "_id": &keyid { "$binary": { "base64": "q83vqxI0mHYSNBI0VniQEg==", "subType": "04" } }, + "keyMaterial": + { + "$binary": + { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00", + }, + }, + "creationDate": { "$date": { "$numberLong": "1648914851981" } }, + "updateDate": { "$date": { "$numberLong": "1648914851981" } }, + "status": { "$numberInt": "0" }, + "masterKey": { "provider": "local" }, + }, + ] + # Create encrypted collection: + - databaseName: 
*db + collectionName: *coll + documents: [] + createOptions: + encryptedFields: + { + "fields": + [ + { + "keyId": *keyid, + "path": "encryptedText", + "bsonType": "string", + "queries": [ + # Use zero contention for deterministic __safeContent__: + { + "queryType": "suffixPreview", + "contention": { "$numberLong": "0" }, + "strMinQueryLength": { "$numberLong": "3" }, + "strMaxQueryLength": { "$numberLong": "30" }, + "caseSensitive": true, + "diacriticSensitive": true, + }, + ], + }, + ], + } +tests: + - description: "Insert QE suffixPreview" + operations: + - name: insertOne + arguments: + document: { _id: 1, encryptedText: "foobar" } + object: *coll + expectEvents: + - client: "client" + events: + - commandStartedEvent: + command: + listCollections: 1 + filter: + name: *coll + commandName: listCollections + - commandStartedEvent: + command: + find: datakeys + filter: + { + "$or": + [ + "_id": { "$in": [ *keyid ] }, + "keyAltNames": { "$in": [] }, + ], + } + $db: keyvault + readConcern: { level: "majority" } + commandName: find + - commandStartedEvent: + command: + insert: *coll + documents: + - { "_id": 1, "encryptedText": { $$type: "binData" } } # Sends encrypted payload + ordered: true + commandName: insert + - description: "Query with matching $encStrStartsWith" + operations: + - name: insertOne + arguments: + document: { _id: 1, encryptedText: "foobar" } + object: *coll + - name: find + arguments: + filter: + { + $expr: + { $encStrEndsWith: { input: "$encryptedText", suffix: "bar" } }, + } + object: *coll + expectResult: + [ + { + "_id": { "$numberInt": "1" }, + "encryptedText": "foobar", + "__safeContent__": + [ + { + "$binary": + { + "base64": "wpaMBVDjL4bHf9EtSP52PJFzyNn1R19+iNI/hWtvzdk=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "uDCWsucUsJemUP7pmeb+Kd8B9qupVzI8wnLFqX1rkiU=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "W3E1x4bHZ8SEHFz4zwXM0G5Z5WSwBhnxE8x5/qdP6JM=", + "subType": "00", + }, + }, + { + "$binary": + { + 
"base64": "6g/TXVDDf6z+ntResIvTKWdmIy4ajQ1rhwdNZIiEG7A=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "hU+u/T3D6dHDpT3d/v5AlgtRoAufCXCAyO2jQlgsnCw=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "vrPnq0AtBIURNgNGA6HJL+5/p5SBWe+qz8505TRo/dE=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "W5pylBxdv2soY2NcBfPiHDVLTS6tx+0ULkI8gysBeFY=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "oWO3xX3x0bYUJGK2S1aPAmlU3Xtfsgb9lTZ6flGAlsg=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "SjZGucTEUbdpd86O8yj1pyMyBOOKxvAQ9C8ngZ9C5UE=", + "subType": "00", + }, + }, + { + "$binary": + { + "base64": "CEaMZkxVDVbnXr+To0DOyvsva04UQkIYP3KtgYVVwf8=", + "subType": "00", + }, + }, + ], + }, + ] + + - description: "Query with non-matching $encStrEndsWith" + operations: + - name: insertOne + arguments: + document: { _id: 1, encryptedText: "foobar" } + object: *coll + - name: find + arguments: + filter: + { + $expr: + { $encStrEndsWith: { input: "$encryptedText", suffix: "foo" } }, + } + object: *coll + expectResult: [] diff --git a/specifications/client-side-encryption/tests/unified/fle2v2-BypassQueryAnalysis.json b/specifications/client-side-encryption/tests/unified/fle2v2-BypassQueryAnalysis.json new file mode 100644 index 00000000000..0817508f8f8 --- /dev/null +++ b/specifications/client-side-encryption/tests/unified/fle2v2-BypassQueryAnalysis.json @@ -0,0 +1,322 @@ +{ + "description": "fle2v2-BypassQueryAnalysis", + "schemaVersion": "1.23", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "csfle": true, + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + }, + "keyVaultNamespace": 
"keyvault.datakeys", + "bypassQueryAnalysis": true + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "encryptedDB", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "encryptedColl", + "database": "encryptedDB", + "collectionName": "default" + } + }, + { + "client": { + "id": "client1" + } + }, + { + "database": { + "id": "unencryptedDB", + "client": "client1", + "databaseName": "default" + } + }, + { + "collection": { + "id": "unencryptedColl", + "database": "unencryptedDB", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + }, + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + ], + "tests": [ + { + "description": "BypassQueryAnalysis decrypts", + "operations": [ + { + 
"object": "encryptedColl", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedIndexed": { + "$binary": { + "base64": "C18BAAAFZAAgAAAAANnt+eLTkv4GdDPl8IAfJOvTzArOgFJQ2S/DcLza4W0DBXMAIAAAAAD2u+omZme3P2gBPehMQyQHQ153tPN1+z7bksYA9jKTpAVwADAAAAAAUnCOQqIvmR65YKyYnsiVfVrg9hwUVO3RhhKExo3RWOzgaS0QdsBL5xKFS0JhZSoWBXUAEAAAAAQSNFZ4EjSYdhI0EjRWeJASEHQAAgAAAAV2AFAAAAAAEjRWeBI0mHYSNBI0VniQEpQbp/ZJpWBKeDtKLiXb0P2E9wvc0g3f373jnYQYlJquOrlPOoEy3ngsHPJuSUijvWDsrQzqYa349K7G/66qaXEFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsBWwAIAAAAACkm0o9bj6j0HuADKc0svbqO2UHj6GrlNdF6yKNxh63xRJrAAAAAAAAAAAAAA==", + "subType": "06" + } + } + } + } + }, + { + "object": "encryptedColl", + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "encryptedIndexed": "123" + } + ] + }, + { + "object": "unencryptedColl", + "name": "find", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encryptedIndexed": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "31eCYlbQoVboc5zwC8IoyJVSkag9PxREka8dkmbXJeY=", + "subType": "00" + } + } + ] + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "listCollections": 1, + "filter": { + "name": "default" + } + }, + "commandName": "listCollections" + } + }, + { + "commandStartedEvent": { + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedIndexed": { + "$binary": { + "base64": 
"C18BAAAFZAAgAAAAANnt+eLTkv4GdDPl8IAfJOvTzArOgFJQ2S/DcLza4W0DBXMAIAAAAAD2u+omZme3P2gBPehMQyQHQ153tPN1+z7bksYA9jKTpAVwADAAAAAAUnCOQqIvmR65YKyYnsiVfVrg9hwUVO3RhhKExo3RWOzgaS0QdsBL5xKFS0JhZSoWBXUAEAAAAAQSNFZ4EjSYdhI0EjRWeJASEHQAAgAAAAV2AFAAAAAAEjRWeBI0mHYSNBI0VniQEpQbp/ZJpWBKeDtKLiXb0P2E9wvc0g3f373jnYQYlJquOrlPOoEy3ngsHPJuSUijvWDsrQzqYa349K7G/66qaXEFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsBWwAIAAAAACkm0o9bj6j0HuADKc0svbqO2UHj6GrlNdF6yKNxh63xRJrAAAAAAAAAAAAAA==", + "subType": "06" + } + } + } + ], + "ordered": true, + "encryptionInformation": { + "type": 1, + "schema": { + "default.default": { + "escCollection": "enxcol_.default.esc", + "ecocCollection": "enxcol_.default.ecoc", + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + }, + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "default", + "filter": { + "_id": 1 + } + }, + "commandName": "find" + } + }, + { + "commandStartedEvent": { + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + }, + "commandName": "find" + } + } + ] + } + ] + } + ] +} diff --git a/specifications/client-side-encryption/tests/unified/fle2v2-BypassQueryAnalysis.yml b/specifications/client-side-encryption/tests/unified/fle2v2-BypassQueryAnalysis.yml new file mode 100644 index 00000000000..2b4a5ec1143 --- /dev/null +++ 
b/specifications/client-side-encryption/tests/unified/fle2v2-BypassQueryAnalysis.yml @@ -0,0 +1,130 @@ +description: fle2v2-BypassQueryAnalysis + +schemaVersion: "1.23" + +runOnRequirements: + - minServerVersion: "7.0.0" + # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Test has not run on Serverless. + # Serverless tests are planned for removal: DRIVERS-3115 + serverless: forbid + csfle: true + topologies: [ "replicaset", "sharded", "load-balanced" ] + +createEntities: + - client: + id: &client0 client0 + autoEncryptOpts: + kmsProviders: + local: + key: Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk + keyVaultNamespace: keyvault.datakeys + bypassQueryAnalysis: true + observeEvents: [ commandStartedEvent ] + - database: + id: &encryptedDB encryptedDB + client: *client0 + databaseName: &encryptedDBName default + - collection: + id: &encryptedColl encryptedColl + database: *encryptedDB + collectionName: &encryptedCollName default + - client: + id: &client1 client1 + - database: + id: &unencryptedDB unencryptedDB + client: *client1 + databaseName: *encryptedDBName + - collection: + id: &unencryptedColl unencryptedColl + database: *unencryptedDB + collectionName: *encryptedCollName + +initialData: + - databaseName: &keyvaultDBName keyvault + collectionName: &datakeysCollName datakeys + documents: + - {'_id': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} + - databaseName: *encryptedDBName + 
collectionName: *encryptedCollName + documents: [] + createOptions: + encryptedFields: &encrypted_fields {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedIndexed', 'bsonType': 'string', 'queries': {'queryType': 'equality', 'contention': {'$numberLong': '0'}}}, {'keyId': {'$binary': {'base64': 'q83vqxI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedUnindexed', 'bsonType': 'string'}]} + +tests: + - description: "BypassQueryAnalysis decrypts" + operations: + - object: *encryptedColl + name: insertOne + arguments: + document: &doc0_encrypted { + "_id": 1, + "encryptedIndexed": { + "$binary": { + # Payload has an IndexKey of key1 and UserKey of key1. + "base64": "C18BAAAFZAAgAAAAANnt+eLTkv4GdDPl8IAfJOvTzArOgFJQ2S/DcLza4W0DBXMAIAAAAAD2u+omZme3P2gBPehMQyQHQ153tPN1+z7bksYA9jKTpAVwADAAAAAAUnCOQqIvmR65YKyYnsiVfVrg9hwUVO3RhhKExo3RWOzgaS0QdsBL5xKFS0JhZSoWBXUAEAAAAAQSNFZ4EjSYdhI0EjRWeJASEHQAAgAAAAV2AFAAAAAAEjRWeBI0mHYSNBI0VniQEpQbp/ZJpWBKeDtKLiXb0P2E9wvc0g3f373jnYQYlJquOrlPOoEy3ngsHPJuSUijvWDsrQzqYa349K7G/66qaXEFZQAgAAAAAOuac/eRLYakKX6B0vZ1r3QodOQFfjqJD+xlGiPu4/PsBWwAIAAAAACkm0o9bj6j0HuADKc0svbqO2UHj6GrlNdF6yKNxh63xRJrAAAAAAAAAAAAAA==", + "subType": "06" + } + } + } + - object: *encryptedColl + name: find + arguments: + filter: { "_id": 1 } + expectResult: [{"_id": 1, "encryptedIndexed": "123" }] + - object: *unencryptedColl + name: find + arguments: + filter: {} + expectResult: + - {"_id": 1, "encryptedIndexed": { "$$type": "binData" }, "__safeContent__": [{ "$binary" : { "base64" : "31eCYlbQoVboc5zwC8IoyJVSkag9PxREka8dkmbXJeY=", "subType" : "00" } }] } + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + listCollections: 1 + filter: + name: *encryptedCollName + commandName: listCollections + - commandStartedEvent: + command: + insert: *encryptedCollName + documents: + - *doc0_encrypted + ordered: true + encryptionInformation: + type: 1 + schema: + "default.default": + # 
libmongocrypt applies escCollection and ecocCollection to outgoing command. + escCollection: "enxcol_.default.esc" + ecocCollection: "enxcol_.default.ecoc" + <<: *encrypted_fields + commandName: insert + - commandStartedEvent: + command: + find: *encryptedCollName + filter: { "_id": 1 } + commandName: find + - commandStartedEvent: + command: + find: *datakeysCollName + filter: { + "$or": [ + { + "_id": { + "$in": [ + {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}} + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + } + $db: *keyvaultDBName + readConcern: { level: "majority" } + commandName: find \ No newline at end of file diff --git a/specifications/client-side-encryption/tests/unified/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json b/specifications/client-side-encryption/tests/unified/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json new file mode 100644 index 00000000000..b5f848c080a --- /dev/null +++ b/specifications/client-side-encryption/tests/unified/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.json @@ -0,0 +1,256 @@ +{ + "description": "fle2v2-EncryptedFields-vs-EncryptedFieldsMap", + "schemaVersion": "1.23", + "runOnRequirements": [ + { + "minServerVersion": "7.0.0", + "serverless": "forbid", + "csfle": true, + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + }, + "keyVaultNamespace": "keyvault.datakeys", + "encryptedFieldsMap": { + "default.default": { + "fields": [] + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "encryptedDB", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "encryptedColl", + "database": "encryptedDB", + "collectionName": 
"default" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648914851981" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } + } + ] + }, + { + "databaseName": "default", + "collectionName": "default", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedIndexed", + "bsonType": "string", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + }, + { + "keyId": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedUnindexed", + "bsonType": "string" + } + ] + } + } + } + ], + "tests": [ + { + "description": "encryptedFieldsMap is preferred over remote encryptedFields", + "operations": [ + { + "object": "encryptedColl", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedUnindexed": { + "$binary": { + "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + } + }, + { + "object": "encryptedColl", + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "encryptedUnindexed": "value123" + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { 
+ "commandStartedEvent": { + "databaseName": "default", + "commandName": "insert", + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encryptedUnindexed": { + "$binary": { + "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + ], + "ordered": true + } + } + }, + { + "commandStartedEvent": { + "databaseName": "default", + "commandName": "find", + "command": { + "find": "default", + "filter": { + "_id": 1 + } + } + } + }, + { + "commandStartedEvent": { + "databaseName": "keyvault", + "commandName": "find", + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "$db": "keyvault", + "readConcern": { + "level": "majority" + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "default", + "databaseName": "default", + "documents": [ + { + "_id": 1, + "encryptedUnindexed": { + "$binary": { + "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + ] + } + ] + } + ] +} diff --git a/specifications/client-side-encryption/tests/unified/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.yml b/specifications/client-side-encryption/tests/unified/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.yml new file mode 100644 index 00000000000..67cca9b4348 --- /dev/null +++ b/specifications/client-side-encryption/tests/unified/fle2v2-EncryptedFields-vs-EncryptedFieldsMap.yml @@ -0,0 +1,114 @@ +description: fle2v2-EncryptedFields-vs-EncryptedFieldsMap + +schemaVersion: "1.23" + +runOnRequirements: + - minServerVersion: "7.0.0" + # Skip QEv2 (also referred to as FLE2v2) tests on Serverless. Test has not run on Serverless. 
+ # Serverless tests are planned for removal: DRIVERS-3115 + serverless: forbid + csfle: true + topologies: [ "replicaset", "sharded", "load-balanced" ] + +createEntities: + - client: + id: &client0 client0 + autoEncryptOpts: + kmsProviders: + local: + key: Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk + keyVaultNamespace: keyvault.datakeys + encryptedFieldsMap: { + "default.default": { + "fields": [] + } + } + observeEvents: [ commandStartedEvent ] + - database: + id: &encryptedDB encryptedDB + client: *client0 + databaseName: &encryptedDBName default + - collection: + id: &encryptedColl encryptedColl + database: *encryptedDB + collectionName: &encryptedCollName default + +initialData: + - databaseName: &keyvaultDBName keyvault + collectionName: &datakeysCollName datakeys + documents: + - {'_id': {'$binary': {'base64': 'q83vqxI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'keyMaterial': {'$binary': {'base64': 'HBk9BWihXExNDvTp1lUxOuxuZK2Pe2ZdVdlsxPEBkiO1bS4mG5NNDsQ7zVxJAH8BtdOYp72Ku4Y3nwc0BUpIKsvAKX4eYXtlhv5zUQxWdeNFhg9qK7qb8nqhnnLeT0f25jFSqzWJoT379hfwDeu0bebJHr35QrJ8myZdPMTEDYF08QYQ48ShRBli0S+QzBHHAQiM2iJNr4svg2WR8JSeWQ==', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1648914851981'}}, 'updateDate': {'$date': {'$numberLong': '1648914851981'}}, 'status': {'$numberInt': '0'}, 'masterKey': {'provider': 'local'}} + - databaseName: *encryptedDBName + collectionName: *encryptedCollName + documents: [] + createOptions: + encryptedFields: {'fields': [{'keyId': {'$binary': {'base64': 'EjRWeBI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedIndexed', 'bsonType': 'string', 'queries': {'queryType': 'equality', 'contention': {'$numberLong': '0'}}}, {'keyId': {'$binary': {'base64': 'q83vqxI0mHYSNBI0VniQEg==', 'subType': '04'}}, 'path': 'encryptedUnindexed', 'bsonType': 'string'}]} + +tests: + - description: "encryptedFieldsMap is preferred over remote encryptedFields" + 
operations: + # EncryptedFieldsMap overrides remote encryptedFields. + # Automatic encryption does not occur on encryptedUnindexed. The value is validated on the server. + - object: *encryptedColl + name: insertOne + arguments: + document: &doc0 { + _id: 1, + encryptedUnindexed: { + "$binary": { + "base64": "BqvN76sSNJh2EjQSNFZ4kBICTQaVZPWgXp41I7mPV1rLFTtw1tXzjcdSEyxpKKqujlko5TeizkB9hHQ009dVY1+fgIiDcefh+eQrm3CkhQ==", + "subType": "06" + } + } + } + - object: *encryptedColl + name: find + arguments: + filter: { "_id": 1 } + expectResult: + - {"_id": 1, "encryptedUnindexed": "value123" } + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + databaseName: *encryptedDBName + commandName: insert + command: + insert: *encryptedCollName + documents: + - *doc0 + ordered: true + - commandStartedEvent: + databaseName: *encryptedDBName + commandName: find + command: + find: *encryptedCollName + filter: { "_id": 1} + - commandStartedEvent: + databaseName: *keyvaultDBName + commandName: find + command: + find: *datakeysCollName + filter: { + "$or": [ + { + "_id": { + "$in": [ + {'$binary': {'base64': 'q83vqxI0mHYSNBI0VniQEg==', 'subType': '04'}} + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + } + $db: *keyvaultDBName + readConcern: { level: "majority" } + outcome: + - collectionName: *encryptedCollName + databaseName: *encryptedDBName + documents: + - *doc0 \ No newline at end of file diff --git a/specifications/client-side-encryption/tests/unified/localSchema.json b/specifications/client-side-encryption/tests/unified/localSchema.json new file mode 100644 index 00000000000..0836a46b801 --- /dev/null +++ b/specifications/client-side-encryption/tests/unified/localSchema.json @@ -0,0 +1,343 @@ +{ + "description": "localSchema", + "schemaVersion": "1.23", + "runOnRequirements": [ + { + "minServerVersion": "4.1.10", + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "schemaMap": { + 
"default.default": { + "properties": { + "encrypted_w_altname": { + "encrypt": { + "keyId": "/altname", + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "OyQRAeK7QlWMr0E2xWapYg==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + }, + "random": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "OyQRAeK7QlWMr0E2xWapYg==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "encrypted_string_equivalent": { + "encrypt": { + "keyId": [ + { + "$binary": { + "base64": "OyQRAeK7QlWMr0E2xWapYg==", + "subType": "04" + } + } + ], + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "bsonType": "object" + } + }, + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + }, + "sessionToken": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "client": { + "id": "client1", + "autoEncryptOpts": { + "schemaMap": { + "default.default": { + "properties": { + "test": { + "bsonType": "string" + } + }, + "bsonType": "object", + "required": [ + "test" + ] + } + }, + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "aws": { + "accessKeyId": { + "$$placeholder": 1 + }, + "secretAccessKey": { + "$$placeholder": 1 + }, + "sessionToken": { + "$$placeholder": 1 + } + } + } + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "encryptedDB", + "client": "client0", + "databaseName": "default" + } + }, + { + "collection": { + "id": "encryptedColl", + "database": "encryptedDB", + "collectionName": "default" + } + }, + { + "database": { + "id": "encryptedDB2", + "client": 
"client1", + "databaseName": "default" + } + }, + { + "collection": { + "id": "encryptedColl2", + "database": "encryptedDB2", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "OyQRAeK7QlWMr0E2xWapYg==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + }, + { + "databaseName": "default", + "collectionName": "default", + "documents": [] + } + ], + "tests": [ + { + "description": "A local schema should override", + "operations": [ + { + "object": "encryptedColl", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + } + }, + { + "object": "encryptedColl", + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "encrypted_string": "string0" + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "databaseName": "keyvault", + "commandName": "find", + "command": { + "find": "datakeys", + "filter": { + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": { + "base64": "OyQRAeK7QlWMr0E2xWapYg==", + "subType": "04" + } + } + 
] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] + }, + "readConcern": { + "level": "majority" + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "command": { + "insert": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "ATskEQHiu0JVjK9BNsVmqWIClDjVEWlpmVRN76InSQuFW2piVbYFkh0QhZCKyx9DdvFBUG+FWluh0kXyhdq3b2Vt/nqNWjXn2y0+JPhrc4W+wQ==", + "subType": "06" + } + } + } + ], + "ordered": true + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "command": { + "find": "default", + "filter": { + "_id": 1 + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "default", + "databaseName": "default", + "documents": [ + { + "_id": 1, + "encrypted_string": { + "$binary": { + "base64": "ATskEQHiu0JVjK9BNsVmqWIClDjVEWlpmVRN76InSQuFW2piVbYFkh0QhZCKyx9DdvFBUG+FWluh0kXyhdq3b2Vt/nqNWjXn2y0+JPhrc4W+wQ==", + "subType": "06" + } + } + } + ] + } + ] + }, + { + "description": "A local schema with no encryption is an error", + "operations": [ + { + "object": "encryptedColl2", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encrypted_string": "string0" + } + }, + "expectError": { + "isError": true, + "errorContains": "JSON schema keyword 'required' is only allowed with a remote schema" + } + } + ] + } + ] +} diff --git a/specifications/client-side-encryption/tests/unified/localSchema.yml b/specifications/client-side-encryption/tests/unified/localSchema.yml new file mode 100644 index 00000000000..495b2774eb9 --- /dev/null +++ b/specifications/client-side-encryption/tests/unified/localSchema.yml @@ -0,0 +1,103 @@ +description: localSchema + +schemaVersion: "1.23" + +runOnRequirements: + - minServerVersion: "4.1.10" + csfle: true + +createEntities: + - client: + id: &client0 client0 + autoEncryptOpts: + schemaMap: + "default.default": {'properties': {'encrypted_w_altname': {'encrypt': {'keyId': '/altname', 'bsonType': 'string', 'algorithm': 
'AEAD_AES_256_CBC_HMAC_SHA_512-Random'}}, 'encrypted_string': {'encrypt': {'keyId': [{'$binary': {'base64': 'AAAAAAAAAAAAAAAAAAAAAA==', 'subType': '04'}}], 'bsonType': 'string', 'algorithm': 'AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic'}}, 'random': {'encrypt': {'keyId': [{'$binary': {'base64': 'AAAAAAAAAAAAAAAAAAAAAA==', 'subType': '04'}}], 'bsonType': 'string', 'algorithm': 'AEAD_AES_256_CBC_HMAC_SHA_512-Random'}}, 'encrypted_string_equivalent': {'encrypt': {'keyId': [{'$binary': {'base64': 'AAAAAAAAAAAAAAAAAAAAAA==', 'subType': '04'}}], 'bsonType': 'string', 'algorithm': 'AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic'}}}, 'bsonType': 'object'} + keyVaultNamespace: keyvault.datakeys + kmsProviders: + aws: { accessKeyId: { $$placeholder: 1 }, secretAccessKey: { $$placeholder: 1 }, sessionToken: { $$placeholder: 1 } } + observeEvents: [ commandStartedEvent ] + - client: + id: &client1 client1 + autoEncryptOpts: + schemaMap: + "default.default": {'properties': {'test': {'bsonType': 'string'}}, 'bsonType': 'object', 'required': ['test']} + keyVaultNamespace: keyvault.datakeys + kmsProviders: + aws: { accessKeyId: { $$placeholder: 1 }, secretAccessKey: { $$placeholder: 1 }, sessionToken: { $$placeholder: 1 } } + observeEvents: [ commandStartedEvent ] + - database: + id: &encryptedDB encryptedDB + client: *client0 + databaseName: &encryptedDBName default + - collection: + id: &encryptedColl encryptedColl + database: *encryptedDB + collectionName: &encryptedCollName default + # intentionally the same DB and collection name as encryptedDB/Coll + - database: + id: &encryptedDB2 encryptedDB2 + client: *client1 + databaseName: *encryptedDBName + - collection: + id: &encryptedColl2 encryptedColl2 + database: *encryptedDB2 + collectionName: *encryptedDBName + +initialData: + - databaseName: &keyvaultDBName keyvault + collectionName: &datakeysCollName datakeys + documents: + - {'status': 1, '_id': {'$binary': {'base64': 'AAAAAAAAAAAAAAAAAAAAAA==', 'subType': '04'}}, 'masterKey': 
{'provider': 'aws', 'key': 'arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0', 'region': 'us-east-1'}, 'updateDate': {'$date': {'$numberLong': '1552949630483'}}, 'keyMaterial': {'$binary': {'base64': 'AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1552949630483'}}, 'keyAltNames': ['altname', 'another_altname']} + - databaseName: *encryptedDBName + collectionName: *encryptedCollName + documents: [] + +tests: + - description: "A local schema should override" + operations: + - object: *encryptedColl + name: insertOne + arguments: + document: &doc0 { _id: 1, encrypted_string: "string0" } + - object: *encryptedColl + name: find + arguments: + filter: { _id: 1 } + expectResult: [*doc0] + expectEvents: + # Then key is fetched from the key vault. 
+ - client: *client0 + events: + - commandStartedEvent: + databaseName: *keyvaultDBName + commandName: find + command: + find: *datakeysCollName + filter: {"$or": [{"_id": {"$in": [ {'$binary': {'base64': 'AAAAAAAAAAAAAAAAAAAAAA==', 'subType': '04'}} ] }}, {"keyAltNames": {"$in": []}}]} + readConcern: { level: "majority" } + - commandStartedEvent: + commandName: insert + command: + insert: *encryptedCollName + documents: + - &doc0_encrypted { _id: 1, encrypted_string: {'$binary': {'base64': 'AQAAAAAAAAAAAAAAAAAAAAACwj+3zkv2VM+aTfk60RqhXq6a/77WlLwu/BxXFkL7EppGsju/m8f0x5kBDD3EZTtGALGXlym5jnpZAoSIkswHoA==', 'subType': '06'}} } + ordered: true + - commandStartedEvent: + commandName: find + command: + find: *encryptedCollName + filter: { _id: 1 } + outcome: + - collectionName: *encryptedCollName + databaseName: *encryptedDBName + documents: + - *doc0_encrypted + - description: "A local schema with no encryption is an error" + operations: + - object: *encryptedColl2 + name: insertOne + arguments: + document: &doc0 { _id: 1, encrypted_string: "string0" } + expectError: + isError: true + errorContains: "JSON schema keyword 'required' is only allowed with a remote schema" diff --git a/specifications/client-side-encryption/tests/unified/maxWireVersion.json b/specifications/client-side-encryption/tests/unified/maxWireVersion.json new file mode 100644 index 00000000000..d0af75ac992 --- /dev/null +++ b/specifications/client-side-encryption/tests/unified/maxWireVersion.json @@ -0,0 +1,101 @@ +{ + "description": "maxWireVersion", + "schemaVersion": "1.23", + "runOnRequirements": [ + { + "maxServerVersion": "4.0.99", + "csfle": true + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "kmsProviders": { + "aws": {} + }, + "keyVaultNamespace": "keyvault.datakeys", + "extraOptions": { + "mongocryptdBypassSpawn": true + } + } + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "default" + } + }, + { + 
"collection": { + "id": "collection0", + "database": "database0", + "collectionName": "default" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "status": 1, + "_id": { + "$binary": { + "base64": "AAAAAAAAAAAAAAAAAAAAAA==", + "subType": "04" + } + }, + "masterKey": { + "provider": "aws", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "region": "us-east-1" + }, + "updateDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1552949630483" + } + }, + "keyAltNames": [ + "altname", + "another_altname" + ] + } + ] + } + ], + "tests": [ + { + "description": "operation fails with maxWireVersion < 8", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "encrypted_string": "string0" + } + }, + "expectError": { + "errorContains": "Auto-encryption requires a minimum MongoDB version of 4.2" + } + } + ] + } + ] +} diff --git a/specifications/client-side-encryption/tests/unified/maxWireVersion.yml b/specifications/client-side-encryption/tests/unified/maxWireVersion.yml new file mode 100644 index 00000000000..75a51dd4e55 --- /dev/null +++ b/specifications/client-side-encryption/tests/unified/maxWireVersion.yml @@ -0,0 +1,41 @@ +description: maxWireVersion + +schemaVersion: "1.23" + +runOnRequirements: + - maxServerVersion: "4.0.99" + csfle: true + +createEntities: + - client: + id: &client0 client0 + autoEncryptOpts: + kmsProviders: + aws: {} + 
keyVaultNamespace: keyvault.datakeys + extraOptions: + mongocryptdBypassSpawn: true # mongocryptd probably won't be on the path. mongocryptd was introduced in server 4.2. + - database: + id: &database0 database0 + client: *client0 + databaseName: default + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: default + +initialData: + - databaseName: keyvault + collectionName: datakeys + documents: + - {'status': 1, '_id': {'$binary': {'base64': 'AAAAAAAAAAAAAAAAAAAAAA==', 'subType': '04'}}, 'masterKey': {'provider': 'aws', 'key': 'arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0', 'region': 'us-east-1'}, 'updateDate': {'$date': {'$numberLong': '1552949630483'}}, 'keyMaterial': {'$binary': {'base64': 'AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO', 'subType': '00'}}, 'creationDate': {'$date': {'$numberLong': '1552949630483'}}, 'keyAltNames': ['altname', 'another_altname']} + +tests: + - description: "operation fails with maxWireVersion < 8" + operations: + - name: insertOne + object: *collection0 + arguments: + document: { encrypted_string: "string0" } + expectError: + errorContains: "Auto-encryption requires a minimum MongoDB version of 4.2" \ No newline at end of file diff --git a/specifications/client-side-operations-timeout/tests/bulkWrite.json b/specifications/client-side-operations-timeout/tests/bulkWrite.json new file mode 100644 index 00000000000..9a05809f77c --- /dev/null +++ b/specifications/client-side-operations-timeout/tests/bulkWrite.json @@ -0,0 +1,160 @@ +{ + "description": "timeoutMS behaves correctly for bulkWrite operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": 
"4.4" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "w": 1 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS applied to entire bulkWrite, not individual commands", + "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": {} + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert", + "update" + ], + "blockConnection": true, + "blockTimeMS": 120 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + }, + { + "replaceOne": { + "filter": { + "_id": 1 + }, + "replacement": { + "x": 1 + } + } + } + ], + "timeoutMS": 200 + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + 
} + } + } + ] + } + ] + } + ] +} diff --git a/specifications/client-side-operations-timeout/tests/bulkWrite.yml b/specifications/client-side-operations-timeout/tests/bulkWrite.yml new file mode 100644 index 00000000000..0459dbbc16a --- /dev/null +++ b/specifications/client-side-operations-timeout/tests/bulkWrite.yml @@ -0,0 +1,87 @@ +description: "timeoutMS behaves correctly for bulkWrite operations" + +schemaVersion: "1.9" + +runOnRequirements: + - minServerVersion: "4.4" + +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + uriOptions: + # Used to speed up the test + w: 1 + - database: + id: &database database + client: *client + databaseName: &databaseName test + - collection: + id: &collection collection + database: *database + collectionName: &collectionName coll + +initialData: + - collectionName: *collectionName + databaseName: *databaseName + documents: [] + +tests: + # Test that drivers do not refresh timeoutMS between commands. This is done by running a bulkWrite that will require + # two commands with timeoutMS=200 and blocking each command for 120ms. The server should take over 200ms total, so the + # bulkWrite should fail with a timeout error. + - description: "timeoutMS applied to entire bulkWrite, not individual commands" + operations: + # Do an operation without a timeout to ensure the servers are discovered. 
+ - name: insertOne + object: *collection + arguments: + document: {} + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["insert", "update"] + blockConnection: true + blockTimeMS: 120 + - name: bulkWrite + object: *collection + arguments: + requests: + - insertOne: + document: { _id: 1 } + - replaceOne: + filter: { _id: 1 } + replacement: { x: 1 } + timeoutMS: 200 + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$type: ["int", "long"] } diff --git a/specifications/client-side-operations-timeout/tests/command-execution.json b/specifications/client-side-operations-timeout/tests/command-execution.json new file mode 100644 index 00000000000..212cd41089d --- /dev/null +++ b/specifications/client-side-operations-timeout/tests/command-execution.json @@ -0,0 +1,399 @@ +{ + "description": "timeoutMS behaves correctly during command execution", + "schemaVersion": "1.26", + "runOnRequirements": [ + { + "minServerVersion": "4.4.7", + "topologies": [ + "single", + "replicaset", + "sharded" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + }, + { + "collectionName": "timeoutColl", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "maxTimeMS value in the command is less than timeoutMS", + "operations": [ 
+ { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "reduceMaxTimeMSTest", + "blockConnection": true, + "blockTimeMS": 50 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "uriOptions": { + "appName": "reduceMaxTimeMSTest", + "w": 1, + "timeoutMS": 500, + "heartbeatFrequencyMS": 500, + "minPoolSize": 1 + }, + "awaitMinPoolSizeMS": 10000, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "timeoutCollection", + "database": "database", + "collectionName": "timeoutColl" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 1 + }, + "timeoutMS": 100000 + } + }, + { + "name": "wait", + "object": "testRunner", + "arguments": { + "ms": 1000 + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 2 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl", + "maxTimeMS": { + "$$lte": 450 + } + } + } + } + ] + } + ] + }, + { + "description": "command is not sent if RTT is greater than timeoutMS", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + 
"hello", + "isMaster" + ], + "appName": "rttTooHighTest", + "blockConnection": true, + "blockTimeMS": 50 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "uriOptions": { + "appName": "rttTooHighTest", + "w": 1, + "timeoutMS": 10, + "heartbeatFrequencyMS": 500, + "minPoolSize": 1 + }, + "awaitMinPoolSizeMS": 10000, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "timeoutCollection", + "database": "database", + "collectionName": "timeoutColl" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 1 + }, + "timeoutMS": 100000 + } + }, + { + "name": "wait", + "object": "testRunner", + "arguments": { + "ms": 1000 + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 2 + } + }, + "expectError": { + "isTimeoutError": true + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 3 + } + }, + "expectError": { + "isTimeoutError": true + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 4 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl" + } + } + } + ] + } + ] + }, + { + "description": "short-circuit is not enabled with only 1 RTT measurement", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + 
"isMaster" + ], + "appName": "reduceMaxTimeMSTest", + "blockConnection": true, + "blockTimeMS": 100 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "uriOptions": { + "appName": "reduceMaxTimeMSTest", + "w": 1, + "timeoutMS": 90, + "heartbeatFrequencyMS": 100000, + "minPoolSize": 1 + }, + "awaitMinPoolSizeMS": 10000, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "timeoutCollection", + "database": "database", + "collectionName": "timeoutColl" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 1 + }, + "timeoutMS": 100000 + } + }, + { + "name": "insertOne", + "object": "timeoutCollection", + "arguments": { + "document": { + "_id": 2 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "timeoutColl", + "maxTimeMS": { + "$$lte": 450 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/specifications/client-side-operations-timeout/tests/command-execution.yml b/specifications/client-side-operations-timeout/tests/command-execution.yml new file mode 100644 index 00000000000..95a9abeefc7 --- /dev/null +++ b/specifications/client-side-operations-timeout/tests/command-execution.yml @@ -0,0 +1,255 @@ +description: "timeoutMS behaves correctly during command execution" + +schemaVersion: "1.26" + +runOnRequirements: + # Require SERVER-49336 for failCommand + appName on the initial handshake. 
+ - minServerVersion: "4.4.7" + # Skip load-balanced and serverless which do not support RTT measurements. + topologies: [ single, replicaset, sharded ] + serverless: forbid + +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + +initialData: + # The corresponding entities for the collections defined here are created in test-level createEntities operations. + # This is done so that tests can set fail points that will affect all of the handshakes and heartbeats done by a + # client. The collection and database names are listed here so that the collections will be dropped and re-created at + # the beginning of each test. + - collectionName: ®ularCollectionName coll + databaseName: &databaseName test + documents: [] + - collectionName: &timeoutCollectionName timeoutColl + databaseName: &databaseName test + documents: [] + +tests: + - description: "maxTimeMS value in the command is less than timeoutMS" + operations: + # Artificially increase the server RTT to ~50ms. + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: "alwaysOn" + data: + failCommands: ["hello", "isMaster"] + appName: &appName reduceMaxTimeMSTest + blockConnection: true + blockTimeMS: 50 + # Create a client with the app name specified in the fail point and timeoutMS higher than blockTimeMS. + # Also create database and collection entities derived from the new client. + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + useMultipleMongoses: false + uriOptions: + appName: *appName + w: 1 # Override server's w:majority default to speed up the test. 
+ timeoutMS: 500 + heartbeatFrequencyMS: 500 + minPoolSize: 1 + awaitMinPoolSizeMS: 10000 + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &timeoutCollection timeoutCollection + database: *database + collectionName: *timeoutCollectionName + # Do an operation with a large timeout to ensure the servers are discovered. + - name: insertOne + object: *timeoutCollection + arguments: + document: { _id: 1 } + timeoutMS: 100000 + # Wait until short-circuiting has been enabled (at least 2 RTT measurements). + - name: wait + object: testRunner + arguments: + ms: 1000 + # Do an operation with timeoutCollection so the event will include a maxTimeMS field. + - name: insertOne + object: *timeoutCollection + arguments: + document: { _id: 2 } + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *timeoutCollectionName + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *timeoutCollectionName + maxTimeMS: { $$lte: 450 } + + - description: "command is not sent if RTT is greater than timeoutMS" + operations: + # Artificially increase the server RTT to ~50ms. + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: "alwaysOn" + data: + failCommands: ["hello", "isMaster"] + appName: &appName rttTooHighTest + blockConnection: true + blockTimeMS: 50 + # Create a client with the app name specified in the fail point. Also create database and collection entities + # derived from the new client. There is one collection entity with no timeoutMS and another with a timeoutMS + # that's lower than the fail point's blockTimeMS value. 
+ - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + useMultipleMongoses: false + uriOptions: + appName: *appName + w: 1 # Override server's w:majority default to speed up the test. + timeoutMS: 10 + heartbeatFrequencyMS: 500 + minPoolSize: 1 + awaitMinPoolSizeMS: 10000 + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &timeoutCollection timeoutCollection + database: *database + collectionName: *timeoutCollectionName + # Do an operation with a large timeout to ensure the servers are discovered. + - name: insertOne + object: *timeoutCollection + arguments: + document: { _id: 1 } + timeoutMS: 100000 + # Wait until short-circuiting has been enabled (at least 2 RTT measurements). + - name: wait + object: testRunner + arguments: + ms: 1000 + # Do an operation with timeoutCollection which will error. + - name: insertOne + object: *timeoutCollection + arguments: + document: { _id: 2 } + expectError: + isTimeoutError: true + # Do an operation with timeoutCollection which will error. + - name: insertOne + object: *timeoutCollection + arguments: + document: { _id: 3 } + expectError: + isTimeoutError: true + # Do an operation with timeoutCollection which will error. + - name: insertOne + object: *timeoutCollection + arguments: + document: { _id: 4 } + expectError: + isTimeoutError: true + expectEvents: + # There should only be one event, which corresponds to the first + # insertOne call. For the subsequent insertOne calls, drivers should + # fail client-side. + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *timeoutCollectionName + + - description: "short-circuit is not enabled with only 1 RTT measurement" + operations: + # Artificially increase the server RTT to ~300ms. 
+ - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: "alwaysOn" + data: + failCommands: ["hello", "isMaster"] + appName: &appName reduceMaxTimeMSTest + blockConnection: true + blockTimeMS: 100 + # Create a client with the app name specified in the fail point and timeoutMS lower than blockTimeMS. + # Also create database and collection entities derived from the new client. + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + useMultipleMongoses: false + uriOptions: + appName: *appName + w: 1 # Override server's w:majority default to speed up the test. + timeoutMS: 90 + heartbeatFrequencyMS: 100000 # Override heartbeatFrequencyMS to ensure only 1 RTT is recorded. + minPoolSize: 1 + awaitMinPoolSizeMS: 10000 + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &timeoutCollection timeoutCollection + database: *database + collectionName: *timeoutCollectionName + # Do an operation with a large timeout to ensure the servers are discovered. + - name: insertOne + object: *timeoutCollection + arguments: + document: { _id: 1 } + timeoutMS: 100000 + # Do an operation with timeoutCollection which will succeed. If this + # fails it indicates the driver mistakenly used the min RTT even though + # there has only been one sample. 
+ - name: insertOne + object: *timeoutCollection + arguments: + document: { _id: 2 } + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *timeoutCollectionName + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *timeoutCollectionName + maxTimeMS: { $$lte: 450 } diff --git a/specifications/client-side-operations-timeout/tests/convenient-transactions.json b/specifications/client-side-operations-timeout/tests/convenient-transactions.json new file mode 100644 index 00000000000..f9d03429db9 --- /dev/null +++ b/specifications/client-side-operations-timeout/tests/convenient-transactions.json @@ -0,0 +1,211 @@ +{ + "description": "timeoutMS behaves correctly for the withTransaction API", + "schemaVersion": "1.26", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 500, + "minPoolSize": 1 + }, + "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "withTransaction raises a client-side error if timeoutMS is overridden inside the callback", + "operations": [ + { + "name": "withTransaction", + "object": "session", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + 
"_id": 1 + }, + "session": "session", + "timeoutMS": 100 + }, + "expectError": { + "isClientError": true + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [] + } + ] + }, + { + "description": "timeoutMS is not refreshed for each operation in the callback", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 300 + } + } + } + }, + { + "name": "withTransaction", + "object": "session", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + }, + "session": "session" + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 2 + }, + "session": "session" + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + } + ] +} diff --git a/specifications/client-side-operations-timeout/tests/convenient-transactions.yml b/specifications/client-side-operations-timeout/tests/convenient-transactions.yml new file mode 100644 index 
00000000000..55b72481dfb --- /dev/null +++ b/specifications/client-side-operations-timeout/tests/convenient-transactions.yml @@ -0,0 +1,112 @@ +description: "timeoutMS behaves correctly for the withTransaction API" + +schemaVersion: "1.26" + +runOnRequirements: + - minServerVersion: "4.4" + topologies: ["replicaset", "sharded"] + +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + - client: + id: &client client + uriOptions: + timeoutMS: 500 + minPoolSize: 1 + awaitMinPoolSizeMS: 10000 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: &databaseName test + - collection: + id: &collection collection + database: *database + collectionName: &collectionName coll + - session: + id: &session session + client: *client + +initialData: + - collectionName: *collectionName + databaseName: *databaseName + documents: [] + +tests: + - description: "withTransaction raises a client-side error if timeoutMS is overridden inside the callback" + operations: + - name: withTransaction + object: *session + arguments: + callback: + - name: insertOne + object: *collection + arguments: + document: { _id: 1 } + session: *session + timeoutMS: 100 + expectError: + isClientError: true + expectError: + isClientError: true + expectEvents: + # The only operation run fails with a client-side error, so there should be no events for the client. 
+ - client: *client + events: [] + + - description: "timeoutMS is not refreshed for each operation in the callback" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 300 + - name: withTransaction + object: *session + arguments: + callback: + - name: insertOne + object: *collection + arguments: + document: { _id: 1 } + session: *session + - name: insertOne + object: *collection + arguments: + document: { _id: 2 } + session: *session + expectError: + isTimeoutError: true + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: abortTransaction + databaseName: admin + command: + abortTransaction: 1 + maxTimeMS: { $$type: [ "int", "long" ] } diff --git a/specifications/client-side-operations-timeout/tests/deprecated-options.json b/specifications/client-side-operations-timeout/tests/deprecated-options.json new file mode 100644 index 00000000000..647e1bf792d --- /dev/null +++ b/specifications/client-side-operations-timeout/tests/deprecated-options.json @@ -0,0 +1,7222 @@ +{ + "description": "operations ignore deprecated timeout options if timeoutMS is set", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.2", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + 
"tests": [ + { + "description": "commitTransaction ignores socketTimeoutMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 20 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "aggregate" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + }, + { + "name": "commitTransaction", + "object": "session", + "arguments": { + "timeoutMS": 10000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "commitTransaction ignores wTimeoutMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": 
[ + "aggregate" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + }, + { + "name": "commitTransaction", + "object": "session", + "arguments": { + "timeoutMS": 10000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "commitTransaction ignores maxCommitTimeMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "aggregate" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client", + "sessionOptions": { + "defaultTransactionOptions": { + "maxCommitTimeMS": 5000 + } + } + } + } + ] + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + }, + { + "name": "commitTransaction", + "object": "session", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + 
"client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "abortTransaction ignores socketTimeoutMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 20 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "aggregate" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + }, + { + "name": "abortTransaction", + "object": "session", + "arguments": { + "timeoutMS": 10000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "abortTransaction ignores wTimeoutMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + 
"object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "aggregate" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + }, + { + "name": "abortTransaction", + "object": "session", + "arguments": { + "timeoutMS": 10000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "withTransaction ignores socketTimeoutMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 20 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + 
"failCommands": [ + "commitTransaction" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "withTransaction", + "object": "session", + "arguments": { + "timeoutMS": 10000, + "callback": [ + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "withTransaction ignores wTimeoutMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "withTransaction", + "object": "session", + "arguments": { + "timeoutMS": 10000, + "callback": [ + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + 
"int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "withTransaction ignores maxCommitTimeMS if timeoutMS is set", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client", + "sessionOptions": { + "defaultTransactionOptions": { + "maxCommitTimeMS": 5000 + } + } + } + } + ] + } + }, + { + "name": "withTransaction", + "object": "session", + "arguments": { + "timeoutMS": 1000, + "callback": [ + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {}, + "session": "session" + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - listDatabases on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + 
"uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - listDatabases on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + 
"int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - listDatabaseNames on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "arguments": { + "timeoutMS": 100000 + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - listDatabaseNames on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "arguments": { + "timeoutMS": 
100000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - createChangeStream on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - createChangeStream on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + 
"database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - aggregate on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - aggregate on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + 
"entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - aggregate on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + 
"expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - listCollections on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - listCollections on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", 
+ "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - listCollectionNames on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - listCollectionNames on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + 
"commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - runCommand on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "command": { + "ping": 1 + }, + 
"commandName": "ping" + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - runCommand on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping", + "databaseName": "test", + "command": { + "ping": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - createChangeStream on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + 
"arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - createChangeStream on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + 
{ + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - aggregate on collection", + "operations": [ + { + "name": 
"createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": 
"count", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + 
} + }, + { + "name": "count", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - countDocuments on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - countDocuments on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + 
{ + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + 
"arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": 
"count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "fieldName": "x", + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + 
}, + { + "name": "distinct", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + 
"databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - find on collection", + "operations": [ + { + "name": 
"createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "findOne", + "object": 
"collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + 
"name": "findOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - listIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - listIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": 
"collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "listIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - listIndexNames on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - listIndexNames on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + 
"uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "listIndexNames", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - createChangeStream on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + 
"arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - createChangeStream on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - insertOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": 
"testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "document": { + "x": 1 + } + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - insertOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - insertMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": 
"test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "documents": [ + { + "x": 1 + } + ] + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - insertMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - 
deleteOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - deleteOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + 
"commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - deleteMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - deleteMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + 
"session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - replaceOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - replaceOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + 
"ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - updateOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "update": { + "$set": { + 
"x": 1 + } + } + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - updateOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - updateMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + 
"arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - updateMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": 
"database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS 
is ignored if timeoutMS is set - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + 
"data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": 
"test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - findOneAndUpdate on collection", + "operations": [ + 
{ + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + 
"filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - bulkWrite on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - bulkWrite on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + 
"collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - createIndex on collection", + "operations": [ 
+ { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "keys": { + "x": 1 + }, + "name": 
"x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "timeoutMS": 100000, + "name": "x_1" + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "name": "x_1" + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + 
"databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "timeoutMS": 100000, + "name": "x_1" + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "timeoutMS": 100000, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test" + } + }, + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000, + "name": "x_1" + } + } + ], + "expectEvents": [ + { 
+ "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test" + } + }, + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + }, + { + "description": "socketTimeoutMS is ignored if timeoutMS is set - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 1 + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 5 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1", + "timeoutMS": 100000 + } + }, + { + "name": "dropIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ] + }, + { + "description": "wTimeoutMS is ignored if timeoutMS is set - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "wTimeoutMS": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": 
"database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "dropIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 100000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "writeConcern": { + "$$exists": false + }, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS is ignored if timeoutMS is set - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "bucket": { + "id": "bucket", + "database": "database" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ] + } + }, + { + "name": "dropIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "maxTimeMS": 5000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$lte": 1000 + } + } + } + } + ] + } + ] + } + ] +} diff --git a/specifications/client-side-operations-timeout/tests/deprecated-options.yml 
b/specifications/client-side-operations-timeout/tests/deprecated-options.yml new file mode 100644 index 00000000000..e3378d5fa8f --- /dev/null +++ b/specifications/client-side-operations-timeout/tests/deprecated-options.yml @@ -0,0 +1,4008 @@ +description: "operations ignore deprecated timeout options if timeoutMS is set" + +schemaVersion: "1.9" + +# Most tests in this file can be executed against any server version, but some tests execute operations that are only +# available on higher server versions (e.g. abortTransaction). To avoid too many special cases in templated tests, the +# min server version is set to 4.2 for all. +runOnRequirements: + - minServerVersion: "4.2" + topologies: ["replicaset", "sharded"] + +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + +initialData: + - collectionName: &collectionName coll + databaseName: &databaseName test + documents: [] + +tests: + # For each operation, run these tests: + # + # 1. socketTimeoutMS is ignored if timeoutMS is set. The test creates a client with socketTimeoutMS=1, configures and + # a failpoint to block the operation for 5ms, runs the operation with timeoutMS=10000, and expects it to succeed. + # + # 2. wTimeoutMS is ignored if timeoutMS is set. The test creates a client with wTimeoutMS=1, runs the operation with + # timeoutMS=10000, expects the operation to succeed, and uses command monitoring expectations to assert that the + # command sent to the server does not contain a writeConcern field. + # + # 3. If the operation supports maxTimeMS, it ignores maxTimeMS if timeoutMS is set. The test executes the operation + # with timeoutMS=1000 and maxTimeMS=5000. It expects the operation to succeed and uses command monitoring expectations + # to assert that the actual maxTimeMS value sent was less than or equal to 100, thereby asserting that it was + # actually derived from timeoutMS. + + # Tests for commitTransaction. 
These are not included in the operations loop because the tests need to execute + # additional "startTransaction" and "insertOne" operations to establish a server-side transaction. There is also one + # additional test to assert that maxCommitTimeMS is ignored if timeoutMS is set. + + - description: "commitTransaction ignores socketTimeoutMS if timeoutMS is set" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + # This test uses 20 instead of 1 like other tests because socketTimeoutMS also applies to the + # operation done to start the server-side transaction and it needs time to succeed. + socketTimeoutMS: 20 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: ["aggregate"] + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["commitTransaction"] + blockConnection: true + blockTimeMS: 5 + - name: startTransaction + object: *session + - name: countDocuments + object: *collection + arguments: + filter: {} + session: *session + - name: commitTransaction + object: *session + arguments: + timeoutMS: 10000 + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: commitTransaction + databaseName: admin + command: + commitTransaction: 1 + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + - description: "commitTransaction ignores wTimeoutMS if timeoutMS is set" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - 
commandStartedEvent + ignoreCommandMonitoringEvents: ["aggregate"] + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - session: + id: &session session + client: *client + - name: startTransaction + object: *session + - name: countDocuments + object: *collection + arguments: + filter: {} + session: *session + - name: commitTransaction + object: *session + arguments: + timeoutMS: 10000 + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: commitTransaction + databaseName: admin + command: + commitTransaction: 1 + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + - description: "commitTransaction ignores maxCommitTimeMS if timeoutMS is set" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: ["aggregate"] + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - session: + id: &session session + client: *client + sessionOptions: + defaultTransactionOptions: + maxCommitTimeMS: 5000 + - name: startTransaction + object: *session + - name: countDocuments + object: *collection + arguments: + filter: {} + session: *session + - name: commitTransaction + object: *session + arguments: + timeoutMS: &timeoutMS 1000 + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: commitTransaction + databaseName: admin + command: + commitTransaction: 1 + # Assert that the final maxTimeMS field is derived from timeoutMS, not maxCommitTimeMS. + maxTimeMS: { $$lte: *timeoutMS } + + # Tests for abortTransaction. 
These are not included in the operations loop because the tests need to execute + # additional "startTransaction" and "insertOne" operations to establish a server-side transaction. + + - description: "abortTransaction ignores socketTimeoutMS if timeoutMS is set" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + # This test uses 20 instead of 1 like other tests because socketTimeoutMS also applies to the + # operation done to start the server-side transaction and it needs time to succeed. + socketTimeoutMS: 20 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: ["aggregate"] + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["abortTransaction"] + blockConnection: true + blockTimeMS: 5 + - name: startTransaction + object: *session + - name: countDocuments + object: *collection + arguments: + filter: {} + session: *session + - name: abortTransaction + object: *session + arguments: + timeoutMS: 10000 + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: abortTransaction + databaseName: admin + command: + abortTransaction: 1 + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + - description: "abortTransaction ignores wTimeoutMS if timeoutMS is set" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: ["aggregate"] + - database: + id: &database database + 
client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - session: + id: &session session + client: *client + - name: startTransaction + object: *session + - name: countDocuments + object: *collection + arguments: + filter: {} + session: *session + - name: abortTransaction + object: *session + arguments: + timeoutMS: 10000 + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: abortTransaction + databaseName: admin + command: + abortTransaction: 1 + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + # Tests for withTransaction. These are not included in the operations loop because the command monitoring + # expectations contain multiple commands. There is also one additional test to assert that maxCommitTimeMS is ignored + # if timeoutMS is set. + + - description: "withTransaction ignores socketTimeoutMS if timeoutMS is set" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + # This test uses 20 instead of 1 like other tests because socketTimeoutMS also applies to the + # operation done to start the server-side transaction and it needs time to succeed. 
+ socketTimeoutMS: 20 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["commitTransaction"] + blockConnection: true + blockTimeMS: 5 + - name: withTransaction + object: *session + arguments: + timeoutMS: 10000 + callback: + - name: countDocuments + object: *collection + arguments: + filter: {} + session: *session + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: commitTransaction + databaseName: admin + command: + commitTransaction: 1 + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + - description: "withTransaction ignores wTimeoutMS if timeoutMS is set" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - session: + id: &session session + client: *client + - name: withTransaction + object: *session + arguments: + timeoutMS: 10000 + callback: + - name: countDocuments + object: *collection + arguments: + filter: {} + session: *session + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 
*collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: commitTransaction + databaseName: admin + command: + commitTransaction: 1 + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + - description: "withTransaction ignores maxCommitTimeMS if timeoutMS is set" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - session: + id: &session session + client: *client + sessionOptions: + defaultTransactionOptions: + maxCommitTimeMS: 5000 + - name: withTransaction + object: *session + arguments: + timeoutMS: &timeoutMS 1000 + callback: + - name: countDocuments + object: *collection + arguments: + filter: {} + session: *session + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: commitTransaction + databaseName: admin + command: + commitTransaction: 1 + # Assert that the final maxTimeMS field is derived from timeoutMS, not maxCommitTimeMS. + maxTimeMS: { $$lte: *timeoutMS } + + # Tests for operations that can be generated. 
+ + + - description: "socketTimeoutMS is ignored if timeoutMS is set - listDatabases on client" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listDatabases"] + blockConnection: true + blockTimeMS: 5 + - name: listDatabases + object: *client + arguments: + timeoutMS: 100000 + filter: {} + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - listDatabases on client" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: listDatabases + object: *client + arguments: + timeoutMS: 100000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + + - description: "socketTimeoutMS is ignored if timeoutMS is set - listDatabaseNames on client" + operations: + - name: createEntities + object: 
testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listDatabases"] + blockConnection: true + blockTimeMS: 5 + - name: listDatabaseNames + object: *client + arguments: + timeoutMS: 100000 + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - listDatabaseNames on client" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: listDatabaseNames + object: *client + arguments: + timeoutMS: 100000 + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + + - description: "socketTimeoutMS is ignored if timeoutMS is set - createChangeStream on client" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database 
database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 5 + - name: createChangeStream + object: *client + arguments: + timeoutMS: 100000 + pipeline: [] + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - createChangeStream on client" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: createChangeStream + object: *client + arguments: + timeoutMS: 100000 + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: admin + command: + aggregate: 1 + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + + - description: "socketTimeoutMS is ignored if timeoutMS is set - aggregate on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: 
*collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 5 + - name: aggregate + object: *database + arguments: + timeoutMS: 100000 + pipeline: [ { $listLocalSessions: {} }, { $limit: 1 } ] + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - aggregate on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: aggregate + object: *database + arguments: + timeoutMS: 100000 + pipeline: [ { $listLocalSessions: {} }, { $limit: 1 } ] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + - description: "maxTimeMS is ignored if timeoutMS is set - aggregate on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + 
id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: aggregate + object: *database + arguments: + timeoutMS: &timeoutMS 1000 + maxTimeMS: 5000 + pipeline: [ { $listLocalSessions: {} }, { $limit: 1 } ] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$lte: *timeoutMS } + + - description: "socketTimeoutMS is ignored if timeoutMS is set - listCollections on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listCollections"] + blockConnection: true + blockTimeMS: 5 + - name: listCollections + object: *database + arguments: + timeoutMS: 100000 + filter: {} + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - listCollections on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: 
listCollections + object: *database + arguments: + timeoutMS: 100000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + + - description: "socketTimeoutMS is ignored if timeoutMS is set - listCollectionNames on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listCollections"] + blockConnection: true + blockTimeMS: 5 + - name: listCollectionNames + object: *database + arguments: + timeoutMS: 100000 + filter: {} + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - listCollectionNames on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: listCollectionNames + object: *database + arguments: + timeoutMS: 100000 + filter: {} + + + expectEvents: + - 
client: *client + events: + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + + - description: "socketTimeoutMS is ignored if timeoutMS is set - runCommand on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["ping"] + blockConnection: true + blockTimeMS: 5 + - name: runCommand + object: *database + arguments: + timeoutMS: 100000 + command: { ping: 1 } + commandName: ping + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - runCommand on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: runCommand + object: *database + arguments: + timeoutMS: 100000 + command: { ping: 1 } + commandName: ping + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: ping + databaseName: 
*databaseName + command: + ping: 1 + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + + - description: "socketTimeoutMS is ignored if timeoutMS is set - createChangeStream on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 5 + - name: createChangeStream + object: *database + arguments: + timeoutMS: 100000 + pipeline: [] + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - createChangeStream on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: createChangeStream + object: *database + arguments: + timeoutMS: 100000 + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + + - 
description: "socketTimeoutMS is ignored if timeoutMS is set - aggregate on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 5 + - name: aggregate + object: *collection + arguments: + timeoutMS: 100000 + pipeline: [] + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - aggregate on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: aggregate + object: *collection + arguments: + timeoutMS: 100000 + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + - description: "maxTimeMS is ignored if timeoutMS is set - aggregate on collection" + operations: + - name: createEntities + object: testRunner + 
arguments: + entities: + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: aggregate + object: *collection + arguments: + timeoutMS: &timeoutMS 1000 + maxTimeMS: 5000 + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$lte: *timeoutMS } + + - description: "socketTimeoutMS is ignored if timeoutMS is set - count on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 5 + - name: count + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - count on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + 
- database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: count + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + - description: "maxTimeMS is ignored if timeoutMS is set - count on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: count + object: *collection + arguments: + timeoutMS: &timeoutMS 1000 + maxTimeMS: 5000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$lte: *timeoutMS } + + - description: "socketTimeoutMS is ignored if timeoutMS is set - countDocuments on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: 
*collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 5 + - name: countDocuments + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - countDocuments on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: countDocuments + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + + - description: "socketTimeoutMS is ignored if timeoutMS is set - estimatedDocumentCount on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: 
*client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 5 + - name: estimatedDocumentCount + object: *collection + arguments: + timeoutMS: 100000 + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - estimatedDocumentCount on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: estimatedDocumentCount + object: *collection + arguments: + timeoutMS: 100000 + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + - description: "maxTimeMS is ignored if timeoutMS is set - estimatedDocumentCount on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: estimatedDocumentCount + object: *collection + arguments: + 
timeoutMS: &timeoutMS 1000 + maxTimeMS: 5000 + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$lte: *timeoutMS } + + - description: "socketTimeoutMS is ignored if timeoutMS is set - distinct on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["distinct"] + blockConnection: true + blockTimeMS: 5 + - name: distinct + object: *collection + arguments: + timeoutMS: 100000 + fieldName: x + filter: {} + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - distinct on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: distinct + object: *collection + arguments: + timeoutMS: 100000 + fieldName: x + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: distinct + databaseName: 
*databaseName + command: + distinct: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + - description: "maxTimeMS is ignored if timeoutMS is set - distinct on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: distinct + object: *collection + arguments: + timeoutMS: &timeoutMS 1000 + maxTimeMS: 5000 + fieldName: x + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: distinct + databaseName: *databaseName + command: + distinct: *collectionName + maxTimeMS: { $$lte: *timeoutMS } + + - description: "socketTimeoutMS is ignored if timeoutMS is set - find on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 5 + - name: find + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + + + + - description: "wTimeoutMS is 
ignored if timeoutMS is set - find on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: find + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + - description: "maxTimeMS is ignored if timeoutMS is set - find on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: find + object: *collection + arguments: + timeoutMS: &timeoutMS 1000 + maxTimeMS: 5000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$lte: *timeoutMS } + + - description: "socketTimeoutMS is ignored if timeoutMS is set - findOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + 
entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 5 + - name: findOne + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - findOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: findOne + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + - description: "maxTimeMS is ignored if timeoutMS is set - findOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: 
+ id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: findOne + object: *collection + arguments: + timeoutMS: &timeoutMS 1000 + maxTimeMS: 5000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$lte: *timeoutMS } + + - description: "socketTimeoutMS is ignored if timeoutMS is set - listIndexes on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listIndexes"] + blockConnection: true + blockTimeMS: 5 + - name: listIndexes + object: *collection + arguments: + timeoutMS: 100000 + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - listIndexes on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - 
bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: listIndexes + object: *collection + arguments: + timeoutMS: 100000 + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + + - description: "socketTimeoutMS is ignored if timeoutMS is set - listIndexNames on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listIndexes"] + blockConnection: true + blockTimeMS: 5 + - name: listIndexNames + object: *collection + arguments: + timeoutMS: 100000 + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - listIndexNames on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: listIndexNames + object: 
*collection + arguments: + timeoutMS: 100000 + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + + - description: "socketTimeoutMS is ignored if timeoutMS is set - createChangeStream on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 5 + - name: createChangeStream + object: *collection + arguments: + timeoutMS: 100000 + pipeline: [] + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - createChangeStream on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: createChangeStream + object: *collection + arguments: + timeoutMS: 100000 + pipeline: [] + + + expectEvents: + - client: *client + events: + - 
commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + + - description: "socketTimeoutMS is ignored if timeoutMS is set - insertOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 5 + - name: insertOne + object: *collection + arguments: + timeoutMS: 100000 + document: { x: 1 } + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - insertOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: insertOne + object: *collection + arguments: + timeoutMS: 100000 + document: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + writeConcern: { 
$$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + + - description: "socketTimeoutMS is ignored if timeoutMS is set - insertMany on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 5 + - name: insertMany + object: *collection + arguments: + timeoutMS: 100000 + documents: + - { x: 1 } + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - insertMany on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: insertMany + object: *collection + arguments: + timeoutMS: 100000 + documents: + - { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + + - description: "socketTimeoutMS is ignored if timeoutMS is 
set - deleteOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 5 + - name: deleteOne + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - deleteOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: deleteOne + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + + - description: "socketTimeoutMS is ignored if timeoutMS is set - deleteMany on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + 
uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 5 + - name: deleteMany + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - deleteMany on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: deleteMany + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + + - description: "socketTimeoutMS is ignored if timeoutMS is set - replaceOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + 
- collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 5 + - name: replaceOne + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + replacement: { x: 1 } + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - replaceOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: replaceOne + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + replacement: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + + - description: "socketTimeoutMS is ignored if timeoutMS is set - updateOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - 
bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 5 + - name: updateOne + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + update: { $set: { x: 1 } } + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - updateOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: updateOne + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + + - description: "socketTimeoutMS is ignored if timeoutMS is set - updateMany on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + 
client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 5 + - name: updateMany + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + update: { $set: { x: 1 } } + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - updateMany on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: updateMany + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + + - description: "socketTimeoutMS is ignored if timeoutMS is set - findOneAndDelete on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: 
*failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 5 + - name: findOneAndDelete + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - findOneAndDelete on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: findOneAndDelete + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + - description: "maxTimeMS is ignored if timeoutMS is set - findOneAndDelete on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: findOneAndDelete + object: *collection + arguments: + timeoutMS: &timeoutMS 1000 + maxTimeMS: 5000 + filter: {} 
+ + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$lte: *timeoutMS } + + - description: "socketTimeoutMS is ignored if timeoutMS is set - findOneAndReplace on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 5 + - name: findOneAndReplace + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + replacement: { x: 1 } + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - findOneAndReplace on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: findOneAndReplace + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + replacement: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: 
findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + - description: "maxTimeMS is ignored if timeoutMS is set - findOneAndReplace on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: findOneAndReplace + object: *collection + arguments: + timeoutMS: &timeoutMS 1000 + maxTimeMS: 5000 + filter: {} + replacement: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$lte: *timeoutMS } + + - description: "socketTimeoutMS is ignored if timeoutMS is set - findOneAndUpdate on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 5 + - name: findOneAndUpdate + 
object: *collection + arguments: + timeoutMS: 100000 + filter: {} + update: { $set: { x: 1 } } + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - findOneAndUpdate on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: findOneAndUpdate + object: *collection + arguments: + timeoutMS: 100000 + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + - description: "maxTimeMS is ignored if timeoutMS is set - findOneAndUpdate on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: findOneAndUpdate + object: *collection + arguments: + timeoutMS: &timeoutMS 1000 + maxTimeMS: 5000 + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + 
databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$lte: *timeoutMS } + + - description: "socketTimeoutMS is ignored if timeoutMS is set - bulkWrite on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 5 + - name: bulkWrite + object: *collection + arguments: + timeoutMS: 100000 + requests: + - insertOne: + document: { _id: 1 } + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - bulkWrite on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: bulkWrite + object: *collection + arguments: + timeoutMS: 100000 + requests: + - insertOne: + document: { _id: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { 
$$type: ["int", "long"] } + + + - description: "socketTimeoutMS is ignored if timeoutMS is set - createIndex on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["createIndexes"] + blockConnection: true + blockTimeMS: 5 + - name: createIndex + object: *collection + arguments: + timeoutMS: 100000 + keys: { x: 1 } + name: "x_1" + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - createIndex on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: createIndex + object: *collection + arguments: + timeoutMS: 100000 + keys: { x: 1 } + name: "x_1" + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: createIndexes + databaseName: *databaseName + command: + createIndexes: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + - description: "maxTimeMS is ignored if timeoutMS is set - 
createIndex on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: createIndex + object: *collection + arguments: + timeoutMS: &timeoutMS 1000 + maxTimeMS: 5000 + keys: { x: 1 } + name: "x_1" + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: createIndexes + databaseName: *databaseName + command: + createIndexes: *collectionName + maxTimeMS: { $$lte: *timeoutMS } + + - description: "socketTimeoutMS is ignored if timeoutMS is set - dropIndex on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["dropIndexes"] + blockConnection: true + blockTimeMS: 5 + # Create the index first so dropIndex doesn't return IndexNotFound on + # servers prior to 8.3. 
+ - name: createIndex + object: *collection + arguments: + keys: { x: 1 } + timeoutMS: 100000 + name: "x_1" + - name: dropIndex + object: *collection + arguments: + timeoutMS: 100000 + name: "x_1" + - description: "wTimeoutMS is ignored if timeoutMS is set - dropIndex on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + # Create the index first so dropIndex doesn't return IndexNotFound on + # servers prior to 8.3. + - name: createIndex + object: *collection + arguments: + keys: { x: 1 } + timeoutMS: 100000 + name: "x_1" + - name: dropIndex + object: *collection + arguments: + timeoutMS: 100000 + name: "x_1" + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: createIndexes + databaseName: *databaseName + + - commandStartedEvent: + commandName: dropIndexes + databaseName: *databaseName + command: + dropIndexes: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + - description: "maxTimeMS is ignored if timeoutMS is set - dropIndex on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + 
database: *database + - session: + id: &session session + client: *client + # Create the index first so dropIndex doesn't return IndexNotFound on + # servers prior to 8.3. + - name: createIndex + object: *collection + arguments: + keys: { x: 1 } + name: "x_1" + - name: dropIndex + object: *collection + arguments: + timeoutMS: &timeoutMS 1000 + maxTimeMS: 5000 + name: "x_1" + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: createIndexes + databaseName: *databaseName + + - commandStartedEvent: + commandName: dropIndexes + databaseName: *databaseName + command: + dropIndexes: *collectionName + maxTimeMS: { $$lte: *timeoutMS } + + - description: "socketTimeoutMS is ignored if timeoutMS is set - dropIndexes on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + socketTimeoutMS: 1 + useMultipleMongoses: false + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["dropIndexes"] + blockConnection: true + blockTimeMS: 5 + # Create the index first so dropIndex doesn't return IndexNotFound on + # servers prior to 8.3. 
+ - name: createIndex + object: *collection + arguments: + keys: { x: 1 } + name: "x_1" + timeoutMS: 100000 + - name: dropIndexes + object: *collection + arguments: + timeoutMS: 100000 + + + + - description: "wTimeoutMS is ignored if timeoutMS is set - dropIndexes on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + wTimeoutMS: 1 + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: dropIndexes + object: *collection + arguments: + timeoutMS: 100000 + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: dropIndexes + databaseName: *databaseName + command: + dropIndexes: *collectionName + writeConcern: { $$exists: false } + maxTimeMS: { $$type: ["int", "long"] } + + - description: "maxTimeMS is ignored if timeoutMS is set - dropIndexes on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - bucket: + id: &bucket bucket + database: *database + - session: + id: &session session + client: *client + - name: dropIndexes + object: *collection + arguments: + timeoutMS: &timeoutMS 1000 + maxTimeMS: 5000 + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: dropIndexes + databaseName: 
*databaseName + command: + dropIndexes: *collectionName + maxTimeMS: { $$lte: *timeoutMS } + diff --git a/specifications/client-side-operations-timeout/tests/error-transformations.json b/specifications/client-side-operations-timeout/tests/error-transformations.json new file mode 100644 index 00000000000..89be49f0a4f --- /dev/null +++ b/specifications/client-side-operations-timeout/tests/error-transformations.json @@ -0,0 +1,182 @@ +{ + "description": "MaxTimeMSExpired server errors are transformed into a custom timeout error", + "schemaVersion": "1.26", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.2", + "topologies": [ + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250, + "minPoolSize": 1 + }, + "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "basic MaxTimeMSExpired error is transformed", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 50 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + 
"commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "write concern error MaxTimeMSExpired is transformed", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "writeConcernError": { + "code": 50, + "errmsg": "maxTimeMS expired" + } + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + } + ] +} diff --git a/specifications/client-side-operations-timeout/tests/error-transformations.yml b/specifications/client-side-operations-timeout/tests/error-transformations.yml new file mode 100644 index 00000000000..0ea9373b7dd --- /dev/null +++ b/specifications/client-side-operations-timeout/tests/error-transformations.yml @@ -0,0 +1,98 @@ +description: "MaxTimeMSExpired server errors are transformed into a custom timeout error" + +schemaVersion: "1.26" + +# failCommand is available on 4.0 for replica sets and 4.2 for sharded clusters. 
+runOnRequirements: + - minServerVersion: "4.0" + topologies: ["replicaset"] + - minServerVersion: "4.2" + topologies: ["sharded"] + +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + - client: + id: &client client + uriOptions: + timeoutMS: 250 + minPoolSize: 1 + awaitMinPoolSizeMS: 10000 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: &databaseName test + - collection: + id: &collection collection + database: *database + collectionName: &collectionName coll + +initialData: + - collectionName: *collectionName + databaseName: *databaseName + documents: [] + +tests: + # A server response like {ok: 0, code: 50, ...} is transformed. + - description: "basic MaxTimeMSExpired error is transformed" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + errorCode: 50 + - name: insertOne + object: *collection + arguments: + document: { _id: 1 } + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + + # A server response like {ok: 1, writeConcernError: {code: 50, ...}} is transformed. 
+ - description: "write concern error MaxTimeMSExpired is transformed" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + writeConcernError: + code: 50 + errmsg: "maxTimeMS expired" + - name: insertOne + object: *collection + arguments: + document: { _id: 1 } + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } diff --git a/specifications/client-side-operations-timeout/tests/global-timeoutMS.json b/specifications/client-side-operations-timeout/tests/global-timeoutMS.json new file mode 100644 index 00000000000..9d8046d1bfa --- /dev/null +++ b/specifications/client-side-operations-timeout/tests/global-timeoutMS.json @@ -0,0 +1,5906 @@ +{ + "description": "timeoutMS can be configured on a MongoClient", + "schemaVersion": "1.26", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS can be configured on a MongoClient - listDatabases on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250, + "minPoolSize": 1 + }, + "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": 
"database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - listDatabases on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { 
+ "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - listDatabaseNames on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250, + "minPoolSize": 1 + }, + "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - listDatabaseNames on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", 
+ "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - createChangeStream on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250, + "minPoolSize": 1 + }, + "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": 
{ + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - createChangeStream on client", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - aggregate on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250, + "minPoolSize": 1 + }, + "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] 
+ } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - aggregate on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": 
"aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - listCollections on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250, + "minPoolSize": 1 + }, + "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - listCollections on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + 
"client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - listCollectionNames on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250, + "minPoolSize": 1 + }, + "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", 
+ "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - listCollectionNames on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - runCommand on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250, + "minPoolSize": 1 + }, + "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + 
"id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping", + "databaseName": "test", + "command": { + "ping": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - runCommand on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + 
} + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping", + "databaseName": "test", + "command": { + "ping": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - createChangeStream on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250, + "minPoolSize": 1 + }, + "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - createChangeStream on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + 
"ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250, + "minPoolSize": 1 + }, + "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + }, + "expectError": { + 
"isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250, + "minPoolSize": 1 + }, + "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + 
"commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": 
[ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - countDocuments on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250, + "minPoolSize": 1 + }, + "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - countDocuments on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ 
+ "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250, + "minPoolSize": 1 + }, + "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { 
+ "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250, + "minPoolSize": 1 + }, + "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + 
"databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + 
"events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250, + "minPoolSize": 1 + }, + "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", 
+ "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250, + "minPoolSize": 1 + }, + "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": 
"find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - listIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250, + "minPoolSize": 1 + }, + "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + 
"database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - listIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false 
+ } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - listIndexNames on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250, + "minPoolSize": 1 + }, + "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - listIndexNames on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + 
} + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - createChangeStream on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250, + "minPoolSize": 1 + }, + "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + 
} + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - createChangeStream on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - insertOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250, + "minPoolSize": 1 + }, + "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": 
"failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - insertOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] 
+ } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - insertMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250, + "minPoolSize": 1 + }, + "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - insertMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": 
"coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - deleteOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250, + "minPoolSize": 1 + }, + "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + 
"int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - deleteOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - deleteMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250, + "minPoolSize": 1 + }, + "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": 
"failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - deleteMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + 
"description": "timeoutMS can be configured on a MongoClient - replaceOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250, + "minPoolSize": 1 + }, + "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - replaceOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + 
} + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - updateOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250, + "minPoolSize": 1 + }, + "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": 
"coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - updateOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - updateMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250, + "minPoolSize": 1 + }, + "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + 
"database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - updateMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + 
"commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250, + "minPoolSize": 1 + }, + "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, 
+ { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250, + "minPoolSize": 1 + }, + "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + }, + "expectError": { + 
"isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250, + "minPoolSize": 1 + }, + 
"awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + 
"findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - bulkWrite on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250, + "minPoolSize": 1 + }, + "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - bulkWrite on 
collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250, + "minPoolSize": 1 + }, + "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": 
"failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + 
} + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250, + "minPoolSize": 1 + }, + "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "name": "x_1" + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": 
"coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test" + } + }, + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoClient - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 250, + "minPoolSize": 1 + }, + "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 350 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + 
"client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoClient - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 0 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/specifications/client-side-operations-timeout/tests/global-timeoutMS.yml b/specifications/client-side-operations-timeout/tests/global-timeoutMS.yml new file mode 100644 index 00000000000..8cb1013166a --- /dev/null +++ b/specifications/client-side-operations-timeout/tests/global-timeoutMS.yml @@ -0,0 +1,3307 @@ +# Tests in this file are generated from global-timeoutMS.yml.template. 
+ +description: "timeoutMS can be configured on a MongoClient" + +schemaVersion: "1.26" + +runOnRequirements: + - minServerVersion: "4.4" + topologies: ["replicaset", "sharded"] + +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + +initialData: + - collectionName: &collectionName coll + databaseName: &databaseName test + documents: [] + +tests: + # For each operation, we execute two tests: + # + # 1. timeoutMS can be configured to a non-zero value on a MongoClient and is inherited by the operation. Each test + # constructs a client entity with timeoutMS=250 and configures a fail point to block the operation for 350ms so + # execution results in a timeout error. + # + # 2. timeoutMS can be set to 0 for a MongoClient. Each test constructs a client entity with timeoutMS=0 and + # configures a fail point to block the operation for 15ms. The tests expect the operation to succeed and the command + # sent to not contain a maxTimeMS field. + + - description: "timeoutMS can be configured on a MongoClient - listDatabases on client" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + minPoolSize: 1 + awaitMinPoolSizeMS: 10000 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } + data: + failCommands: ["listDatabases"] + blockConnection: true + blockTimeMS: 350 + - name: listDatabases + object: *client + arguments: + filter: {} + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - listDatabases on client" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listDatabases"] + blockConnection: true + blockTimeMS: 15 + - name: listDatabases + object: *client + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - listDatabaseNames on client" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + minPoolSize: 1 + awaitMinPoolSizeMS: 10000 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: 
failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } + data: + failCommands: ["listDatabases"] + blockConnection: true + blockTimeMS: 350 + - name: listDatabaseNames + object: *client + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - listDatabaseNames on client" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listDatabases"] + blockConnection: true + blockTimeMS: 15 + - name: listDatabaseNames + object: *client + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - createChangeStream on client" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + minPoolSize: 1 + awaitMinPoolSizeMS: 
10000 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 350 + - name: createChangeStream + object: *client + arguments: + pipeline: [] + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: admin + command: + aggregate: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - createChangeStream on client" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: createChangeStream + object: *client + arguments: + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: admin + command: + aggregate: 1 + 
maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - aggregate on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + minPoolSize: 1 + awaitMinPoolSizeMS: 10000 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 350 + - name: aggregate + object: *database + arguments: + pipeline: [ { $listLocalSessions: {} }, { $limit: 1 } ] + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - aggregate on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: 
failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: aggregate + object: *database + arguments: + pipeline: [ { $listLocalSessions: {} }, { $limit: 1 } ] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - listCollections on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + minPoolSize: 1 + awaitMinPoolSizeMS: 10000 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } + data: + failCommands: ["listCollections"] + blockConnection: true + blockTimeMS: 350 + - name: listCollections + object: *database + arguments: + filter: {} + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - listCollections on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listCollections"] + blockConnection: true + blockTimeMS: 15 + - name: listCollections + object: *database + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - listCollectionNames on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + minPoolSize: 1 + awaitMinPoolSizeMS: 10000 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + 
collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } + data: + failCommands: ["listCollections"] + blockConnection: true + blockTimeMS: 350 + - name: listCollectionNames + object: *database + arguments: + filter: {} + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - listCollectionNames on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listCollections"] + blockConnection: true + blockTimeMS: 15 + - name: listCollectionNames + object: *database + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - runCommand on database" + operations: + - name: createEntities + object: testRunner + 
arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + minPoolSize: 1 + awaitMinPoolSizeMS: 10000 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } + data: + failCommands: ["ping"] + blockConnection: true + blockTimeMS: 350 + - name: runCommand + object: *database + arguments: + command: { ping: 1 } + commandName: ping + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: ping + databaseName: *databaseName + command: + ping: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - runCommand on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["ping"] + blockConnection: true + blockTimeMS: 15 + - name: runCommand + object: *database + arguments: + command: { ping: 1 } + commandName: ping + + + 
expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: ping + databaseName: *databaseName + command: + ping: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - createChangeStream on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + minPoolSize: 1 + awaitMinPoolSizeMS: 10000 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 350 + - name: createChangeStream + object: *database + arguments: + pipeline: [] + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - createChangeStream on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: createChangeStream + object: *database + arguments: + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - aggregate on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + minPoolSize: 1 + awaitMinPoolSizeMS: 10000 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + 
- name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 350 + - name: aggregate + object: *collection + arguments: + pipeline: [] + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - aggregate on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: aggregate + object: *collection + arguments: + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - count on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + 
uriOptions: + timeoutMS: 250 + minPoolSize: 1 + awaitMinPoolSizeMS: 10000 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 350 + - name: count + object: *collection + arguments: + filter: {} + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - count on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 15 + - name: count + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: 
*databaseName + command: + count: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - countDocuments on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + minPoolSize: 1 + awaitMinPoolSizeMS: 10000 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 350 + - name: countDocuments + object: *collection + arguments: + filter: {} + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - countDocuments on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: 
*failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: countDocuments + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - estimatedDocumentCount on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + minPoolSize: 1 + awaitMinPoolSizeMS: 10000 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 350 + - name: estimatedDocumentCount + object: *collection + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - estimatedDocumentCount on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 15 + - name: estimatedDocumentCount + object: *collection + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - distinct on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + minPoolSize: 1 + awaitMinPoolSizeMS: 10000 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: 
testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } + data: + failCommands: ["distinct"] + blockConnection: true + blockTimeMS: 350 + - name: distinct + object: *collection + arguments: + fieldName: x + filter: {} + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: distinct + databaseName: *databaseName + command: + distinct: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - distinct on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["distinct"] + blockConnection: true + blockTimeMS: 15 + - name: distinct + object: *collection + arguments: + fieldName: x + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: distinct + databaseName: *databaseName + command: + distinct: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - find on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + 
timeoutMS: 250 + minPoolSize: 1 + awaitMinPoolSizeMS: 10000 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 350 + - name: find + object: *collection + arguments: + filter: {} + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - find on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 15 + - name: find + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: 
+ find: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - findOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + minPoolSize: 1 + awaitMinPoolSizeMS: 10000 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 350 + - name: findOne + object: *collection + arguments: + filter: {} + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - findOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { 
times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 15 + - name: findOne + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - listIndexes on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + minPoolSize: 1 + awaitMinPoolSizeMS: 10000 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } + data: + failCommands: ["listIndexes"] + blockConnection: true + blockTimeMS: 350 + - name: listIndexes + object: *collection + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - listIndexes on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: listIndexes + object: *collection + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - listIndexNames on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + minPoolSize: 1 + awaitMinPoolSizeMS: 10000 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + 
object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } + data: + failCommands: ["listIndexes"] + blockConnection: true + blockTimeMS: 350 + - name: listIndexNames + object: *collection + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - listIndexNames on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: listIndexNames + object: *collection + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - createChangeStream on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + minPoolSize: 
1 + awaitMinPoolSizeMS: 10000 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 350 + - name: createChangeStream + object: *collection + arguments: + pipeline: [] + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - createChangeStream on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: createChangeStream + object: *collection + arguments: + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate 
+ databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - insertOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + minPoolSize: 1 + awaitMinPoolSizeMS: 10000 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 350 + - name: insertOne + object: *collection + arguments: + document: { x: 1 } + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - insertOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: 
*failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 15 + - name: insertOne + object: *collection + arguments: + document: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - insertMany on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + minPoolSize: 1 + awaitMinPoolSizeMS: 10000 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 350 + - name: insertMany + object: *collection + arguments: + documents: + - { x: 1 } + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - insertMany on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 15 + - name: insertMany + object: *collection + arguments: + documents: + - { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - deleteOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + minPoolSize: 1 + awaitMinPoolSizeMS: 10000 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: 
*collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 350 + - name: deleteOne + object: *collection + arguments: + filter: {} + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - deleteOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 15 + - name: deleteOne + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - deleteMany on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + 
uriOptions: + timeoutMS: 250 + minPoolSize: 1 + awaitMinPoolSizeMS: 10000 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 350 + - name: deleteMany + object: *collection + arguments: + filter: {} + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - deleteMany on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 15 + - name: deleteMany + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: delete 
+ databaseName: *databaseName + command: + delete: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - replaceOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + minPoolSize: 1 + awaitMinPoolSizeMS: 10000 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 350 + - name: replaceOne + object: *collection + arguments: + filter: {} + replacement: { x: 1 } + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - replaceOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + 
client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 15 + - name: replaceOne + object: *collection + arguments: + filter: {} + replacement: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - updateOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + minPoolSize: 1 + awaitMinPoolSizeMS: 10000 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 350 + - name: updateOne + object: *collection + arguments: + filter: {} + update: { $set: { x: 1 } } + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - updateOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 15 + - name: updateOne + object: *collection + arguments: + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - updateMany on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + minPoolSize: 1 + awaitMinPoolSizeMS: 10000 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: 
*database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 350 + - name: updateMany + object: *collection + arguments: + filter: {} + update: { $set: { x: 1 } } + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - updateMany on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 15 + - name: updateMany + object: *collection + arguments: + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - findOneAndDelete on collection" + operations: + - name: 
createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + minPoolSize: 1 + awaitMinPoolSizeMS: 10000 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 350 + - name: findOneAndDelete + object: *collection + arguments: + filter: {} + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - findOneAndDelete on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 15 + - name: findOneAndDelete + 
object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - findOneAndReplace on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + minPoolSize: 1 + awaitMinPoolSizeMS: 10000 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 350 + - name: findOneAndReplace + object: *collection + arguments: + filter: {} + replacement: { x: 1 } + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - findOneAndReplace on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 15 + - name: findOneAndReplace + object: *collection + arguments: + filter: {} + replacement: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - findOneAndUpdate on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + minPoolSize: 1 + awaitMinPoolSizeMS: 10000 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: 
*databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 350 + - name: findOneAndUpdate + object: *collection + arguments: + filter: {} + update: { $set: { x: 1 } } + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - findOneAndUpdate on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 15 + - name: findOneAndUpdate + object: *collection + arguments: + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$exists: false 
} + - description: "timeoutMS can be configured on a MongoClient - bulkWrite on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + minPoolSize: 1 + awaitMinPoolSizeMS: 10000 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 350 + - name: bulkWrite + object: *collection + arguments: + requests: + - insertOne: + document: { _id: 1 } + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - bulkWrite on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 
1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 15 + - name: bulkWrite + object: *collection + arguments: + requests: + - insertOne: + document: { _id: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - createIndex on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + minPoolSize: 1 + awaitMinPoolSizeMS: 10000 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } + data: + failCommands: ["createIndexes"] + blockConnection: true + blockTimeMS: 350 + - name: createIndex + object: *collection + arguments: + keys: { x: 1 } + name: "x_1" + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: createIndexes + databaseName: *databaseName + command: + createIndexes: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - createIndex on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["createIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: createIndex + object: *collection + arguments: + keys: { x: 1 } + name: "x_1" + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: createIndexes + databaseName: *databaseName + command: + createIndexes: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - dropIndex on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + minPoolSize: 1 + awaitMinPoolSizeMS: 10000 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection 
collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. + mode: { times: 2 } + data: + failCommands: ["dropIndexes"] + blockConnection: true + blockTimeMS: 350 + - name: dropIndex + object: *collection + arguments: + name: "x_1" + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: dropIndexes + databaseName: *databaseName + command: + dropIndexes: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - dropIndex on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["dropIndexes"] + blockConnection: true + blockTimeMS: 15 + # Create the index first so dropIndex doesn't return IndexNotFound on + # servers prior to 8.3. 
+ - name: createIndex + object: *collection + arguments: + keys: { x: 1 } + name: "x_1" + - name: dropIndex + object: *collection + arguments: + name: "x_1" + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: createIndexes + databaseName: *databaseName + + - commandStartedEvent: + commandName: dropIndexes + databaseName: *databaseName + command: + dropIndexes: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoClient - dropIndexes on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 250 + minPoolSize: 1 + awaitMinPoolSizeMS: 10000 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + # Use "times: 2" to workaround a quirk in Python on Windows where + # socket I/O can timeout ~20ms earlier than expected. With + # "times: 1" the retry would succeed within the remaining ~20ms. 
+ mode: { times: 2 } + data: + failCommands: ["dropIndexes"] + blockConnection: true + blockTimeMS: 350 + - name: dropIndexes + object: *collection + + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: dropIndexes + databaseName: *databaseName + command: + dropIndexes: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoClient - dropIndexes on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + uriOptions: + timeoutMS: 0 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["dropIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: dropIndexes + object: *collection + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: dropIndexes + databaseName: *databaseName + command: + dropIndexes: *collectionName + maxTimeMS: { $$exists: false } + diff --git a/specifications/client-side-operations-timeout/tests/legacy-timeouts.json b/specifications/client-side-operations-timeout/tests/legacy-timeouts.json new file mode 100644 index 00000000000..535425c934a --- /dev/null +++ b/specifications/client-side-operations-timeout/tests/legacy-timeouts.json @@ -0,0 +1,379 @@ +{ + "description": "legacy timeouts continue to work if timeoutMS is not set", + "schemaVersion": "1.0", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", 
+ "documents": [] + } + ], + "tests": [ + { + "description": "socketTimeoutMS is not used to derive a maxTimeMS command field", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "socketTimeoutMS": 50000 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "waitQueueTimeoutMS is not used to derive a maxTimeMS command field", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "waitQueueTimeoutMS": 50000 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "wTimeoutMS is not used to derive a maxTimeMS command field", + "operations": [ + { + "name": 
"createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "wTimeoutMS": 50000 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + }, + "writeConcern": { + "wtimeout": 50000 + } + } + } + } + ] + } + ] + }, + { + "description": "maxTimeMS option is used directly as the maxTimeMS field on a command", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "maxTimeMS": 50000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": 50000 + } + } + } + ] + } + ] + }, + { + "description": "maxCommitTimeMS option is used directly as the maxTimeMS field on a commitTransaction command", + "runOnRequirements": [ + { + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + 
"entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client", + "sessionOptions": { + "defaultTransactionOptions": { + "maxCommitTimeMS": 1000 + } + } + } + } + ] + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + }, + "session": "session" + } + }, + { + "name": "commitTransaction", + "object": "session" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "maxTimeMS": 1000 + } + } + } + ] + } + ] + } + ] +} diff --git a/specifications/client-side-operations-timeout/tests/legacy-timeouts.yml b/specifications/client-side-operations-timeout/tests/legacy-timeouts.yml new file mode 100644 index 00000000000..81c48f7c4f2 --- /dev/null +++ b/specifications/client-side-operations-timeout/tests/legacy-timeouts.yml @@ -0,0 +1,204 @@ +description: "legacy timeouts continue to work if timeoutMS is not set" + +schemaVersion: "1.0" + +runOnRequirements: + - minServerVersion: "4.4" + +initialData: + - collectionName: &collectionName coll + databaseName: &databaseName test + documents: [] + +tests: + - description: "socketTimeoutMS is not used to derive a maxTimeMS command field" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + observeEvents: + - commandStartedEvent + 
uriOptions: + socketTimeoutMS: 50000 + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: insertOne + object: *collection + arguments: + document: { x: 1 } + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + + - description: "waitQueueTimeoutMS is not used to derive a maxTimeMS command field" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + observeEvents: + - commandStartedEvent + uriOptions: + waitQueueTimeoutMS: 50000 + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: insertOne + object: *collection + arguments: + document: { x: 1 } + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + + - description: "wTimeoutMS is not used to derive a maxTimeMS command field" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + observeEvents: + - commandStartedEvent + uriOptions: + wTimeoutMS: &wTimeoutMS 50000 + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: insertOne + object: *collection + arguments: + document: { x: 1 } + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + writeConcern: + 
wtimeout: *wTimeoutMS + + # If the maxTimeMS option is set for a specific command, it should be used as the maxTimeMS command field without any + # modifications. This is different from timeoutMS because in that case, drivers subtract the target server's min + # RTT from the remaining timeout to derive a maxTimeMS field. + - description: "maxTimeMS option is used directly as the maxTimeMS field on a command" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: estimatedDocumentCount + object: *collection + arguments: + maxTimeMS: &maxTimeMS 50000 + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: *maxTimeMS + + # Same test as above but with the maxCommitTimeMS option. 
+ - description: "maxCommitTimeMS option is used directly as the maxTimeMS field on a commitTransaction command" + runOnRequirements: + # Note: minServerVersion is specified in top-level runOnRequirements + - topologies: ["replicaset", "sharded"] + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + observeEvents: + - commandStartedEvent + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - session: + id: &session session + client: *client + sessionOptions: + defaultTransactionOptions: + maxCommitTimeMS: &maxCommitTimeMS 1000 + - name: startTransaction + object: *session + - name: insertOne + object: *collection + arguments: + document: { _id: 1 } + session: *session + - name: commitTransaction + object: *session + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: commitTransaction + databaseName: admin + command: + commitTransaction: 1 + maxTimeMS: *maxCommitTimeMS diff --git a/specifications/client-side-operations-timeout/tests/override-collection-timeoutMS.json b/specifications/client-side-operations-timeout/tests/override-collection-timeoutMS.json new file mode 100644 index 00000000000..c56d45bcbbe --- /dev/null +++ b/specifications/client-side-operations-timeout/tests/override-collection-timeoutMS.json @@ -0,0 +1,3522 @@ +{ + "description": "timeoutMS can be overridden for a MongoCollection", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + 
"uriOptions": { + "timeoutMS": 10 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS can be configured on a MongoCollection - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + 
"times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + 
"configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - countDocuments on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - countDocuments on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": 
"testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + 
"timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + 
"collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + 
"collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + 
"arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - listIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - listIndexes on collection", + "operations": [ + { + "name": 
"createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - listIndexNames on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - listIndexNames on collection", + 
"operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - createChangeStream on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS 
can be set to 0 on a MongoCollection - createChangeStream on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - insertOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + 
"$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - insertOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - insertMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + 
"commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - insertMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - deleteOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + 
"expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - deleteOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - deleteMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteMany", + "object": 
"collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - deleteMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - replaceOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 
15 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - replaceOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - updateOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": 
"failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - updateOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - updateMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + 
}, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - updateMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + 
"collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - findOneAndReplace on 
collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + 
"command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": 
{}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - bulkWrite on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - bulkWrite on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + 
"failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", 
+ "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test" + } + }, + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a 
MongoCollection - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test" + } + }, + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoCollection - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 1000 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + 
"events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoCollection - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll", + "collectionOptions": { + "timeoutMS": 0 + } + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/specifications/client-side-operations-timeout/tests/override-collection-timeoutMS.yml b/specifications/client-side-operations-timeout/tests/override-collection-timeoutMS.yml new file mode 100644 index 00000000000..07e2a144294 --- /dev/null +++ b/specifications/client-side-operations-timeout/tests/override-collection-timeoutMS.yml @@ -0,0 +1,1891 @@ +# Tests in this file are generated from override-collection-timeoutMS.yml.template. 
+ +description: "timeoutMS can be overridden for a MongoCollection" + +schemaVersion: "1.9" + +runOnRequirements: + - minServerVersion: "4.4" + topologies: ["replicaset", "sharded"] + +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + - client: + id: &client client + uriOptions: + timeoutMS: 10 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: &databaseName test + +initialData: + - collectionName: &collectionName coll + databaseName: *databaseName + documents: [] + +tests: + # For each collection-level operation, we execute two tests: + # + # 1. timeoutMS can be overridden to a non-zero value for a MongoCollection. Each test uses the client entity defined + # above to construct a collection entity with timeoutMS=1000 and configures a fail point to block the operation for + # 15ms so the operation succeeds. + # + # 2. timeoutMS can be overridden to 0 for a MongoCollection. Each test constructs a collection entity with + # timeoutMS=0 using the global client entity and configures a fail point to block the operation for 15ms. The + # operation should succeed and the command sent to the server should not contain a maxTimeMS field. 
+ + - description: "timeoutMS can be configured on a MongoCollection - aggregate on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: aggregate + object: *collection + arguments: + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - aggregate on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: aggregate + object: *collection + arguments: + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - count on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: 
testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 15 + - name: count + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - count on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 15 + - name: count + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - countDocuments on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: countDocuments + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: 
*databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - countDocuments on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: countDocuments + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - estimatedDocumentCount on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 15 + - name: estimatedDocumentCount + object: *collection + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - estimatedDocumentCount on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + 
collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 15 + - name: estimatedDocumentCount + object: *collection + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - distinct on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["distinct"] + blockConnection: true + blockTimeMS: 15 + - name: distinct + object: *collection + arguments: + fieldName: x + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: distinct + databaseName: *databaseName + command: + distinct: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - distinct on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["distinct"] + blockConnection: true + blockTimeMS: 15 + - name: distinct + object: *collection + arguments: + fieldName: x + filter: 
{} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: distinct + databaseName: *databaseName + command: + distinct: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - find on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 15 + - name: find + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - find on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 15 + - name: find + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - findOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: 
&collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 15 + - name: findOne + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - findOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 15 + - name: findOne + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - listIndexes on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: listIndexes + object: *collection 
+ + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - listIndexes on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: listIndexes + object: *collection + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - listIndexNames on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: listIndexNames + object: *collection + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - listIndexNames on collection" + operations: + - name: createEntities + object: testRunner + arguments: 
+ entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: listIndexNames + object: *collection + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - createChangeStream on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: createChangeStream + object: *collection + arguments: + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - createChangeStream on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + 
blockConnection: true + blockTimeMS: 15 + - name: createChangeStream + object: *collection + arguments: + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - insertOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 15 + - name: insertOne + object: *collection + arguments: + document: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - insertOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 15 + - name: insertOne + object: *collection + arguments: + document: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be 
configured on a MongoCollection - insertMany on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 15 + - name: insertMany + object: *collection + arguments: + documents: + - { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - insertMany on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 15 + - name: insertMany + object: *collection + arguments: + documents: + - { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - deleteOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + 
client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 15 + - name: deleteOne + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - deleteOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 15 + - name: deleteOne + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - deleteMany on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 15 + - name: deleteMany + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + 
command: + delete: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - deleteMany on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 15 + - name: deleteMany + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - replaceOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 15 + - name: replaceOne + object: *collection + arguments: + filter: {} + replacement: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - replaceOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: 
*collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 15 + - name: replaceOne + object: *collection + arguments: + filter: {} + replacement: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - updateOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 15 + - name: updateOne + object: *collection + arguments: + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - updateOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 15 + - name: updateOne + object: *collection + 
arguments: + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - updateMany on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 15 + - name: updateMany + object: *collection + arguments: + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - updateMany on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 15 + - name: updateMany + object: *collection + arguments: + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a 
MongoCollection - findOneAndDelete on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 15 + - name: findOneAndDelete + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - findOneAndDelete on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 15 + - name: findOneAndDelete + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - findOneAndReplace on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: 
testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 15 + - name: findOneAndReplace + object: *collection + arguments: + filter: {} + replacement: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - findOneAndReplace on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 15 + - name: findOneAndReplace + object: *collection + arguments: + filter: {} + replacement: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - findOneAndUpdate on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 15 + - name: findOneAndUpdate + object: *collection + 
arguments: + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - findOneAndUpdate on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 15 + - name: findOneAndUpdate + object: *collection + arguments: + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - bulkWrite on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 15 + - name: bulkWrite + object: *collection + arguments: + requests: + - insertOne: + document: { _id: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - 
description: "timeoutMS can be set to 0 on a MongoCollection - bulkWrite on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 15 + - name: bulkWrite + object: *collection + arguments: + requests: + - insertOne: + document: { _id: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - createIndex on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["createIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: createIndex + object: *collection + arguments: + keys: { x: 1 } + name: "x_1" + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: createIndexes + databaseName: *databaseName + command: + createIndexes: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - createIndex on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: 
+ timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["createIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: createIndex + object: *collection + arguments: + keys: { x: 1 } + name: "x_1" + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: createIndexes + databaseName: *databaseName + command: + createIndexes: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - dropIndex on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["dropIndexes"] + blockConnection: true + blockTimeMS: 15 + # Create the index first so dropIndex doesn't return IndexNotFound on + # servers prior to 8.3. 
+ - name: createIndex + object: *collection + arguments: + keys: { x: 1 } + name: "x_1" + - name: dropIndex + object: *collection + arguments: + name: "x_1" + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: createIndexes + databaseName: *databaseName + + - commandStartedEvent: + commandName: dropIndexes + databaseName: *databaseName + command: + dropIndexes: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - dropIndex on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["dropIndexes"] + blockConnection: true + blockTimeMS: 15 + # Create the index first so dropIndex doesn't return IndexNotFound on + # servers prior to 8.3. 
+ - name: createIndex + object: *collection + arguments: + keys: { x: 1 } + name: "x_1" + - name: dropIndex + object: *collection + arguments: + name: "x_1" + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: createIndexes + databaseName: *databaseName + + - commandStartedEvent: + commandName: dropIndexes + databaseName: *databaseName + command: + dropIndexes: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoCollection - dropIndexes on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 1000 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["dropIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: dropIndexes + object: *collection + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: dropIndexes + databaseName: *databaseName + command: + dropIndexes: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoCollection - dropIndexes on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + collectionOptions: + timeoutMS: 0 + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["dropIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: dropIndexes + object: *collection + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: dropIndexes + databaseName: *databaseName + command: + 
dropIndexes: *collectionName + maxTimeMS: { $$exists: false } + diff --git a/specifications/client-side-operations-timeout/tests/override-database-timeoutMS.json b/specifications/client-side-operations-timeout/tests/override-database-timeoutMS.json new file mode 100644 index 00000000000..11ff7a59fd4 --- /dev/null +++ b/specifications/client-side-operations-timeout/tests/override-database-timeoutMS.json @@ -0,0 +1,4646 @@ +{ + "description": "timeoutMS can be overridden for a MongoDatabase", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 10 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS can be configured on a MongoDatabase - aggregate on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + 
} + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - aggregate on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - listCollections on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { 
+ "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - listCollections on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - listCollectionNames on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": 
"database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - listCollectionNames on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 
1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - runCommand on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping", + "databaseName": "test", + "command": { + "ping": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - runCommand on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + 
"name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping", + "databaseName": "test", + "command": { + "ping": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - createChangeStream on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - createChangeStream on database", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + 
"name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - aggregate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + 
{ + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] 
+ } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - count on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - countDocuments on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + } 
+ } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - countDocuments on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + 
"failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - estimatedDocumentCount on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + 
"id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - distinct on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - find on 
collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - find on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", 
+ "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - findOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + 
} + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - listIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - listIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + 
"client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - listIndexNames on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - listIndexNames on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + 
"timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - createChangeStream on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a 
MongoDatabase - createChangeStream on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - insertOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": 
"client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - insertOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - insertMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + 
"times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - insertMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - deleteOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, 
+ { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - deleteOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - deleteMany on collection", + 
"operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - deleteMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + 
"commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - replaceOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - replaceOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + 
"failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - updateOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - updateOne on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + 
{ + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - updateMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + 
"description": "timeoutMS can be set to 0 on a MongoDatabase - updateMany on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + 
"arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - findOneAndDelete on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + 
"arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - findOneAndReplace on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - findOneAndUpdate on collection", + "operations": [ + { + "name": 
"createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - findOneAndUpdate on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + 
"expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - bulkWrite on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - bulkWrite on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + 
"client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - createIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + 
"arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "name": "x_1" + } + } + ], + "expectEvents": [ 
+ { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test" + } + }, + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - dropIndex on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "name": "x_1" + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test" + } + }, + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured on a MongoDatabase - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + 
"databaseOptions": { + "timeoutMS": 1000 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 on a MongoDatabase - dropIndexes on collection", + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test", + "databaseOptions": { + "timeoutMS": 0 + } + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ] + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git 
a/specifications/client-side-operations-timeout/tests/override-database-timeoutMS.yml b/specifications/client-side-operations-timeout/tests/override-database-timeoutMS.yml new file mode 100644 index 00000000000..4ded17164d5 --- /dev/null +++ b/specifications/client-side-operations-timeout/tests/override-database-timeoutMS.yml @@ -0,0 +1,2501 @@ +# Tests in this file are generated from override-database-timeoutMS.yml.template. + +description: "timeoutMS can be overridden for a MongoDatabase" + +schemaVersion: "1.9" + +runOnRequirements: + - minServerVersion: "4.4" + topologies: ["replicaset", "sharded"] + +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + - client: + id: &client client + uriOptions: + timeoutMS: 10 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + +initialData: + - collectionName: &collectionName coll + databaseName: &databaseName test + documents: [] + +tests: + # For each database-level operation, we execute two tests: + # + # 1. timeoutMS can be overridden to a non-zero value for a MongoDatabase. Each test constructs uses the client entity + # defined above to construct a database entity with timeoutMS=1000 and configures a fail point to block the operation + # for 15ms so the operation succeeds. + # + # 2. timeoutMS can be overridden to 0 for a MongoDatabase. Each test constructs a database entity with timeoutMS=0 + # using the global client entity and configures a fail point to block the operation for 15ms. The operation should + # succeed and the command sent to the server should not contain a maxTimeMS field. 
+ + - description: "timeoutMS can be configured on a MongoDatabase - aggregate on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 1000 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: aggregate + object: *database + arguments: + pipeline: [ { $listLocalSessions: {} }, { $limit: 1 } ] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoDatabase - aggregate on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 0 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: aggregate + object: *database + arguments: + pipeline: [ { $listLocalSessions: {} }, { $limit: 1 } ] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoDatabase - listCollections on database" + operations: + - name: createEntities + 
object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 1000 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listCollections"] + blockConnection: true + blockTimeMS: 15 + - name: listCollections + object: *database + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoDatabase - listCollections on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 0 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listCollections"] + blockConnection: true + blockTimeMS: 15 + - name: listCollections + object: *database + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoDatabase - listCollectionNames on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 
1000 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listCollections"] + blockConnection: true + blockTimeMS: 15 + - name: listCollectionNames + object: *database + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoDatabase - listCollectionNames on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 0 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listCollections"] + blockConnection: true + blockTimeMS: 15 + - name: listCollectionNames + object: *database + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoDatabase - runCommand on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 1000 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + 
client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["ping"] + blockConnection: true + blockTimeMS: 15 + - name: runCommand + object: *database + arguments: + command: { ping: 1 } + commandName: ping + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: ping + databaseName: *databaseName + command: + ping: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoDatabase - runCommand on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 0 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["ping"] + blockConnection: true + blockTimeMS: 15 + - name: runCommand + object: *database + arguments: + command: { ping: 1 } + commandName: ping + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: ping + databaseName: *databaseName + command: + ping: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoDatabase - createChangeStream on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 1000 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: 
createChangeStream + object: *database + arguments: + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoDatabase - createChangeStream on database" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 0 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: createChangeStream + object: *database + arguments: + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoDatabase - aggregate on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 1000 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: aggregate + object: *collection + arguments: + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + 
command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoDatabase - aggregate on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 0 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: aggregate + object: *collection + arguments: + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoDatabase - count on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 1000 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 15 + - name: count + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoDatabase - count on collection" + operations: + - name: createEntities + 
object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 0 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 15 + - name: count + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoDatabase - countDocuments on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 1000 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: countDocuments + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoDatabase - countDocuments on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 0 + - collection: + id: 
&collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: countDocuments + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoDatabase - estimatedDocumentCount on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 1000 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 15 + - name: estimatedDocumentCount + object: *collection + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoDatabase - estimatedDocumentCount on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 0 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + 
configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 15 + - name: estimatedDocumentCount + object: *collection + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoDatabase - distinct on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 1000 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["distinct"] + blockConnection: true + blockTimeMS: 15 + - name: distinct + object: *collection + arguments: + fieldName: x + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: distinct + databaseName: *databaseName + command: + distinct: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoDatabase - distinct on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 0 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["distinct"] + blockConnection: true + blockTimeMS: 15 + - name: distinct + object: *collection + arguments: + fieldName: x + 
filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: distinct + databaseName: *databaseName + command: + distinct: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoDatabase - find on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 1000 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 15 + - name: find + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoDatabase - find on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 0 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 15 + - name: find + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS 
can be configured on a MongoDatabase - findOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 1000 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 15 + - name: findOne + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoDatabase - findOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 0 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 15 + - name: findOne + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoDatabase - listIndexes on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: 
*databaseName + databaseOptions: + timeoutMS: 1000 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: listIndexes + object: *collection + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoDatabase - listIndexes on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 0 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: listIndexes + object: *collection + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoDatabase - listIndexNames on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 1000 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: 
*failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: listIndexNames + object: *collection + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoDatabase - listIndexNames on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 0 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: listIndexNames + object: *collection + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoDatabase - createChangeStream on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 1000 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: createChangeStream + object: 
*collection + arguments: + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoDatabase - createChangeStream on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 0 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: createChangeStream + object: *collection + arguments: + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoDatabase - insertOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 1000 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 15 + - name: insertOne + object: *collection + arguments: + document: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName 
+ command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoDatabase - insertOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 0 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 15 + - name: insertOne + object: *collection + arguments: + document: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoDatabase - insertMany on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 1000 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 15 + - name: insertMany + object: *collection + arguments: + documents: + - { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoDatabase - insertMany on collection" + operations: + - 
name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 0 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 15 + - name: insertMany + object: *collection + arguments: + documents: + - { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoDatabase - deleteOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 1000 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 15 + - name: deleteOne + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoDatabase - deleteOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 0 + - 
collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 15 + - name: deleteOne + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoDatabase - deleteMany on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 1000 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 15 + - name: deleteMany + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoDatabase - deleteMany on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 0 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + 
configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 15 + - name: deleteMany + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoDatabase - replaceOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 1000 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 15 + - name: replaceOne + object: *collection + arguments: + filter: {} + replacement: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoDatabase - replaceOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 0 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 15 + - name: replaceOne + object: *collection + 
arguments: + filter: {} + replacement: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoDatabase - updateOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 1000 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 15 + - name: updateOne + object: *collection + arguments: + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoDatabase - updateOne on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 0 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 15 + - name: updateOne + object: *collection + arguments: + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + 
databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoDatabase - updateMany on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 1000 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 15 + - name: updateMany + object: *collection + arguments: + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoDatabase - updateMany on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 0 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 15 + - name: updateMany + object: *collection + arguments: + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a 
MongoDatabase - findOneAndDelete on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 1000 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 15 + - name: findOneAndDelete + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoDatabase - findOneAndDelete on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 0 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 15 + - name: findOneAndDelete + object: *collection + arguments: + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoDatabase - findOneAndReplace on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - 
database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 1000 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 15 + - name: findOneAndReplace + object: *collection + arguments: + filter: {} + replacement: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoDatabase - findOneAndReplace on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 0 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 15 + - name: findOneAndReplace + object: *collection + arguments: + filter: {} + replacement: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoDatabase - findOneAndUpdate on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + 
databaseOptions: + timeoutMS: 1000 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 15 + - name: findOneAndUpdate + object: *collection + arguments: + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoDatabase - findOneAndUpdate on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 0 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 15 + - name: findOneAndUpdate + object: *collection + arguments: + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoDatabase - bulkWrite on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 1000 + - collection: + id: &collection collection + 
database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 15 + - name: bulkWrite + object: *collection + arguments: + requests: + - insertOne: + document: { _id: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoDatabase - bulkWrite on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 0 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 15 + - name: bulkWrite + object: *collection + arguments: + requests: + - insertOne: + document: { _id: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoDatabase - createIndex on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 1000 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient 
+ failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["createIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: createIndex + object: *collection + arguments: + keys: { x: 1 } + name: "x_1" + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: createIndexes + databaseName: *databaseName + command: + createIndexes: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoDatabase - createIndex on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 0 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["createIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: createIndex + object: *collection + arguments: + keys: { x: 1 } + name: "x_1" + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: createIndexes + databaseName: *databaseName + command: + createIndexes: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoDatabase - dropIndex on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 1000 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["dropIndexes"] + blockConnection: 
true + blockTimeMS: 15 + # Create the index first so dropIndex doesn't return IndexNotFound on + # servers prior to 8.3. + - name: createIndex + object: *collection + arguments: + keys: { x: 1 } + name: "x_1" + - name: dropIndex + object: *collection + arguments: + name: "x_1" + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: createIndexes + databaseName: *databaseName + + - commandStartedEvent: + commandName: dropIndexes + databaseName: *databaseName + command: + dropIndexes: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoDatabase - dropIndex on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 0 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["dropIndexes"] + blockConnection: true + blockTimeMS: 15 + # Create the index first so dropIndex doesn't return IndexNotFound on + # servers prior to 8.3. 
+ - name: createIndex + object: *collection + arguments: + keys: { x: 1 } + name: "x_1" + - name: dropIndex + object: *collection + arguments: + name: "x_1" + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: createIndexes + databaseName: *databaseName + + - commandStartedEvent: + commandName: dropIndexes + databaseName: *databaseName + command: + dropIndexes: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured on a MongoDatabase - dropIndexes on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 1000 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["dropIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: dropIndexes + object: *collection + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: dropIndexes + databaseName: *databaseName + command: + dropIndexes: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 on a MongoDatabase - dropIndexes on collection" + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - database: + id: &database database + client: *client + databaseName: *databaseName + databaseOptions: + timeoutMS: 0 + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["dropIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: dropIndexes + 
object: *collection + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: dropIndexes + databaseName: *databaseName + command: + dropIndexes: *collectionName + maxTimeMS: { $$exists: false } + diff --git a/specifications/client-side-operations-timeout/tests/override-operation-timeoutMS.json b/specifications/client-side-operations-timeout/tests/override-operation-timeoutMS.json new file mode 100644 index 00000000000..f33f876137f --- /dev/null +++ b/specifications/client-side-operations-timeout/tests/override-operation-timeoutMS.json @@ -0,0 +1,3605 @@ +{ + "description": "timeoutMS can be overridden for an operation", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 10 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS can be configured for an operation - listDatabases on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + 
"expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - listDatabases on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - listDatabaseNames on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - listDatabaseNames on client", + "operations": [ + { + "name": 
"failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - createChangeStream on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "timeoutMS": 1000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - createChangeStream on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } 
+ } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - aggregate on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - aggregate on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "timeoutMS": 0, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be 
configured for an operation - listCollections on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - listCollections on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - listCollectionNames on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + 
"blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - listCollectionNames on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - runCommand on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping", + "databaseName": "test", + "command": { + "ping": 1, + 
"maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - runCommand on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "timeoutMS": 0, + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping", + "databaseName": "test", + "command": { + "ping": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - createChangeStream on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - createChangeStream on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + 
"times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - aggregate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - aggregate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + 
"command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - count on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - count on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - countDocuments on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + 
"aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - countDocuments on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - estimatedDocumentCount on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + 
"maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - estimatedDocumentCount on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - distinct on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - distinct on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + 
"failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - find on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - find on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + 
} + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - findOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - findOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - listIndexes on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + 
{ + "name": "listIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - listIndexes on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - listIndexNames on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be 
set to 0 for an operation - listIndexNames on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "listIndexNames", + "object": "collection", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - createChangeStream on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - createChangeStream on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + 
"name": "createChangeStream", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - insertOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - insertOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": 
"timeoutMS can be configured for an operation - insertMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - insertMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - deleteOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } 
+ } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - deleteOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - deleteMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": 
"timeoutMS can be set to 0 for an operation - deleteMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "deleteMany", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - replaceOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - replaceOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + 
{ + "name": "replaceOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - updateOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - updateOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + 
"maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - updateMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - updateMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "updateMany", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - findOneAndDelete on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": 
"failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - findOneAndDelete on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - findOneAndReplace on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + 
"client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - findOneAndReplace on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - findOneAndUpdate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": 
"timeoutMS can be set to 0 for an operation - findOneAndUpdate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - bulkWrite on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - bulkWrite on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + 
"data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - createIndex on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - createIndex on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "createIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "keys": { + "x": 1 + }, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + 
"events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test", + "command": { + "createIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - dropIndex on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "timeoutMS": 1000, + "name": "x_1" + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test" + } + }, + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - dropIndex on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "createIndex", + "object": "collection", + "arguments": { + "keys": { + "x": 1 + }, + "timeoutMS": 0, + "name": "x_1" + } + }, + { + "name": "dropIndex", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "name": "x_1" + } + } + ], + "expectEvents": [ + { + "client": "client", + 
"events": [ + { + "commandStartedEvent": { + "commandName": "createIndexes", + "databaseName": "test" + } + }, + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be configured for an operation - dropIndexes on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS can be set to 0 for an operation - dropIndexes on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "dropIndexes" + ], + "blockConnection": true, + "blockTimeMS": 15 + } + } + } + }, + { + "name": "dropIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "dropIndexes", + "databaseName": "test", + "command": { + "dropIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/specifications/client-side-operations-timeout/tests/override-operation-timeoutMS.yml 
b/specifications/client-side-operations-timeout/tests/override-operation-timeoutMS.yml new file mode 100644 index 00000000000..370e9393cfe --- /dev/null +++ b/specifications/client-side-operations-timeout/tests/override-operation-timeoutMS.yml @@ -0,0 +1,1936 @@ +# Tests in this file are generated from override-operation-timeoutMS.yml.template. + +description: "timeoutMS can be overridden for an operation" + +schemaVersion: "1.9" + +runOnRequirements: + - minServerVersion: "4.4" + topologies: ["replicaset", "sharded"] + +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + - client: + id: &client client + uriOptions: + timeoutMS: 10 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: &databaseName test + - collection: + id: &collection collection + database: *database + collectionName: &collectionName coll + +initialData: + - collectionName: *collectionName + databaseName: *databaseName + documents: [] + +tests: + # For each level operation, we execute two tests: + # + # 1. timeoutMS can be overridden to a non-zero value for an operation. Each test executes an operation using one of + # the entities defined above with an overridden timeoutMS=1000 and configures a fail point to block the operation for + # 15ms so the operation succeeds. + # + # 2. timeoutMS can be overridden to 0 for an operation. Each test executes an operation using the entities defined + # above with an overridden timeoutMS=0 so the operation succeeds. 
+ + - description: "timeoutMS can be configured for an operation - listDatabases on client" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listDatabases"] + blockConnection: true + blockTimeMS: 15 + - name: listDatabases + object: *client + arguments: + timeoutMS: 1000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - listDatabases on client" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listDatabases"] + blockConnection: true + blockTimeMS: 15 + - name: listDatabases + object: *client + arguments: + timeoutMS: 0 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - listDatabaseNames on client" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listDatabases"] + blockConnection: true + blockTimeMS: 15 + - name: listDatabaseNames + object: *client + arguments: + timeoutMS: 1000 + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - listDatabaseNames on client" + operations: + - name: failPoint + object: testRunner + 
arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listDatabases"] + blockConnection: true + blockTimeMS: 15 + - name: listDatabaseNames + object: *client + arguments: + timeoutMS: 0 + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - createChangeStream on client" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: createChangeStream + object: *client + arguments: + timeoutMS: 1000 + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: admin + command: + aggregate: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - createChangeStream on client" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: createChangeStream + object: *client + arguments: + timeoutMS: 0 + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: admin + command: + aggregate: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - aggregate on database" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + 
blockTimeMS: 15 + - name: aggregate + object: *database + arguments: + timeoutMS: 1000 + pipeline: [ { $listLocalSessions: {} }, { $limit: 1 } ] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - aggregate on database" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: aggregate + object: *database + arguments: + timeoutMS: 0 + pipeline: [ { $listLocalSessions: {} }, { $limit: 1 } ] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - listCollections on database" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listCollections"] + blockConnection: true + blockTimeMS: 15 + - name: listCollections + object: *database + arguments: + timeoutMS: 1000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - listCollections on database" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listCollections"] + blockConnection: true + blockTimeMS: 15 + - name: listCollections + 
object: *database + arguments: + timeoutMS: 0 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - listCollectionNames on database" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listCollections"] + blockConnection: true + blockTimeMS: 15 + - name: listCollectionNames + object: *database + arguments: + timeoutMS: 1000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - listCollectionNames on database" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listCollections"] + blockConnection: true + blockTimeMS: 15 + - name: listCollectionNames + object: *database + arguments: + timeoutMS: 0 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - runCommand on database" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["ping"] + blockConnection: true + blockTimeMS: 15 + - name: runCommand + object: *database + arguments: + timeoutMS: 1000 + command: { ping: 1 } + commandName: ping + + + 
expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: ping + databaseName: *databaseName + command: + ping: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - runCommand on database" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["ping"] + blockConnection: true + blockTimeMS: 15 + - name: runCommand + object: *database + arguments: + timeoutMS: 0 + command: { ping: 1 } + commandName: ping + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: ping + databaseName: *databaseName + command: + ping: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - createChangeStream on database" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: createChangeStream + object: *database + arguments: + timeoutMS: 1000 + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - createChangeStream on database" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: createChangeStream + object: *database + arguments: + timeoutMS: 0 + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: 
+ aggregate: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - aggregate on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: aggregate + object: *collection + arguments: + timeoutMS: 1000 + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - aggregate on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: aggregate + object: *collection + arguments: + timeoutMS: 0 + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - count on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 15 + - name: count + object: *collection + arguments: + timeoutMS: 1000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - count on collection" + 
operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 15 + - name: count + object: *collection + arguments: + timeoutMS: 0 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - countDocuments on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: countDocuments + object: *collection + arguments: + timeoutMS: 1000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - countDocuments on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: countDocuments + object: *collection + arguments: + timeoutMS: 0 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - estimatedDocumentCount on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + 
configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 15 + - name: estimatedDocumentCount + object: *collection + arguments: + timeoutMS: 1000 + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - estimatedDocumentCount on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 15 + - name: estimatedDocumentCount + object: *collection + arguments: + timeoutMS: 0 + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - distinct on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["distinct"] + blockConnection: true + blockTimeMS: 15 + - name: distinct + object: *collection + arguments: + timeoutMS: 1000 + fieldName: x + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: distinct + databaseName: *databaseName + command: + distinct: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - distinct on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["distinct"] + blockConnection: true + 
blockTimeMS: 15 + - name: distinct + object: *collection + arguments: + timeoutMS: 0 + fieldName: x + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: distinct + databaseName: *databaseName + command: + distinct: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - find on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 15 + - name: find + object: *collection + arguments: + timeoutMS: 1000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - find on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 15 + - name: find + object: *collection + arguments: + timeoutMS: 0 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - findOne on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 15 + - name: findOne + object: *collection + arguments: + timeoutMS: 1000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + 
commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - findOne on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 15 + - name: findOne + object: *collection + arguments: + timeoutMS: 0 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - listIndexes on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: listIndexes + object: *collection + arguments: + timeoutMS: 1000 + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - listIndexes on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: listIndexes + object: *collection + arguments: + timeoutMS: 0 + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be 
configured for an operation - listIndexNames on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: listIndexNames + object: *collection + arguments: + timeoutMS: 1000 + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - listIndexNames on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: listIndexNames + object: *collection + arguments: + timeoutMS: 0 + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - createChangeStream on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: createChangeStream + object: *collection + arguments: + timeoutMS: 1000 + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - createChangeStream on collection" + operations: + - name: failPoint 
+ object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 15 + - name: createChangeStream + object: *collection + arguments: + timeoutMS: 0 + pipeline: [] + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - insertOne on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 15 + - name: insertOne + object: *collection + arguments: + timeoutMS: 1000 + document: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - insertOne on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 15 + - name: insertOne + object: *collection + arguments: + timeoutMS: 0 + document: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - insertMany on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { 
times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 15 + - name: insertMany + object: *collection + arguments: + timeoutMS: 1000 + documents: + - { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - insertMany on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 15 + - name: insertMany + object: *collection + arguments: + timeoutMS: 0 + documents: + - { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - deleteOne on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 15 + - name: deleteOne + object: *collection + arguments: + timeoutMS: 1000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - deleteOne on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 15 + - name: deleteOne + object: 
*collection + arguments: + timeoutMS: 0 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - deleteMany on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 15 + - name: deleteMany + object: *collection + arguments: + timeoutMS: 1000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - deleteMany on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 15 + - name: deleteMany + object: *collection + arguments: + timeoutMS: 0 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - replaceOne on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 15 + - name: replaceOne + object: *collection + arguments: + timeoutMS: 1000 + filter: {} + replacement: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: 
+ commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - replaceOne on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 15 + - name: replaceOne + object: *collection + arguments: + timeoutMS: 0 + filter: {} + replacement: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - updateOne on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 15 + - name: updateOne + object: *collection + arguments: + timeoutMS: 1000 + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - updateOne on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 15 + - name: updateOne + object: *collection + arguments: + timeoutMS: 0 + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + 
update: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - updateMany on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 15 + - name: updateMany + object: *collection + arguments: + timeoutMS: 1000 + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - updateMany on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 15 + - name: updateMany + object: *collection + arguments: + timeoutMS: 0 + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - findOneAndDelete on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 15 + - name: findOneAndDelete + object: *collection + arguments: + timeoutMS: 1000 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$type: ["int", 
"long"] } + - description: "timeoutMS can be set to 0 for an operation - findOneAndDelete on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 15 + - name: findOneAndDelete + object: *collection + arguments: + timeoutMS: 0 + filter: {} + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - findOneAndReplace on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 15 + - name: findOneAndReplace + object: *collection + arguments: + timeoutMS: 1000 + filter: {} + replacement: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - findOneAndReplace on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 15 + - name: findOneAndReplace + object: *collection + arguments: + timeoutMS: 0 + filter: {} + replacement: { x: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$exists: 
false } + - description: "timeoutMS can be configured for an operation - findOneAndUpdate on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 15 + - name: findOneAndUpdate + object: *collection + arguments: + timeoutMS: 1000 + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - findOneAndUpdate on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 15 + - name: findOneAndUpdate + object: *collection + arguments: + timeoutMS: 0 + filter: {} + update: { $set: { x: 1 } } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - bulkWrite on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 15 + - name: bulkWrite + object: *collection + arguments: + timeoutMS: 1000 + requests: + - insertOne: + document: { _id: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { 
$$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - bulkWrite on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 15 + - name: bulkWrite + object: *collection + arguments: + timeoutMS: 0 + requests: + - insertOne: + document: { _id: 1 } + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - createIndex on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["createIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: createIndex + object: *collection + arguments: + timeoutMS: 1000 + keys: { x: 1 } + name: "x_1" + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: createIndexes + databaseName: *databaseName + command: + createIndexes: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - createIndex on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["createIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: createIndex + object: *collection + arguments: + timeoutMS: 0 + keys: { x: 1 } + name: "x_1" + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: createIndexes + databaseName: *databaseName + command: + createIndexes: *collectionName + maxTimeMS: { $$exists: false } + - 
description: "timeoutMS can be configured for an operation - dropIndex on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["dropIndexes"] + blockConnection: true + blockTimeMS: 15 + # Create the index first so dropIndex doesn't return IndexNotFound on + # servers prior to 8.3. + - name: createIndex + object: *collection + arguments: + keys: { x: 1 } + timeoutMS: 1000 + name: "x_1" + - name: dropIndex + object: *collection + arguments: + timeoutMS: 1000 + name: "x_1" + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: createIndexes + databaseName: *databaseName + + - commandStartedEvent: + commandName: dropIndexes + databaseName: *databaseName + command: + dropIndexes: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - dropIndex on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["dropIndexes"] + blockConnection: true + blockTimeMS: 15 + # Create the index first so dropIndex doesn't return IndexNotFound on + # servers prior to 8.3. 
+ - name: createIndex + object: *collection + arguments: + keys: { x: 1 } + timeoutMS: 0 + name: "x_1" + - name: dropIndex + object: *collection + arguments: + timeoutMS: 0 + name: "x_1" + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: createIndexes + databaseName: *databaseName + + - commandStartedEvent: + commandName: dropIndexes + databaseName: *databaseName + command: + dropIndexes: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS can be configured for an operation - dropIndexes on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["dropIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: dropIndexes + object: *collection + arguments: + timeoutMS: 1000 + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: dropIndexes + databaseName: *databaseName + command: + dropIndexes: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "timeoutMS can be set to 0 for an operation - dropIndexes on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["dropIndexes"] + blockConnection: true + blockTimeMS: 15 + - name: dropIndexes + object: *collection + arguments: + timeoutMS: 0 + + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: dropIndexes + databaseName: *databaseName + command: + dropIndexes: *collectionName + maxTimeMS: { $$exists: false } + diff --git a/specifications/client-side-operations-timeout/tests/retryability-legacy-timeouts.json b/specifications/client-side-operations-timeout/tests/retryability-legacy-timeouts.json new file mode 100644 index 00000000000..aded781aeed --- /dev/null +++ 
b/specifications/client-side-operations-timeout/tests/retryability-legacy-timeouts.json @@ -0,0 +1,3042 @@ +{ + "description": "legacy timeouts behave correctly for retryable operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "socketTimeoutMS": 100 + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "operation succeeds after one socket timeout - insertOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - insertOne on collection", + "operations": [ + { + "name": 
"failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - insertMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - insertMany on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ 
+ "insert" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - deleteOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - deleteOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } 
+ ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - replaceOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - replaceOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": 
"coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - updateOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - updateOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll" + } + } + } + ] + } 
+ ] + }, + { + "description": "operation succeeds after one socket timeout - findOneAndDelete on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - findOneAndDelete on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - findOneAndReplace on collection", + "operations": [ + { + "name": 
"failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - findOneAndReplace on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - findOneAndUpdate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": 
"failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - findOneAndUpdate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - bulkWrite on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": 
"failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - bulkWrite on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - listDatabases on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + 
"blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - listDatabases on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - listDatabaseNames on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ 
+ { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - listDatabaseNames on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - createChangeStream on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": 
"admin", + "command": { + "aggregate": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - createChangeStream on client", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - aggregate on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - aggregate on 
database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - listCollections on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - listCollections on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + 
"failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - listCollectionNames on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - listCollectionNames on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + 
"blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - createChangeStream on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - createChangeStream on database", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + }, + "expectError": { 
+ "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1 + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - aggregate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - aggregate on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" 
+ } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - count on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - count on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - countDocuments on collection", + 
"operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - countDocuments on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - estimatedDocumentCount on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + 
"times": 1 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - estimatedDocumentCount on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - distinct on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": 
"client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - distinct on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - find on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + 
"databaseName": "test", + "command": { + "find": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - find on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - findOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - findOne on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + 
"client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - listIndexes on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "listIndexes", + "object": "collection" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - listIndexes on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "listIndexes", + 
"object": "collection", + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation succeeds after one socket timeout - createChangeStream on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + } + ] + } + ] + }, + { + "description": "operation fails after two consecutive socket timeouts - createChangeStream on collection", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 125 + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + 
"commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll" + } + } + } + ] + } + ] + } + ] +} diff --git a/specifications/client-side-operations-timeout/tests/retryability-legacy-timeouts.yml b/specifications/client-side-operations-timeout/tests/retryability-legacy-timeouts.yml new file mode 100644 index 00000000000..abcaec6127b --- /dev/null +++ b/specifications/client-side-operations-timeout/tests/retryability-legacy-timeouts.yml @@ -0,0 +1,1676 @@ +# Tests in this file are generated from retryability-legacy-timeouts.yml.template. + +description: "legacy timeouts behave correctly for retryable operations" + +schemaVersion: "1.9" + +runOnRequirements: + - minServerVersion: "4.4" + topologies: ["replicaset", "sharded"] + +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + - client: + id: &client client + uriOptions: + socketTimeoutMS: 100 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: &databaseName test + - collection: + id: &collection collection + database: *database + collectionName: &collectionName coll + +initialData: + - collectionName: *collectionName + databaseName: *databaseName + documents: [] + +tests: + # For each retryable operation, run two tests: + # + # 1. Socket timeouts are retried once - Each test constructs a client entity with socketTimeoutMS=100, configures a + # fail point to block the operation once for 125ms, and expects the operation to succeed. + # + # 2. Operations fail after two consecutive socket timeouts - Same as (1) but the fail point is configured to block + # the operation twice and the test expects the operation to fail. 
+ + - description: "operation succeeds after one socket timeout - insertOne on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 125 + - name: insertOne + object: *collection + arguments: + document: { x: 1 } + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + - description: "operation fails after two consecutive socket timeouts - insertOne on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 125 + - name: insertOne + object: *collection + arguments: + document: { x: 1 } + + expectError: + # Network errors are considered client errors by the unified test format spec. 
+ isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + - description: "operation succeeds after one socket timeout - insertMany on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 125 + - name: insertMany + object: *collection + arguments: + documents: + - { x: 1 } + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + - description: "operation fails after two consecutive socket timeouts - insertMany on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 125 + - name: insertMany + object: *collection + arguments: + documents: + - { x: 1 } + + expectError: + # Network errors are considered client errors by the unified test format spec. 
+ isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + - description: "operation succeeds after one socket timeout - deleteOne on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 125 + - name: deleteOne + object: *collection + arguments: + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + - description: "operation fails after two consecutive socket timeouts - deleteOne on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 125 + - name: deleteOne + object: *collection + arguments: + filter: {} + + expectError: + # Network errors are considered client errors by the unified test format spec. 
+ isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + - description: "operation succeeds after one socket timeout - replaceOne on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 125 + - name: replaceOne + object: *collection + arguments: + filter: {} + replacement: { x: 1 } + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + - description: "operation fails after two consecutive socket timeouts - replaceOne on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 125 + - name: replaceOne + object: *collection + arguments: + filter: {} + replacement: { x: 1 } + + expectError: + # Network errors are considered client errors by the unified test format spec. 
+ isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + - description: "operation succeeds after one socket timeout - updateOne on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 125 + - name: updateOne + object: *collection + arguments: + filter: {} + update: { $set: { x: 1 } } + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + - description: "operation fails after two consecutive socket timeouts - updateOne on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 125 + - name: updateOne + object: *collection + arguments: + filter: {} + update: { $set: { x: 1 } } + + expectError: + # Network errors are considered client errors by the unified test format spec. 
+ isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + - description: "operation succeeds after one socket timeout - findOneAndDelete on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 125 + - name: findOneAndDelete + object: *collection + arguments: + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + - description: "operation fails after two consecutive socket timeouts - findOneAndDelete on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 125 + - name: findOneAndDelete + object: *collection + arguments: + filter: {} + + expectError: + # Network errors are considered client errors by the unified test format spec. 
+ isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + - description: "operation succeeds after one socket timeout - findOneAndReplace on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 125 + - name: findOneAndReplace + object: *collection + arguments: + filter: {} + replacement: { x: 1 } + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + - description: "operation fails after two consecutive socket timeouts - findOneAndReplace on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 125 + - name: findOneAndReplace + object: *collection + arguments: + filter: {} + replacement: { x: 1 } + + expectError: + # Network errors are considered client errors by the unified test format spec. 
+ isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + - description: "operation succeeds after one socket timeout - findOneAndUpdate on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 125 + - name: findOneAndUpdate + object: *collection + arguments: + filter: {} + update: { $set: { x: 1 } } + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + - description: "operation fails after two consecutive socket timeouts - findOneAndUpdate on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 125 + - name: findOneAndUpdate + object: *collection + arguments: + filter: {} + update: { $set: { x: 1 } } + + expectError: + # Network errors are considered client errors by the unified test format spec. 
+ isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + - description: "operation succeeds after one socket timeout - bulkWrite on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 125 + - name: bulkWrite + object: *collection + arguments: + requests: + - insertOne: + document: { _id: 1 } + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + - description: "operation fails after two consecutive socket timeouts - bulkWrite on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 125 + - name: bulkWrite + object: *collection + arguments: + requests: + - insertOne: + document: { _id: 1 } + + expectError: + # Network errors are considered client errors by the unified test format spec. 
+ isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + - description: "operation succeeds after one socket timeout - listDatabases on client" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listDatabases"] + blockConnection: true + blockTimeMS: 125 + - name: listDatabases + object: *client + arguments: + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + - description: "operation fails after two consecutive socket timeouts - listDatabases on client" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["listDatabases"] + blockConnection: true + blockTimeMS: 125 + - name: listDatabases + object: *client + arguments: + filter: {} + + expectError: + # Network errors are considered client errors by the unified test format spec. 
+ isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + - description: "operation succeeds after one socket timeout - listDatabaseNames on client" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listDatabases"] + blockConnection: true + blockTimeMS: 125 + - name: listDatabaseNames + object: *client + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + - description: "operation fails after two consecutive socket timeouts - listDatabaseNames on client" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["listDatabases"] + blockConnection: true + blockTimeMS: 125 + - name: listDatabaseNames + object: *client + + expectError: + # Network errors are considered client errors by the unified test format spec. 
+ isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + - description: "operation succeeds after one socket timeout - createChangeStream on client" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 125 + - name: createChangeStream + object: *client + arguments: + pipeline: [] + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: admin + command: + aggregate: 1 + - commandStartedEvent: + commandName: aggregate + databaseName: admin + command: + aggregate: 1 + - description: "operation fails after two consecutive socket timeouts - createChangeStream on client" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 125 + - name: createChangeStream + object: *client + arguments: + pipeline: [] + + expectError: + # Network errors are considered client errors by the unified test format spec. 
+ isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: admin + command: + aggregate: 1 + - commandStartedEvent: + commandName: aggregate + databaseName: admin + command: + aggregate: 1 + - description: "operation succeeds after one socket timeout - aggregate on database" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 125 + - name: aggregate + object: *database + arguments: + pipeline: [ { $listLocalSessions: {} }, { $limit: 1 } ] + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + - description: "operation fails after two consecutive socket timeouts - aggregate on database" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 125 + - name: aggregate + object: *database + arguments: + pipeline: [ { $listLocalSessions: {} }, { $limit: 1 } ] + + expectError: + # Network errors are considered client errors by the unified test format spec. 
+ isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + - description: "operation succeeds after one socket timeout - listCollections on database" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listCollections"] + blockConnection: true + blockTimeMS: 125 + - name: listCollections + object: *database + arguments: + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + - description: "operation fails after two consecutive socket timeouts - listCollections on database" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["listCollections"] + blockConnection: true + blockTimeMS: 125 + - name: listCollections + object: *database + arguments: + filter: {} + + expectError: + # Network errors are considered client errors by the unified test format spec. 
+ isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + - description: "operation succeeds after one socket timeout - listCollectionNames on database" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listCollections"] + blockConnection: true + blockTimeMS: 125 + - name: listCollectionNames + object: *database + arguments: + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + - description: "operation fails after two consecutive socket timeouts - listCollectionNames on database" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["listCollections"] + blockConnection: true + blockTimeMS: 125 + - name: listCollectionNames + object: *database + arguments: + filter: {} + + expectError: + # Network errors are considered client errors by the unified test format spec. 
+ isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + - description: "operation succeeds after one socket timeout - createChangeStream on database" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 125 + - name: createChangeStream + object: *database + arguments: + pipeline: [] + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + - description: "operation fails after two consecutive socket timeouts - createChangeStream on database" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 125 + - name: createChangeStream + object: *database + arguments: + pipeline: [] + + expectError: + # Network errors are considered client errors by the unified test format spec. 
+ isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + - description: "operation succeeds after one socket timeout - aggregate on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 125 + - name: aggregate + object: *collection + arguments: + pipeline: [] + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + - description: "operation fails after two consecutive socket timeouts - aggregate on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 125 + - name: aggregate + object: *collection + arguments: + pipeline: [] + + expectError: + # Network errors are considered client errors by the unified test format spec. 
+ isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + - description: "operation succeeds after one socket timeout - count on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 125 + - name: count + object: *collection + arguments: + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + - description: "operation fails after two consecutive socket timeouts - count on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 125 + - name: count + object: *collection + arguments: + filter: {} + + expectError: + # Network errors are considered client errors by the unified test format spec. 
+ isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + - description: "operation succeeds after one socket timeout - countDocuments on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 125 + - name: countDocuments + object: *collection + arguments: + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + - description: "operation fails after two consecutive socket timeouts - countDocuments on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 125 + - name: countDocuments + object: *collection + arguments: + filter: {} + + expectError: + # Network errors are considered client errors by the unified test format spec. 
+ isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + - description: "operation succeeds after one socket timeout - estimatedDocumentCount on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 125 + - name: estimatedDocumentCount + object: *collection + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + - description: "operation fails after two consecutive socket timeouts - estimatedDocumentCount on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 125 + - name: estimatedDocumentCount + object: *collection + + expectError: + # Network errors are considered client errors by the unified test format spec. 
+ isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + - description: "operation succeeds after one socket timeout - distinct on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["distinct"] + blockConnection: true + blockTimeMS: 125 + - name: distinct + object: *collection + arguments: + fieldName: x + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: distinct + databaseName: *databaseName + command: + distinct: *collectionName + - commandStartedEvent: + commandName: distinct + databaseName: *databaseName + command: + distinct: *collectionName + - description: "operation fails after two consecutive socket timeouts - distinct on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["distinct"] + blockConnection: true + blockTimeMS: 125 + - name: distinct + object: *collection + arguments: + fieldName: x + filter: {} + + expectError: + # Network errors are considered client errors by the unified test format spec. 
+ isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: distinct + databaseName: *databaseName + command: + distinct: *collectionName + - commandStartedEvent: + commandName: distinct + databaseName: *databaseName + command: + distinct: *collectionName + - description: "operation succeeds after one socket timeout - find on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 125 + - name: find + object: *collection + arguments: + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + - description: "operation fails after two consecutive socket timeouts - find on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 125 + - name: find + object: *collection + arguments: + filter: {} + + expectError: + # Network errors are considered client errors by the unified test format spec. 
+ isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + - description: "operation succeeds after one socket timeout - findOne on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 125 + - name: findOne + object: *collection + arguments: + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + - description: "operation fails after two consecutive socket timeouts - findOne on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 125 + - name: findOne + object: *collection + arguments: + filter: {} + + expectError: + # Network errors are considered client errors by the unified test format spec. 
+ isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + - description: "operation succeeds after one socket timeout - listIndexes on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["listIndexes"] + blockConnection: true + blockTimeMS: 125 + - name: listIndexes + object: *collection + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + - description: "operation fails after two consecutive socket timeouts - listIndexes on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["listIndexes"] + blockConnection: true + blockTimeMS: 125 + - name: listIndexes + object: *collection + + expectError: + # Network errors are considered client errors by the unified test format spec. 
+ isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + - description: "operation succeeds after one socket timeout - createChangeStream on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 125 + - name: createChangeStream + object: *collection + arguments: + pipeline: [] + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + - description: "operation fails after two consecutive socket timeouts - createChangeStream on collection" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 125 + - name: createChangeStream + object: *collection + arguments: + pipeline: [] + + expectError: + # Network errors are considered client errors by the unified test format spec. 
+ isClientError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + \ No newline at end of file diff --git a/specifications/client-side-operations-timeout/tests/retryability-timeoutMS.json b/specifications/client-side-operations-timeout/tests/retryability-timeoutMS.json new file mode 100644 index 00000000000..5a0c9f36051 --- /dev/null +++ b/specifications/client-side-operations-timeout/tests/retryability-timeoutMS.json @@ -0,0 +1,5690 @@ +{ + "description": "timeoutMS behaves correctly for retryable operations", + "schemaVersion": "1.26", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "replicaset" + ] + }, + { + "minServerVersion": "4.2", + "topologies": [ + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 100, + "minPoolSize": 1 + }, + "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent" + ], + "ignoreCommandMonitoringEvents": [ + "killCursors" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS applies to whole operation, not individual attempts - insertOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + 
"configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "x": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - insertOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - insertOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + 
"client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "document": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - insertMany on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "x": 1 + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - insertMany on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": 
{ + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - insertMany on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "documents": [ + { + "x": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + 
"commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - deleteOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "delete" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - deleteOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + 
"commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - deleteOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "delete" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "deleteOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "delete", + "databaseName": "test", + "command": { + "delete": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - replaceOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + 
"mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - replaceOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - replaceOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + 
"client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "replaceOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - updateOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "update" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - updateOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + 
"object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - updateOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "update" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "updateOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", 
+ "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "update", + "databaseName": "test", + "command": { + "update": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - findOneAndDelete on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndDelete on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + 
"findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - findOneAndDelete on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndDelete", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - findOneAndReplace on collection", + "runOnRequirements": [ + { + "minServerVersion": 
"4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "filter": {}, + "replacement": { + "x": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndReplace on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + 
{ + "description": "operation is retried multiple times if timeoutMS is zero - findOneAndReplace on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndReplace", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "replacement": { + "x": 1 + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - findOneAndUpdate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + 
"filter": {}, + "update": { + "$set": { + "x": 1 + } + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - findOneAndUpdate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - findOneAndUpdate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "findAndModify" + ], + "errorCode": 7, + 
"closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOneAndUpdate", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {}, + "update": { + "$set": { + "x": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "findAndModify", + "databaseName": "test", + "command": { + "findAndModify": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - bulkWrite on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - bulkWrite on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + 
"configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - bulkWrite on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "insert" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "bulkWrite", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "requests": [ + { + "insertOne": { + "document": { + "_id": 1 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } 
+ }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - listDatabases on client", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - listDatabases on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + 
} + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - listDatabases on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listDatabases", + "object": "client", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - listDatabaseNames on client", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": 
"failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - listDatabaseNames on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - listDatabaseNames on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + 
"arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listDatabases" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listDatabaseNames", + "object": "client", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listDatabases", + "databaseName": "admin", + "command": { + "listDatabases": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - createChangeStream on client", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": 
"testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "timeoutMS": 1000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on client", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "client", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + 
"commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "admin", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - aggregate on database", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - aggregate on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": 
"test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - aggregate on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "aggregate", + "object": "database", + "arguments": { + "timeoutMS": 0, + "pipeline": [ + { + "$listLocalSessions": {} + }, + { + "$limit": 1 + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - listCollections on database", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + 
], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - listCollections on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - 
listCollections on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listCollections", + "object": "database", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - listCollectionNames on database", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is 
retried multiple times for non-zero timeoutMS - listCollectionNames on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - listCollectionNames on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listCollections" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listCollectionNames", + "object": "database", + "arguments": { + "timeoutMS": 0, + "filter": {} + 
} + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listCollections", + "databaseName": "test", + "command": { + "listCollections": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - createChangeStream on database", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + 
"arguments": { + "timeoutMS": 1000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on database", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "database", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": 1, + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + 
"description": "timeoutMS applies to whole operation, not individual attempts - aggregate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - aggregate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" 
+ ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - aggregate on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "aggregate", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - count on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } 
+ } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - count on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - count on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "count", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + 
"events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - countDocuments on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - countDocuments on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + 
{ + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - countDocuments on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "countDocuments", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - estimatedDocumentCount on 
collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "count" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - estimatedDocumentCount on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - estimatedDocumentCount on 
collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "count" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "estimatedDocumentCount", + "object": "collection", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "count", + "databaseName": "test", + "command": { + "count": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - distinct on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "distinct" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "fieldName": "x", + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - distinct on collection", + 
"runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - distinct on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "distinct" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "distinct", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "fieldName": "x", + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + 
"commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "distinct", + "databaseName": "test", + "command": { + "distinct": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - find on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - find on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": 
"test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - find on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - findOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": 
"failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "filter": {} + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - findOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - findOne on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": 
"failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "find" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "findOne", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "filter": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "find", + "databaseName": "test", + "command": { + "find": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - listIndexes on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - listIndexes on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + 
"mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 1000 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - listIndexes on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "listIndexes" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "listIndexes", + "object": "collection", + "arguments": { + "timeoutMS": 0 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + 
"maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "listIndexes", + "databaseName": "test", + "command": { + "listIndexes": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + }, + { + "description": "timeoutMS applies to whole operation, not individual attempts - createChangeStream on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.4" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 4 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "blockConnection": true, + "blockTimeMS": 60, + "errorCode": 7, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "pipeline": [] + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + { + "description": "operation is retried multiple times for non-zero timeoutMS - createChangeStream on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "timeoutMS": 1000, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + 
"databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + } + ] + } + ] + }, + { + "description": "operation is retried multiple times if timeoutMS is zero - createChangeStream on collection", + "runOnRequirements": [ + { + "minServerVersion": "4.3.1" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "aggregate" + ], + "errorCode": 7, + "closeConnection": false, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "name": "createChangeStream", + "object": "collection", + "arguments": { + "timeoutMS": 0, + "pipeline": [] + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "aggregate", + "databaseName": "test", + "command": { + "aggregate": "coll", + "maxTimeMS": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/specifications/client-side-operations-timeout/tests/retryability-timeoutMS.yml b/specifications/client-side-operations-timeout/tests/retryability-timeoutMS.yml new file mode 100644 index 00000000000..5e0ad164a2c --- /dev/null +++ b/specifications/client-side-operations-timeout/tests/retryability-timeoutMS.yml @@ -0,0 +1,2826 @@ +# Tests in this 
file are generated from retryability-timeoutMS.yml.template. + +description: "timeoutMS behaves correctly for retryable operations" + +schemaVersion: "1.26" + +# failCommand is available on 4.0+ replica sets and 4.2+ sharded clusters. +runOnRequirements: + - minServerVersion: "4.0" + topologies: ["replicaset"] + - minServerVersion: "4.2" + topologies: ["sharded"] + +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + - client: + id: &client client + uriOptions: + timeoutMS: 100 + minPoolSize: 1 + awaitMinPoolSizeMS: 10000 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + ignoreCommandMonitoringEvents: + - killCursors + - database: + id: &database database + client: *client + databaseName: &databaseName test + - collection: + id: &collection collection + database: *database + collectionName: &collectionName coll + +initialData: + - collectionName: *collectionName + databaseName: *databaseName + documents: [] + +tests: + # For each retryable operation, run three tests: + # + # 1. timeoutMS applies to the whole operation, not to individual attempts - Client timeoutMS=100 and the operation + # fails with a retryable error after being blocked server-side for 60ms. The operation should fail with a timeout error + # because the second attempt should take it over the 100ms limit. This test only runs on 4.4+ because it uses the + # blockConnection option in failCommand. + # + # 2. operation is retried multiple times if timeoutMS is set to a non-zero value - Client timeoutMS=100 and the + # operation fails with a retryable error twice. Drivers should send the original operation and two retries, the + # second of which should succeed. + # + # 3. operation is retried multiple times if timeoutMS is set to zero - Override timeoutMS to zero for the operation + # and set a fail point to force a retryable error twice. 
Drivers should send the original operation and two retries, + # the second of which should succeed. + # + # The fail points in these tests use error code 7 (HostNotFound) because it is a retryable error but does not trigger + # an SDAM state change so we don't lose any time to server rediscovery. The tests also explicitly specify an + # errorLabels array in the fail point to avoid behavioral differences among server types and ensure that the error + # will be considered retryable. + + - description: "timeoutMS applies to whole operation, not individual attempts - insertOne on collection" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: insertOne + object: *collection + arguments: + document: { x: 1 } + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - insertOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["insert"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: insertOne + object: *collection + arguments: + timeoutMS: 1000 + document: { x: 1 } + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + 
commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - insertOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["insert"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: insertOne + object: *collection + arguments: + timeoutMS: 0 + document: { x: 1 } + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - insertMany on collection" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: insertMany + object: *collection + arguments: + documents: + - { x: 1 } + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - insertMany on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + 
arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["insert"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: insertMany + object: *collection + arguments: + timeoutMS: 1000 + documents: + - { x: 1 } + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - insertMany on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["insert"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: insertMany + object: *collection + arguments: + timeoutMS: 0 + documents: + - { x: 1 } + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - deleteOne on collection" + 
runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["delete"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: deleteOne + object: *collection + arguments: + filter: {} + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - deleteOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["delete"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: deleteOne + object: *collection + arguments: + timeoutMS: 1000 + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - deleteOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["delete"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: deleteOne + 
object: *collection + arguments: + timeoutMS: 0 + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: delete + databaseName: *databaseName + command: + delete: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - replaceOne on collection" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: replaceOne + object: *collection + arguments: + filter: {} + replacement: { x: 1 } + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - replaceOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["update"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: replaceOne + object: *collection + arguments: + timeoutMS: 1000 + filter: {} + replacement: { x: 1 } + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: update + databaseName: *databaseName + 
command: + update: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - replaceOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["update"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: replaceOne + object: *collection + arguments: + timeoutMS: 0 + filter: {} + replacement: { x: 1 } + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - updateOne on collection" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["update"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: updateOne + object: *collection + arguments: + filter: {} + update: { $set: { x: 1 } } + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - updateOne on collection" + 
runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["update"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: updateOne + object: *collection + arguments: + timeoutMS: 1000 + filter: {} + update: { $set: { x: 1 } } + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - updateOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["update"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: updateOne + object: *collection + arguments: + timeoutMS: 0 + filter: {} + update: { $set: { x: 1 } } + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + update: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: update + databaseName: *databaseName + command: + 
update: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - findOneAndDelete on collection" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: findOneAndDelete + object: *collection + arguments: + filter: {} + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - findOneAndDelete on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["findAndModify"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: findOneAndDelete + object: *collection + arguments: + timeoutMS: 1000 + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - findOneAndDelete on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: 
testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["findAndModify"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: findOneAndDelete + object: *collection + arguments: + timeoutMS: 0 + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - findOneAndReplace on collection" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: findOneAndReplace + object: *collection + arguments: + filter: {} + replacement: { x: 1 } + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - findOneAndReplace on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["findAndModify"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: findOneAndReplace + object: *collection + 
arguments: + timeoutMS: 1000 + filter: {} + replacement: { x: 1 } + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - findOneAndReplace on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["findAndModify"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: findOneAndReplace + object: *collection + arguments: + timeoutMS: 0 + filter: {} + replacement: { x: 1 } + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - findOneAndUpdate on collection" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: 
*failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["findAndModify"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: findOneAndUpdate + object: *collection + arguments: + filter: {} + update: { $set: { x: 1 } } + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - findOneAndUpdate on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["findAndModify"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: findOneAndUpdate + object: *collection + arguments: + timeoutMS: 1000 + filter: {} + update: { $set: { x: 1 } } + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - findOneAndUpdate on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["findAndModify"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] 
+ - name: findOneAndUpdate + object: *collection + arguments: + timeoutMS: 0 + filter: {} + update: { $set: { x: 1 } } + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: findAndModify + databaseName: *databaseName + command: + findAndModify: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - bulkWrite on collection" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: bulkWrite + object: *collection + arguments: + requests: + - insertOne: + document: { _id: 1 } + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - bulkWrite on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["insert"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: bulkWrite + object: *collection + arguments: + timeoutMS: 1000 + requests: + - insertOne: + document: { _id: 1 } + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + 
maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - bulkWrite on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["insert"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: bulkWrite + object: *collection + arguments: + timeoutMS: 0 + requests: + - insertOne: + document: { _id: 1 } + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - listDatabases on client" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["listDatabases"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: listDatabases + object: *client + arguments: + filter: {} + + expectError: + isTimeoutError: true + - 
description: "operation is retried multiple times for non-zero timeoutMS - listDatabases on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["listDatabases"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: listDatabases + object: *client + arguments: + timeoutMS: 1000 + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - listDatabases on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["listDatabases"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: listDatabases + object: *client + arguments: + timeoutMS: 0 + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + 
command: + listDatabases: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - listDatabaseNames on client" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["listDatabases"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: listDatabaseNames + object: *client + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - listDatabaseNames on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["listDatabases"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: listDatabaseNames + object: *client + arguments: + timeoutMS: 1000 + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - listDatabaseNames on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 
2 } + data: + failCommands: ["listDatabases"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: listDatabaseNames + object: *client + arguments: + timeoutMS: 0 + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: listDatabases + databaseName: admin + command: + listDatabases: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - createChangeStream on client" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: createChangeStream + object: *client + arguments: + pipeline: [] + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - createChangeStream on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["aggregate"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: createChangeStream + object: *client + arguments: + timeoutMS: 1000 + pipeline: [] + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: admin + command: + aggregate: 1 + maxTimeMS: { $$type: ["int", "long"] } + - 
commandStartedEvent: + commandName: aggregate + databaseName: admin + command: + aggregate: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: aggregate + databaseName: admin + command: + aggregate: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - createChangeStream on client" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["aggregate"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: createChangeStream + object: *client + arguments: + timeoutMS: 0 + pipeline: [] + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: admin + command: + aggregate: 1 + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: aggregate + databaseName: admin + command: + aggregate: 1 + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: aggregate + databaseName: admin + command: + aggregate: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - aggregate on database" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: aggregate + object: *database + arguments: + pipeline: [ { $listLocalSessions: {} }, { $limit: 1 } ] + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - aggregate on database" + 
runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["aggregate"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: aggregate + object: *database + arguments: + timeoutMS: 1000 + pipeline: [ { $listLocalSessions: {} }, { $limit: 1 } ] + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - aggregate on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["aggregate"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: aggregate + object: *database + arguments: + timeoutMS: 0 + pipeline: [ { $listLocalSessions: {} }, { $limit: 1 } ] + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 
1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - listCollections on database" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["listCollections"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: listCollections + object: *database + arguments: + filter: {} + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - listCollections on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["listCollections"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: listCollections + object: *database + arguments: + timeoutMS: 1000 + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - listCollections on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + 
failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["listCollections"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: listCollections + object: *database + arguments: + timeoutMS: 0 + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - listCollectionNames on database" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["listCollections"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: listCollectionNames + object: *database + arguments: + filter: {} + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - listCollectionNames on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["listCollections"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: listCollectionNames + object: *database + arguments: + timeoutMS: 1000 + filter: {} + + expectEvents: + - client: *client + events: + - 
commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - listCollectionNames on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["listCollections"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: listCollectionNames + object: *database + arguments: + timeoutMS: 0 + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: listCollections + databaseName: *databaseName + command: + listCollections: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - createChangeStream on database" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: 
["RetryableWriteError"] + - name: createChangeStream + object: *database + arguments: + pipeline: [] + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - createChangeStream on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["aggregate"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: createChangeStream + object: *database + arguments: + timeoutMS: 1000 + pipeline: [] + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - createChangeStream on database" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["aggregate"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: createChangeStream + object: *database + arguments: + timeoutMS: 0 + pipeline: [] + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: 
aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: 1 + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - aggregate on collection" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: aggregate + object: *collection + arguments: + pipeline: [] + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - aggregate on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["aggregate"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: aggregate + object: *collection + arguments: + timeoutMS: 1000 + pipeline: [] + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - aggregate on collection" + 
runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["aggregate"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: aggregate + object: *collection + arguments: + timeoutMS: 0 + pipeline: [] + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - count on collection" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: count + object: *collection + arguments: + filter: {} + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - count on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["count"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: count + object: 
*collection + arguments: + timeoutMS: 1000 + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - count on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["count"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: count + object: *collection + arguments: + timeoutMS: 0 + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - countDocuments on collection" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 60 + errorCode: 
7 + errorLabels: ["RetryableWriteError"] + - name: countDocuments + object: *collection + arguments: + filter: {} + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - countDocuments on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["aggregate"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: countDocuments + object: *collection + arguments: + timeoutMS: 1000 + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - countDocuments on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["aggregate"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: countDocuments + object: *collection + arguments: + timeoutMS: 0 + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { 
$$exists: false } + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - estimatedDocumentCount on collection" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["count"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: estimatedDocumentCount + object: *collection + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - estimatedDocumentCount on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["count"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: estimatedDocumentCount + object: *collection + arguments: + timeoutMS: 1000 + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple 
times if timeoutMS is zero - estimatedDocumentCount on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["count"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: estimatedDocumentCount + object: *collection + arguments: + timeoutMS: 0 + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: count + databaseName: *databaseName + command: + count: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - distinct on collection" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["distinct"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: distinct + object: *collection + arguments: + fieldName: x + filter: {} + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - distinct on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["distinct"] + errorCode: 7 + closeConnection: false + 
errorLabels: ["RetryableWriteError"] + - name: distinct + object: *collection + arguments: + timeoutMS: 1000 + fieldName: x + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: distinct + databaseName: *databaseName + command: + distinct: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: distinct + databaseName: *databaseName + command: + distinct: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: distinct + databaseName: *databaseName + command: + distinct: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - distinct on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["distinct"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: distinct + object: *collection + arguments: + timeoutMS: 0 + fieldName: x + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: distinct + databaseName: *databaseName + command: + distinct: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: distinct + databaseName: *databaseName + command: + distinct: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: distinct + databaseName: *databaseName + command: + distinct: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - find on collection" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + 
configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: find + object: *collection + arguments: + filter: {} + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - find on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["find"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: find + object: *collection + arguments: + timeoutMS: 1000 + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - find on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["find"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: find + object: *collection + arguments: + timeoutMS: 0 + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + 
find: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - findOne on collection" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: findOne + object: *collection + arguments: + filter: {} + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - findOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["find"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: findOne + object: *collection + arguments: + timeoutMS: 1000 + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if 
timeoutMS is zero - findOne on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["find"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: findOne + object: *collection + arguments: + timeoutMS: 0 + filter: {} + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: find + databaseName: *databaseName + command: + find: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - listIndexes on collection" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + failCommands: ["listIndexes"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: listIndexes + object: *collection + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - listIndexes on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["listIndexes"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: listIndexes + 
object: *collection + arguments: + timeoutMS: 1000 + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - listIndexes on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["listIndexes"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: listIndexes + object: *collection + arguments: + timeoutMS: 0 + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: listIndexes + databaseName: *databaseName + command: + listIndexes: *collectionName + maxTimeMS: { $$exists: false } + - description: "timeoutMS applies to whole operation, not individual attempts - createChangeStream on collection" + runOnRequirements: + - minServerVersion: "4.4" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 4 } + data: + 
failCommands: ["aggregate"] + blockConnection: true + blockTimeMS: 60 + errorCode: 7 + errorLabels: ["RetryableWriteError"] + - name: createChangeStream + object: *collection + arguments: + pipeline: [] + + expectError: + isTimeoutError: true + - description: "operation is retried multiple times for non-zero timeoutMS - createChangeStream on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["aggregate"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: createChangeStream + object: *collection + arguments: + timeoutMS: 1000 + pipeline: [] + + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$type: ["int", "long"] } + - description: "operation is retried multiple times if timeoutMS is zero - createChangeStream on collection" + runOnRequirements: + - minServerVersion: "4.3.1" # failCommand errorLabels option + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: ["aggregate"] + errorCode: 7 + closeConnection: false + errorLabels: ["RetryableWriteError"] + - name: createChangeStream + object: *collection + arguments: + timeoutMS: 0 + pipeline: [] + + expectEvents: + - client: *client + events: + - commandStartedEvent: + 
commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$exists: false } + - commandStartedEvent: + commandName: aggregate + databaseName: *databaseName + command: + aggregate: *collectionName + maxTimeMS: { $$exists: false } + \ No newline at end of file diff --git a/specifications/client-side-operations-timeout/tests/sessions-inherit-timeoutMS.json b/specifications/client-side-operations-timeout/tests/sessions-inherit-timeoutMS.json new file mode 100644 index 00000000000..dbf163e484e --- /dev/null +++ b/specifications/client-side-operations-timeout/tests/sessions-inherit-timeoutMS.json @@ -0,0 +1,333 @@ +{ + "description": "sessions inherit timeoutMS from their parent MongoClient", + "schemaVersion": "1.26", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "timeoutMS": 500, + "minPoolSize": 1 + }, + "awaitMinPoolSizeMS": 10000, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS applied to commitTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + 
"failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "blockConnection": true, + "blockTimeMS": 600 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + } + }, + { + "name": "commitTransaction", + "object": "session", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "commitTransaction" + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to abortTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "blockConnection": true, + "blockTimeMS": 600 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + } + }, + { + "name": "abortTransaction", + "object": "session" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + 
"commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to withTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 600 + } + } + } + }, + { + "name": "withTransaction", + "object": "session", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } + } + ] + } + ] + } + ] +} diff --git a/specifications/client-side-operations-timeout/tests/sessions-inherit-timeoutMS.yml b/specifications/client-side-operations-timeout/tests/sessions-inherit-timeoutMS.yml new file mode 100644 index 00000000000..77f216d3adf --- /dev/null +++ b/specifications/client-side-operations-timeout/tests/sessions-inherit-timeoutMS.yml @@ -0,0 +1,175 @@ +description: "sessions 
inherit timeoutMS from their parent MongoClient" + +schemaVersion: "1.26" + +runOnRequirements: + - minServerVersion: "4.4" + topologies: ["replicaset", "sharded"] + +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + - client: + id: &client client + uriOptions: + timeoutMS: 500 + minPoolSize: 1 + awaitMinPoolSizeMS: 10000 + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - commandSucceededEvent + - commandFailedEvent + - database: + id: &database database + client: *client + databaseName: &databaseName test + - collection: + id: &collection collection + database: *database + collectionName: &collectionName coll + - session: + id: &session session + client: *client + +initialData: + - collectionName: *collectionName + databaseName: *databaseName + documents: [] + +tests: + # Drivers ignore errors from abortTransaction, so the tests in this file use commandSucceededEvent and + # commandFailedEvent events to assert success/failure. 
+ + - description: "timeoutMS applied to commitTransaction" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["commitTransaction"] + blockConnection: true + blockTimeMS: 600 + - name: startTransaction + object: *session + - name: insertOne + object: *collection + arguments: + session: *session + document: { _id: 1 } + - name: commitTransaction + object: *session + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + - commandSucceededEvent: + commandName: insert + - commandStartedEvent: + commandName: commitTransaction + databaseName: admin + command: + commitTransaction: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandFailedEvent: + commandName: commitTransaction + + - description: "timeoutMS applied to abortTransaction" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["abortTransaction"] + blockConnection: true + blockTimeMS: 600 + - name: startTransaction + object: *session + - name: insertOne + object: *collection + arguments: + session: *session + document: { _id: 1 } + - name: abortTransaction + object: *session + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + - commandSucceededEvent: + commandName: insert + - commandStartedEvent: + commandName: abortTransaction + databaseName: admin + command: + abortTransaction: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandFailedEvent: + commandName: abortTransaction + + - description: "timeoutMS applied to withTransaction" + operations: + - name: failPoint + object: testRunner + 
arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 600 + - name: withTransaction + object: *session + arguments: + callback: + - name: insertOne + object: *collection + arguments: + session: *session + document: { _id: 1 } + expectError: + isTimeoutError: true + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + # withTransaction specifies timeoutMS for each operation in the callback that uses the session, so the + # insert command should have a maxTimeMS field. + maxTimeMS: { $$type: ["int", "long"] } + - commandFailedEvent: + commandName: insert + - commandStartedEvent: + commandName: abortTransaction + databaseName: admin + command: + abortTransaction: 1 + maxTimeMS: { $$type: [ "int", "long" ] } + - commandFailedEvent: + commandName: abortTransaction diff --git a/specifications/client-side-operations-timeout/tests/sessions-override-operation-timeoutMS.json b/specifications/client-side-operations-timeout/tests/sessions-override-operation-timeoutMS.json new file mode 100644 index 00000000000..441c698328c --- /dev/null +++ b/specifications/client-side-operations-timeout/tests/sessions-override-operation-timeoutMS.json @@ -0,0 +1,335 @@ +{ + "description": "timeoutMS can be overridden for individual session operations", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": 
"client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS can be overridden for commitTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "blockConnection": true, + "blockTimeMS": 600 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + } + }, + { + "name": "commitTransaction", + "object": "session", + "arguments": { + "timeoutMS": 500 + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "commitTransaction" + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to abortTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "abortTransaction" + ], + "blockConnection": 
true, + "blockTimeMS": 600 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + } + }, + { + "name": "abortTransaction", + "object": "session", + "arguments": { + "timeoutMS": 500 + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to withTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 600 + } + } + } + }, + { + "name": "withTransaction", + "object": "session", + "arguments": { + "timeoutMS": 500, + "callback": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": 
"abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } + } + ] + } + ] + } + ] +} diff --git a/specifications/client-side-operations-timeout/tests/sessions-override-operation-timeoutMS.yml b/specifications/client-side-operations-timeout/tests/sessions-override-operation-timeoutMS.yml new file mode 100644 index 00000000000..bee91dc4cb8 --- /dev/null +++ b/specifications/client-side-operations-timeout/tests/sessions-override-operation-timeoutMS.yml @@ -0,0 +1,176 @@ +description: "timeoutMS can be overridden for individual session operations" + +schemaVersion: "1.9" + +runOnRequirements: + - minServerVersion: "4.4" + topologies: ["replicaset", "sharded"] + +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - commandSucceededEvent + - commandFailedEvent + - database: + id: &database database + client: *client + databaseName: &databaseName test + - collection: + id: &collection collection + database: *database + collectionName: &collectionName coll + - session: + id: &session session + client: *client + +initialData: + - collectionName: *collectionName + databaseName: *databaseName + documents: [] + +tests: + # Drivers ignore errors from abortTransaction, so the tests in this file use commandSucceededEvent and + # commandFailedEvent events to assert success/failure. 
+ + - description: "timeoutMS can be overridden for commitTransaction" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["commitTransaction"] + blockConnection: true + blockTimeMS: 600 + - name: startTransaction + object: *session + - name: insertOne + object: *collection + arguments: + session: *session + document: { _id: 1 } + - name: commitTransaction + object: *session + arguments: + timeoutMS: 500 + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + - commandSucceededEvent: + commandName: insert + - commandStartedEvent: + commandName: commitTransaction + databaseName: admin + command: + commitTransaction: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandFailedEvent: + commandName: commitTransaction + + - description: "timeoutMS applied to abortTransaction" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["abortTransaction"] + blockConnection: true + blockTimeMS: 600 + - name: startTransaction + object: *session + - name: insertOne + object: *collection + arguments: + session: *session + document: { _id: 1 } + - name: abortTransaction + object: *session + arguments: + timeoutMS: 500 + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + - commandSucceededEvent: + commandName: insert + - commandStartedEvent: + commandName: abortTransaction + databaseName: admin + command: + abortTransaction: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandFailedEvent: + commandName: abortTransaction + + - description: "timeoutMS applied to 
withTransaction" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 600 + - name: withTransaction + object: *session + arguments: + timeoutMS: 500 + callback: + - name: insertOne + object: *collection + arguments: + session: *session + document: { _id: 1 } + expectError: + isTimeoutError: true + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + # withTransaction specifies timeoutMS for each operation in the callback that uses the session, so the + # insert command should have a maxTimeMS field. + maxTimeMS: { $$type: ["int", "long"] } + - commandFailedEvent: + commandName: insert + - commandStartedEvent: + commandName: abortTransaction + databaseName: admin + command: + abortTransaction: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandFailedEvent: + commandName: abortTransaction diff --git a/specifications/client-side-operations-timeout/tests/sessions-override-timeoutMS.json b/specifications/client-side-operations-timeout/tests/sessions-override-timeoutMS.json new file mode 100644 index 00000000000..d90152e909c --- /dev/null +++ b/specifications/client-side-operations-timeout/tests/sessions-override-timeoutMS.json @@ -0,0 +1,331 @@ +{ + "description": "timeoutMS can be overridden at the level of a ClientSession", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ] + } + }, 
+ { + "database": { + "id": "database", + "client": "client", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "coll" + } + }, + { + "session": { + "id": "session", + "client": "client", + "sessionOptions": { + "defaultTimeoutMS": 500 + } + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "timeoutMS applied to commitTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "commitTransaction" + ], + "blockConnection": true, + "blockTimeMS": 600 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + } + }, + { + "name": "commitTransaction", + "object": "session", + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "commitTransaction" + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to abortTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + 
"failCommands": [ + "abortTransaction" + ], + "blockConnection": true, + "blockTimeMS": 600 + } + } + } + }, + { + "name": "startTransaction", + "object": "session" + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + } + }, + { + "name": "abortTransaction", + "object": "session" + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll" + } + } + }, + { + "commandSucceededEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + "commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } + } + ] + } + ] + }, + { + "description": "timeoutMS applied to withTransaction", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 600 + } + } + } + }, + { + "name": "withTransaction", + "object": "session", + "arguments": { + "callback": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "session": "session", + "document": { + "_id": 1 + } + }, + "expectError": { + "isTimeoutError": true + } + } + ] + }, + "expectError": { + "isTimeoutError": true + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "insert", + "databaseName": "test", + "command": { + "insert": "coll", + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "insert" + } + }, + { + "commandStartedEvent": { + 
"commandName": "abortTransaction", + "databaseName": "admin", + "command": { + "abortTransaction": 1, + "maxTimeMS": { + "$$type": [ + "int", + "long" + ] + } + } + } + }, + { + "commandFailedEvent": { + "commandName": "abortTransaction" + } + } + ] + } + ] + } + ] +} diff --git a/specifications/client-side-operations-timeout/tests/sessions-override-timeoutMS.yml b/specifications/client-side-operations-timeout/tests/sessions-override-timeoutMS.yml new file mode 100644 index 00000000000..73aaf9ff2a7 --- /dev/null +++ b/specifications/client-side-operations-timeout/tests/sessions-override-timeoutMS.yml @@ -0,0 +1,173 @@ +description: "timeoutMS can be overridden at the level of a ClientSession" + +schemaVersion: "1.9" + +runOnRequirements: + - minServerVersion: "4.4" + topologies: ["replicaset", "sharded"] + +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - commandSucceededEvent + - commandFailedEvent + - database: + id: &database database + client: *client + databaseName: &databaseName test + - collection: + id: &collection collection + database: *database + collectionName: &collectionName coll + - session: + id: &session session + client: *client + sessionOptions: + defaultTimeoutMS: 500 + +initialData: + - collectionName: *collectionName + databaseName: *databaseName + documents: [] + +tests: + # Drivers ignore errors from abortTransaction, so the tests in this file use commandSucceededEvent and + # commandFailedEvent events to assert success/failure. 
+ + - description: "timeoutMS applied to commitTransaction" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["commitTransaction"] + blockConnection: true + blockTimeMS: 600 + - name: startTransaction + object: *session + - name: insertOne + object: *collection + arguments: + session: *session + document: { _id: 1 } + - name: commitTransaction + object: *session + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + - commandSucceededEvent: + commandName: insert + - commandStartedEvent: + commandName: commitTransaction + databaseName: admin + command: + commitTransaction: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandFailedEvent: + commandName: commitTransaction + + - description: "timeoutMS applied to abortTransaction" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["abortTransaction"] + blockConnection: true + blockTimeMS: 600 + - name: startTransaction + object: *session + - name: insertOne + object: *collection + arguments: + session: *session + document: { _id: 1 } + - name: abortTransaction + object: *session + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + - commandSucceededEvent: + commandName: insert + - commandStartedEvent: + commandName: abortTransaction + databaseName: admin + command: + abortTransaction: 1 + maxTimeMS: { $$type: ["int", "long"] } + - commandFailedEvent: + commandName: abortTransaction + + - description: "timeoutMS applied to withTransaction" + operations: + - name: failPoint + object: testRunner + 
arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 600 + - name: withTransaction + object: *session + arguments: + callback: + - name: insertOne + object: *collection + arguments: + session: *session + document: { _id: 1 } + expectError: + isTimeoutError: true + expectError: + isTimeoutError: true + expectEvents: + - client: *client + events: + - commandStartedEvent: + commandName: insert + databaseName: *databaseName + command: + insert: *collectionName + # withTransaction specifies timeoutMS for each operation in the callback that uses the session, so the + # insert command should have a maxTimeMS field. + maxTimeMS: { $$type: ["int", "long"] } + - commandFailedEvent: + commandName: insert + - commandStartedEvent: + commandName: abortTransaction + databaseName: admin + command: + abortTransaction: 1 + maxTimeMS: { $$type: [ "int", "long" ] } + - commandFailedEvent: + commandName: abortTransaction diff --git a/specifications/client-side-operations-timeout/tests/waitQueueTimeout.json b/specifications/client-side-operations-timeout/tests/waitQueueTimeout.json new file mode 100644 index 00000000000..138d5cc1618 --- /dev/null +++ b/specifications/client-side-operations-timeout/tests/waitQueueTimeout.json @@ -0,0 +1,176 @@ +{ + "description": "WaitQueueTimeoutError does not clear the pool", + "schemaVersion": "1.9", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "failPointClient", + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "client", + "uriOptions": { + "maxPoolSize": 1, + "appname": "waitQueueTimeoutErrorTest" + }, + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "poolClearedEvent" + ] + } + }, + { + "database": { + "id": "database", + "client": "client", + 
"databaseName": "test" + } + } + ], + "tests": [ + { + "description": "WaitQueueTimeoutError does not clear the pool", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "blockConnection": true, + "blockTimeMS": 500, + "appName": "waitQueueTimeoutErrorTest" + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "thread": { + "id": "thread0" + } + } + ] + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "thread": "thread0", + "operation": { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "ping": 1 + }, + "commandName": "ping" + } + } + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "commandStartedEvent": { + "commandName": "ping" + } + }, + "count": 1 + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "timeoutMS": 100, + "command": { + "hello": 1 + }, + "commandName": "hello" + }, + "expectError": { + "isTimeoutError": true + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "thread": "thread0" + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "command": { + "hello": 1 + }, + "commandName": "hello" + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "command", + "events": [ + { + "commandStartedEvent": { + "commandName": "ping", + "databaseName": "test", + "command": { + "ping": 1 + } + } + }, + { + "commandStartedEvent": { + "commandName": "hello", + "databaseName": "test", + "command": { + "hello": 1 + } + } + } + ] + }, + { + "client": "client", + "eventType": "cmap", + "events": [] + } + ] + } + ] +} diff --git 
a/specifications/client-side-operations-timeout/tests/waitQueueTimeout.yml b/specifications/client-side-operations-timeout/tests/waitQueueTimeout.yml new file mode 100644 index 00000000000..dca58df7a34 --- /dev/null +++ b/specifications/client-side-operations-timeout/tests/waitQueueTimeout.yml @@ -0,0 +1,105 @@ +description: "WaitQueueTimeoutError does not clear the pool" + +schemaVersion: "1.9" + +runOnRequirements: + - minServerVersion: "4.4" + topologies: ["single", "replicaset", "sharded"] + +createEntities: + - client: + id: &failPointClient failPointClient + useMultipleMongoses: false + - client: + id: &client client + uriOptions: + maxPoolSize: 1 + appname: &appname waitQueueTimeoutErrorTest + useMultipleMongoses: false + observeEvents: + - commandStartedEvent + - poolClearedEvent + - database: + id: &database database + client: *client + databaseName: &databaseName test + +tests: + - description: "WaitQueueTimeoutError does not clear the pool" + operations: + - name: failPoint + object: testRunner + arguments: + client: *failPointClient + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["ping"] + blockConnection: true + blockTimeMS: 500 + appName: *appname + # Start thread. + - name: createEntities + object: testRunner + arguments: + entities: + - thread: + id: &thread0 thread0 + - name: runOnThread + object: testRunner + arguments: + thread: *thread0 + operation: + name: runCommand + object: *database + arguments: + command: { ping: 1 } + commandName: ping + # Wait for the thread to checkout the only connection (maxPoolSize=1). + - name: waitForEvent + object: testRunner + arguments: + client: *client + event: + commandStartedEvent: + commandName: ping + count: 1 + # Run another command with a short timeout to make it likely to get a WaitQueueTimeoutError. 
+ - name: runCommand + object: *database + arguments: + timeoutMS: 100 + command: { hello: 1 } + commandName: hello + expectError: + isTimeoutError: true + - name: waitForThread + object: testRunner + arguments: + thread: *thread0 + # Run another command with no timeout to ensure the pool is not cleared. + - name: runCommand + object: *database + arguments: + command: { hello: 1 } + commandName: hello + + expectEvents: + - client: *client + eventType: command + events: + - commandStartedEvent: + commandName: ping + databaseName: *databaseName + command: + ping: 1 + - commandStartedEvent: + commandName: hello + databaseName: *databaseName + command: + hello: 1 + # No poolClearedEvent. + - client: *client + eventType: cmap + events: [] diff --git a/specifications/crud/tests/unified/bypassDocumentValidation.json b/specifications/crud/tests/unified/bypassDocumentValidation.json new file mode 100644 index 00000000000..aff2d37f818 --- /dev/null +++ b/specifications/crud/tests/unified/bypassDocumentValidation.json @@ -0,0 +1,493 @@ +{ + "description": "bypassDocumentValidation", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "3.2", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll" + } + } + ], + "initialData": [ + { + "collectionName": "coll", + "databaseName": "crud", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "tests": [ + { + "description": "Aggregate with $out passes bypassDocumentValidation: false", + "operations": [ + { + "object": "collection0", + "name": "aggregate", + "arguments": { + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + 
"$gt": 1 + } + } + }, + { + "$out": "other_test_collection" + } + ], + "bypassDocumentValidation": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "aggregate": "coll", + "pipeline": [ + { + "$sort": { + "x": 1 + } + }, + { + "$match": { + "_id": { + "$gt": 1 + } + } + }, + { + "$out": "other_test_collection" + } + ], + "bypassDocumentValidation": false + }, + "commandName": "aggregate", + "databaseName": "crud" + } + } + ] + } + ] + }, + { + "description": "BulkWrite passes bypassDocumentValidation: false", + "operations": [ + { + "object": "collection0", + "name": "bulkWrite", + "arguments": { + "requests": [ + { + "insertOne": { + "document": { + "_id": 4, + "x": 44 + } + } + } + ], + "bypassDocumentValidation": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 4, + "x": 44 + } + ], + "bypassDocumentValidation": false + } + } + } + ] + } + ] + }, + { + "description": "FindOneAndReplace passes bypassDocumentValidation: false", + "operations": [ + { + "object": "collection0", + "name": "findOneAndReplace", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 32 + }, + "bypassDocumentValidation": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll", + "query": { + "_id": { + "$gt": 1 + } + }, + "update": { + "x": 32 + }, + "bypassDocumentValidation": false + } + } + } + ] + } + ] + }, + { + "description": "FindOneAndUpdate passes bypassDocumentValidation: false", + "operations": [ + { + "object": "collection0", + "name": "findOneAndUpdate", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "bypassDocumentValidation": false + } + } + ], + "expectEvents": [ + { + "client": 
"client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "findAndModify": "coll", + "query": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "bypassDocumentValidation": false + } + } + } + ] + } + ] + }, + { + "description": "InsertMany passes bypassDocumentValidation: false", + "operations": [ + { + "object": "collection0", + "name": "insertMany", + "arguments": { + "documents": [ + { + "_id": 4, + "x": 44 + } + ], + "bypassDocumentValidation": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 4, + "x": 44 + } + ], + "bypassDocumentValidation": false + } + } + } + ] + } + ] + }, + { + "description": "InsertOne passes bypassDocumentValidation: false", + "operations": [ + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "document": { + "_id": 4, + "x": 44 + }, + "bypassDocumentValidation": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "insert": "coll", + "documents": [ + { + "_id": 4, + "x": 44 + } + ], + "bypassDocumentValidation": false + } + } + } + ] + } + ] + }, + { + "description": "ReplaceOne passes bypassDocumentValidation: false", + "operations": [ + { + "object": "collection0", + "name": "replaceOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "replacement": { + "x": 32 + }, + "bypassDocumentValidation": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "x": 32 + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "bypassDocumentValidation": false + } + } + } + ] + } + ] + }, + { + "description": "UpdateMany passes bypassDocumentValidation: false", 
+ "operations": [ + { + "object": "collection0", + "name": "updateMany", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "bypassDocumentValidation": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": true, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "bypassDocumentValidation": false + } + } + } + ] + } + ] + }, + { + "description": "UpdateOne passes bypassDocumentValidation: false", + "operations": [ + { + "object": "collection0", + "name": "updateOne", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "$inc": { + "x": 1 + } + }, + "bypassDocumentValidation": false + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "update": "coll", + "updates": [ + { + "q": { + "_id": { + "$gt": 1 + } + }, + "u": { + "$inc": { + "x": 1 + } + }, + "multi": { + "$$unsetOrMatches": false + }, + "upsert": { + "$$unsetOrMatches": false + } + } + ], + "bypassDocumentValidation": false + } + } + } + ] + } + ] + } + ] +} diff --git a/specifications/crud/tests/unified/bypassDocumentValidation.yml b/specifications/crud/tests/unified/bypassDocumentValidation.yml new file mode 100644 index 00000000000..50825647521 --- /dev/null +++ b/specifications/crud/tests/unified/bypassDocumentValidation.yml @@ -0,0 +1,222 @@ +description: bypassDocumentValidation + +schemaVersion: '1.4' + +runOnRequirements: + - + minServerVersion: '3.2' + serverless: forbid + +createEntities: + - + client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - + database: + id: &database0 database0 + client: client0 + databaseName: &database_name crud + - + collection: + id: &collection0 collection0 + database: database0 + collectionName: 
&collection_name coll + +initialData: + - + collectionName: *collection_name + databaseName: *database_name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + +tests: + - + description: 'Aggregate with $out passes bypassDocumentValidation: false' + operations: + - + object: *collection0 + name: aggregate + arguments: + pipeline: &pipeline + - { $sort: { x: 1 } } + - { $match: { _id: { $gt: 1 } } } + - { $out: other_test_collection } + bypassDocumentValidation: false + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + aggregate: *collection_name + pipeline: *pipeline + bypassDocumentValidation: false + commandName: aggregate + databaseName: *database_name + - + description: 'BulkWrite passes bypassDocumentValidation: false' + operations: + - + object: *collection0 + name: bulkWrite + arguments: + requests: + - + insertOne: + document: &inserted_document { _id: 4, x: 44 } + bypassDocumentValidation: false + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + insert: *collection_name + documents: + - *inserted_document + bypassDocumentValidation: false + - + description: 'FindOneAndReplace passes bypassDocumentValidation: false' + operations: + - + object: *collection0 + name: findOneAndReplace + arguments: + filter: &filter { _id: { $gt: 1 } } + replacement: &replacement { x: 32 } + bypassDocumentValidation: false + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + findAndModify: *collection_name + query: *filter + update: *replacement + bypassDocumentValidation: false + - + description: 'FindOneAndUpdate passes bypassDocumentValidation: false' + operations: + - + object: *collection0 + name: findOneAndUpdate + arguments: + filter: *filter + update: &update { $inc: { x: 1 } } + bypassDocumentValidation: false + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + findAndModify: *collection_name + query: *filter + 
update: *update + bypassDocumentValidation: false + - + description: 'InsertMany passes bypassDocumentValidation: false' + operations: + - + object: *collection0 + name: insertMany + arguments: + documents: + - *inserted_document + bypassDocumentValidation: false + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + insert: *collection_name + documents: + - *inserted_document + bypassDocumentValidation: false + - + description: 'InsertOne passes bypassDocumentValidation: false' + operations: + - + object: *collection0 + name: insertOne + arguments: + document: *inserted_document + bypassDocumentValidation: false + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + insert: *collection_name + documents: + - *inserted_document + bypassDocumentValidation: false + - + description: 'ReplaceOne passes bypassDocumentValidation: false' + operations: + - + object: *collection0 + name: replaceOne + arguments: + filter: *filter + replacement: *replacement + bypassDocumentValidation: false + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + update: *collection_name + updates: + - + q: *filter + u: *replacement + multi: { $$unsetOrMatches: false } + upsert: { $$unsetOrMatches: false } + bypassDocumentValidation: false + - + description: 'UpdateMany passes bypassDocumentValidation: false' + operations: + - + object: *collection0 + name: updateMany + arguments: + filter: *filter + update: *update + bypassDocumentValidation: false + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + update: *collection_name + updates: + - + q: *filter + u: *update + multi: true + upsert: { $$unsetOrMatches: false } + bypassDocumentValidation: false + - + description: 'UpdateOne passes bypassDocumentValidation: false' + operations: + - + object: *collection0 + name: updateOne + arguments: + filter: *filter + update: *update + bypassDocumentValidation: false + expectEvents: 
+ - client: *client0 + events: + - commandStartedEvent: + command: + update: *collection_name + updates: + - + q: *filter + u: *update + multi: { $$unsetOrMatches: false } + upsert: { $$unsetOrMatches: false } + bypassDocumentValidation: false diff --git a/specifications/crud/tests/unified/estimatedDocumentCount.json b/specifications/crud/tests/unified/estimatedDocumentCount.json index 1b650c1cb6c..3577d9006b5 100644 --- a/specifications/crud/tests/unified/estimatedDocumentCount.json +++ b/specifications/crud/tests/unified/estimatedDocumentCount.json @@ -249,7 +249,7 @@ "name": "estimatedDocumentCount", "object": "collection0", "expectError": { - "isError": true + "isClientError": true } } ], diff --git a/specifications/crud/tests/unified/estimatedDocumentCount.yml b/specifications/crud/tests/unified/estimatedDocumentCount.yml index 12f33cc7e51..22e1d3587f7 100644 --- a/specifications/crud/tests/unified/estimatedDocumentCount.yml +++ b/specifications/crud/tests/unified/estimatedDocumentCount.yml @@ -130,7 +130,7 @@ tests: - name: estimatedDocumentCount object: *collection0 expectError: - isError: true + isClientError: true expectEvents: - client: *client0 events: diff --git a/specifications/crud/tests/unified/find.json b/specifications/crud/tests/unified/find.json index 275d5d351a1..325cd96c218 100644 --- a/specifications/crud/tests/unified/find.json +++ b/specifications/crud/tests/unified/find.json @@ -151,6 +151,154 @@ ] } ] + }, + { + "description": "Find with filter", + "operations": [ + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "Find with filter, sort, skip, and limit", + "operations": [ + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": { + "_id": { + "$gt": 2 + } + }, + "sort": { + "_id": 1 + }, + "skip": 2, + "limit": 2 + }, + "expectResult": [ + { + "_id": 5, + "x": 55 + }, + { + "_id": 
6, + "x": 66 + } + ] + } + ] + }, + { + "description": "Find with limit, sort, and batchsize", + "operations": [ + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": {}, + "sort": { + "_id": 1 + }, + "limit": 4, + "batchSize": 2 + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "Find with batchSize equal to limit", + "operations": [ + { + "object": "collection0", + "name": "find", + "arguments": { + "filter": { + "_id": { + "$gt": 1 + } + }, + "sort": { + "_id": 1 + }, + "limit": 4, + "batchSize": 4 + }, + "expectResult": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 5, + "x": 55 + } + ] + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "find": "coll0", + "filter": { + "_id": { + "$gt": 1 + } + }, + "limit": 4, + "batchSize": 5 + }, + "commandName": "find", + "databaseName": "find-tests" + } + } + ] + } + ] } ] } diff --git a/specifications/crud/tests/unified/find.yml b/specifications/crud/tests/unified/find.yml index 5615f072348..3a09c4d830f 100644 --- a/specifications/crud/tests/unified/find.yml +++ b/specifications/crud/tests/unified/find.yml @@ -65,4 +65,71 @@ tests: batchSize: 2 commandName: getMore databaseName: *database0Name - + - + description: 'Find with filter' + operations: + - + object: *collection0 + name: find + arguments: + filter: { _id: 1 } + expectResult: + - { _id: 1, x: 11 } + - + description: 'Find with filter, sort, skip, and limit' + operations: + - + object: *collection0 + name: find + arguments: + filter: { _id: { $gt: 2 } } + sort: { _id: 1 } + skip: 2 + limit: 2 + expectResult: + - { _id: 5, x: 55 } + - { _id: 6, x: 66 } + - + description: 'Find with limit, sort, and batchsize' + operations: + - + object: *collection0 + name: find + arguments: + 
filter: { } + sort: { _id: 1 } + limit: 4 + batchSize: 2 + expectResult: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + - { _id: 4, x: 44 } + - + description: 'Find with batchSize equal to limit' + operations: + - + object: *collection0 + name: find + arguments: + filter: { _id: { $gt: 1 } } + sort: { _id: 1 } + limit: 4 + batchSize: 4 + expectResult: + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + - { _id: 4, x: 44 } + - { _id: 5, x: 55 } + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + find: *collection0Name + filter: { _id: { $gt: 1 } } + limit: 4 + # Drivers use limit + 1 for batchSize to ensure the server closes the cursor + batchSize: 5 + commandName: find + databaseName: *database0Name diff --git a/specifications/max-staleness/tests/ReplicaSetNoPrimary/DefaultNoMaxStaleness.json b/specifications/max-staleness/tests/ReplicaSetNoPrimary/DefaultNoMaxStaleness.json index 5afebbbdcb2..db8b061b303 100644 --- a/specifications/max-staleness/tests/ReplicaSetNoPrimary/DefaultNoMaxStaleness.json +++ b/specifications/max-staleness/tests/ReplicaSetNoPrimary/DefaultNoMaxStaleness.json @@ -7,7 +7,7 @@ "type": "RSSecondary", "avg_rtt_ms": 50, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -19,7 +19,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -37,7 +37,7 @@ "type": "RSSecondary", "avg_rtt_ms": 50, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -49,7 +49,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -63,7 +63,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 
21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/specifications/max-staleness/tests/ReplicaSetNoPrimary/DefaultNoMaxStaleness.yml b/specifications/max-staleness/tests/ReplicaSetNoPrimary/DefaultNoMaxStaleness.yml index 5f9750ef7be..793aa10eaaa 100644 --- a/specifications/max-staleness/tests/ReplicaSetNoPrimary/DefaultNoMaxStaleness.yml +++ b/specifications/max-staleness/tests/ReplicaSetNoPrimary/DefaultNoMaxStaleness.yml @@ -8,14 +8,14 @@ topology_description: type: RSSecondary avg_rtt_ms: 50 # Too far. lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "1000001"}} - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "1"}} # Very stale. read_preference: mode: Nearest diff --git a/specifications/max-staleness/tests/ReplicaSetNoPrimary/LastUpdateTime.json b/specifications/max-staleness/tests/ReplicaSetNoPrimary/LastUpdateTime.json index 492d8a2f625..10b6f287865 100644 --- a/specifications/max-staleness/tests/ReplicaSetNoPrimary/LastUpdateTime.json +++ b/specifications/max-staleness/tests/ReplicaSetNoPrimary/LastUpdateTime.json @@ -13,7 +13,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "b:27017", @@ -25,7 +25,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "c:27017", @@ -37,7 +37,7 @@ "$numberLong": "1" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] }, @@ -56,7 +56,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "b:27017", @@ -68,7 +68,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], "in_latency_window": [ @@ -82,7 +82,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] } diff --git a/specifications/max-staleness/tests/ReplicaSetNoPrimary/LastUpdateTime.yml 
b/specifications/max-staleness/tests/ReplicaSetNoPrimary/LastUpdateTime.yml index 8bb42d8c621..20169417131 100644 --- a/specifications/max-staleness/tests/ReplicaSetNoPrimary/LastUpdateTime.yml +++ b/specifications/max-staleness/tests/ReplicaSetNoPrimary/LastUpdateTime.yml @@ -8,21 +8,21 @@ topology_description: avg_rtt_ms: 5 lastUpdateTime: 1 lastWrite: {lastWriteDate: {$numberLong: "125002"}} - maxWireVersion: 6 + maxWireVersion: 21 - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 50 # Too far. lastUpdateTime: 25002 # Not used when there's no primary. lastWrite: {lastWriteDate: {$numberLong: "2"}} # 125 sec stale + 25 sec heartbeat <= 150 sec maxStaleness. - maxWireVersion: 6 + maxWireVersion: 21 - &3 address: c:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 25001 lastWrite: {lastWriteDate: {$numberLong: "1"}} # Too stale. - maxWireVersion: 6 + maxWireVersion: 21 read_preference: mode: Nearest maxStalenessSeconds: 150 diff --git a/specifications/max-staleness/tests/ReplicaSetNoPrimary/Nearest.json b/specifications/max-staleness/tests/ReplicaSetNoPrimary/Nearest.json index 6602561c1dc..38b9986500d 100644 --- a/specifications/max-staleness/tests/ReplicaSetNoPrimary/Nearest.json +++ b/specifications/max-staleness/tests/ReplicaSetNoPrimary/Nearest.json @@ -13,7 +13,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "b:27017", @@ -25,7 +25,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "c:27017", @@ -37,7 +37,7 @@ "$numberLong": "1" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] }, @@ -56,7 +56,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "b:27017", @@ -68,7 +68,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], "in_latency_window": [ @@ -82,7 +82,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] } diff --git 
a/specifications/max-staleness/tests/ReplicaSetNoPrimary/Nearest.yml b/specifications/max-staleness/tests/ReplicaSetNoPrimary/Nearest.yml index 7c3fbe5eba7..783831f4771 100644 --- a/specifications/max-staleness/tests/ReplicaSetNoPrimary/Nearest.yml +++ b/specifications/max-staleness/tests/ReplicaSetNoPrimary/Nearest.yml @@ -8,21 +8,21 @@ topology_description: avg_rtt_ms: 5 lastUpdateTime: 0 lastWrite: {lastWriteDate: {$numberLong: "125002"}} - maxWireVersion: 6 + maxWireVersion: 21 - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 50 # Too far. lastUpdateTime: 0 lastWrite: {lastWriteDate: {$numberLong: "2"}} # 125 sec stale + 25 sec heartbeat <= 150 sec maxStaleness. - maxWireVersion: 6 + maxWireVersion: 21 - &3 address: c:27017 avg_rtt_ms: 5 lastUpdateTime: 0 type: RSSecondary lastWrite: {lastWriteDate: {$numberLong: "1"}} # Too stale. - maxWireVersion: 6 + maxWireVersion: 21 read_preference: mode: Nearest maxStalenessSeconds: 150 diff --git a/specifications/max-staleness/tests/ReplicaSetNoPrimary/Nearest2.json b/specifications/max-staleness/tests/ReplicaSetNoPrimary/Nearest2.json index 16d9a673bd8..586b47ccd2b 100644 --- a/specifications/max-staleness/tests/ReplicaSetNoPrimary/Nearest2.json +++ b/specifications/max-staleness/tests/ReplicaSetNoPrimary/Nearest2.json @@ -13,7 +13,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "b:27017", @@ -25,7 +25,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "c:27017", @@ -37,7 +37,7 @@ "$numberLong": "1" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] }, @@ -56,7 +56,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "b:27017", @@ -68,7 +68,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], "in_latency_window": [ @@ -82,7 +82,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] } diff --git 
a/specifications/max-staleness/tests/ReplicaSetNoPrimary/Nearest2.yml b/specifications/max-staleness/tests/ReplicaSetNoPrimary/Nearest2.yml index e162f04c93c..867b7706a4a 100644 --- a/specifications/max-staleness/tests/ReplicaSetNoPrimary/Nearest2.yml +++ b/specifications/max-staleness/tests/ReplicaSetNoPrimary/Nearest2.yml @@ -8,21 +8,21 @@ topology_description: avg_rtt_ms: 50 # Too far. lastUpdateTime: 0 lastWrite: {lastWriteDate: {$numberLong: "125002"}} - maxWireVersion: 6 + maxWireVersion: 21 - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 lastWrite: {lastWriteDate: {$numberLong: "2"}} # 125 sec stale + 25 sec heartbeat <= 150 sec maxStaleness. - maxWireVersion: 6 + maxWireVersion: 21 - &3 address: c:27017 avg_rtt_ms: 5 lastUpdateTime: 0 type: RSSecondary lastWrite: {lastWriteDate: {$numberLong: "1"}} # Too stale. - maxWireVersion: 6 + maxWireVersion: 21 read_preference: mode: Nearest maxStalenessSeconds: 150 diff --git a/specifications/max-staleness/tests/ReplicaSetNoPrimary/OneKnownTwoUnavailable.json b/specifications/max-staleness/tests/ReplicaSetNoPrimary/OneKnownTwoUnavailable.json index 54f318872fa..15a62090e3f 100644 --- a/specifications/max-staleness/tests/ReplicaSetNoPrimary/OneKnownTwoUnavailable.json +++ b/specifications/max-staleness/tests/ReplicaSetNoPrimary/OneKnownTwoUnavailable.json @@ -17,7 +17,7 @@ { "address": "c:27017", "type": "RSSecondary", - "maxWireVersion": 6, + "maxWireVersion": 21, "avg_rtt_ms": 5, "lastWrite": { "lastWriteDate": { @@ -35,7 +35,7 @@ { "address": "c:27017", "type": "RSSecondary", - "maxWireVersion": 6, + "maxWireVersion": 21, "avg_rtt_ms": 5, "lastWrite": { "lastWriteDate": { @@ -48,7 +48,7 @@ { "address": "c:27017", "type": "RSSecondary", - "maxWireVersion": 6, + "maxWireVersion": 21, "avg_rtt_ms": 5, "lastWrite": { "lastWriteDate": { diff --git a/specifications/max-staleness/tests/ReplicaSetNoPrimary/OneKnownTwoUnavailable.yml 
b/specifications/max-staleness/tests/ReplicaSetNoPrimary/OneKnownTwoUnavailable.yml index c1d3b502b86..ccbec183cac 100644 --- a/specifications/max-staleness/tests/ReplicaSetNoPrimary/OneKnownTwoUnavailable.yml +++ b/specifications/max-staleness/tests/ReplicaSetNoPrimary/OneKnownTwoUnavailable.yml @@ -15,7 +15,7 @@ topology_description: - &3 address: c:27017 type: RSSecondary - maxWireVersion: 6 + maxWireVersion: 21 avg_rtt_ms: 5 lastWrite: {lastWriteDate: {$numberLong: "1"}} read_preference: diff --git a/specifications/max-staleness/tests/ReplicaSetNoPrimary/PrimaryPreferred.json b/specifications/max-staleness/tests/ReplicaSetNoPrimary/PrimaryPreferred.json index 7956b8e516d..7c036f725c5 100644 --- a/specifications/max-staleness/tests/ReplicaSetNoPrimary/PrimaryPreferred.json +++ b/specifications/max-staleness/tests/ReplicaSetNoPrimary/PrimaryPreferred.json @@ -8,7 +8,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -20,7 +20,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -39,7 +39,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -53,7 +53,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" diff --git a/specifications/max-staleness/tests/ReplicaSetNoPrimary/PrimaryPreferred.yml b/specifications/max-staleness/tests/ReplicaSetNoPrimary/PrimaryPreferred.yml index b4706e55232..c01aff6f799 100644 --- a/specifications/max-staleness/tests/ReplicaSetNoPrimary/PrimaryPreferred.yml +++ b/specifications/max-staleness/tests/ReplicaSetNoPrimary/PrimaryPreferred.yml @@ -9,14 +9,14 @@ 
topology_description: type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "1000001"}} - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "1"}} # Very stale. read_preference: mode: PrimaryPreferred diff --git a/specifications/max-staleness/tests/ReplicaSetNoPrimary/PrimaryPreferred_tags.json b/specifications/max-staleness/tests/ReplicaSetNoPrimary/PrimaryPreferred_tags.json index 453dce6605a..56fcb156bb8 100644 --- a/specifications/max-staleness/tests/ReplicaSetNoPrimary/PrimaryPreferred_tags.json +++ b/specifications/max-staleness/tests/ReplicaSetNoPrimary/PrimaryPreferred_tags.json @@ -13,7 +13,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "tokyo" } @@ -28,7 +28,7 @@ "$numberLong": "1" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "nyc" } @@ -58,7 +58,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "tokyo" } @@ -75,7 +75,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "tokyo" } diff --git a/specifications/max-staleness/tests/ReplicaSetNoPrimary/PrimaryPreferred_tags.yml b/specifications/max-staleness/tests/ReplicaSetNoPrimary/PrimaryPreferred_tags.yml index cd285d5fafa..a5df80ff36c 100644 --- a/specifications/max-staleness/tests/ReplicaSetNoPrimary/PrimaryPreferred_tags.yml +++ b/specifications/max-staleness/tests/ReplicaSetNoPrimary/PrimaryPreferred_tags.yml @@ -12,7 +12,7 @@ topology_description: avg_rtt_ms: 5 lastUpdateTime: 0 lastWrite: {lastWriteDate: {$numberLong: "125002"}} - maxWireVersion: 6 + maxWireVersion: 21 tags: data_center: tokyo # Matches second tag set. 
- &2 @@ -21,7 +21,7 @@ topology_description: avg_rtt_ms: 5 lastUpdateTime: 0 lastWrite: {lastWriteDate: {$numberLong: "1"}} # Too stale. - maxWireVersion: 6 + maxWireVersion: 21 tags: data_center: nyc read_preference: diff --git a/specifications/max-staleness/tests/ReplicaSetNoPrimary/Secondary.json b/specifications/max-staleness/tests/ReplicaSetNoPrimary/Secondary.json index b383f275dc4..5a4b0c82260 100644 --- a/specifications/max-staleness/tests/ReplicaSetNoPrimary/Secondary.json +++ b/specifications/max-staleness/tests/ReplicaSetNoPrimary/Secondary.json @@ -8,7 +8,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "125002" @@ -23,7 +23,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -38,7 +38,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -53,7 +53,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -80,7 +80,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -97,7 +97,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" diff --git a/specifications/max-staleness/tests/ReplicaSetNoPrimary/Secondary.yml b/specifications/max-staleness/tests/ReplicaSetNoPrimary/Secondary.yml index 88c5ab549db..93e81841b4f 100644 --- a/specifications/max-staleness/tests/ReplicaSetNoPrimary/Secondary.yml +++ b/specifications/max-staleness/tests/ReplicaSetNoPrimary/Secondary.yml @@ -9,7 +9,7 @@ 
topology_description: type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "125002"}} tags: data_center: tokyo # No match, but its lastWriteDate is used in estimate. @@ -18,7 +18,7 @@ topology_description: type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "2"}} # 125 sec stale + 25 sec heartbeat <= 150 sec maxStaleness. tags: data_center: nyc @@ -27,7 +27,7 @@ topology_description: type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "1"}} # Too stale. tags: data_center: nyc @@ -36,7 +36,7 @@ topology_description: type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "2"}} tags: data_center: tokyo # No match. diff --git a/specifications/max-staleness/tests/ReplicaSetNoPrimary/SecondaryPreferred.json b/specifications/max-staleness/tests/ReplicaSetNoPrimary/SecondaryPreferred.json index 7bce7d0aa42..19a948e928d 100644 --- a/specifications/max-staleness/tests/ReplicaSetNoPrimary/SecondaryPreferred.json +++ b/specifications/max-staleness/tests/ReplicaSetNoPrimary/SecondaryPreferred.json @@ -7,7 +7,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -19,7 +19,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -38,7 +38,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -52,7 +52,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, 
"lastWrite": { "lastWriteDate": { "$numberLong": "1000001" diff --git a/specifications/max-staleness/tests/ReplicaSetNoPrimary/SecondaryPreferred.yml b/specifications/max-staleness/tests/ReplicaSetNoPrimary/SecondaryPreferred.yml index 0860a568ea1..f997882bc0a 100644 --- a/specifications/max-staleness/tests/ReplicaSetNoPrimary/SecondaryPreferred.yml +++ b/specifications/max-staleness/tests/ReplicaSetNoPrimary/SecondaryPreferred.yml @@ -8,14 +8,14 @@ topology_description: type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "1000001"}} - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "1"}} # Very stale. read_preference: mode: SecondaryPreferred diff --git a/specifications/max-staleness/tests/ReplicaSetNoPrimary/SecondaryPreferred_tags.json b/specifications/max-staleness/tests/ReplicaSetNoPrimary/SecondaryPreferred_tags.json index 32c9ca770bd..b4633d88f37 100644 --- a/specifications/max-staleness/tests/ReplicaSetNoPrimary/SecondaryPreferred_tags.json +++ b/specifications/max-staleness/tests/ReplicaSetNoPrimary/SecondaryPreferred_tags.json @@ -8,7 +8,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "125002" @@ -23,7 +23,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -38,7 +38,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -53,7 +53,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -80,7 +80,7 @@ "type": "RSSecondary", 
"avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -97,7 +97,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" diff --git a/specifications/max-staleness/tests/ReplicaSetNoPrimary/SecondaryPreferred_tags.yml b/specifications/max-staleness/tests/ReplicaSetNoPrimary/SecondaryPreferred_tags.yml index 5a13cc73237..f4a6c8f6b64 100644 --- a/specifications/max-staleness/tests/ReplicaSetNoPrimary/SecondaryPreferred_tags.yml +++ b/specifications/max-staleness/tests/ReplicaSetNoPrimary/SecondaryPreferred_tags.yml @@ -9,7 +9,7 @@ topology_description: type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "125002"}} tags: data_center: tokyo # No match, but its lastWriteDate is used in estimate. @@ -18,7 +18,7 @@ topology_description: type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "2"}} # 125 sec stale + 25 sec heartbeat <= 150 sec maxStaleness. tags: data_center: nyc @@ -27,7 +27,7 @@ topology_description: type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "1"}} # Too stale. tags: data_center: nyc @@ -36,7 +36,7 @@ topology_description: type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "2"}} tags: data_center: tokyo # No match. 
diff --git a/specifications/max-staleness/tests/ReplicaSetNoPrimary/ZeroMaxStaleness.json b/specifications/max-staleness/tests/ReplicaSetNoPrimary/ZeroMaxStaleness.json index fd84cd11931..ccb916f1072 100644 --- a/specifications/max-staleness/tests/ReplicaSetNoPrimary/ZeroMaxStaleness.json +++ b/specifications/max-staleness/tests/ReplicaSetNoPrimary/ZeroMaxStaleness.json @@ -7,7 +7,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -19,7 +19,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/specifications/max-staleness/tests/ReplicaSetNoPrimary/ZeroMaxStaleness.yml b/specifications/max-staleness/tests/ReplicaSetNoPrimary/ZeroMaxStaleness.yml index e37f6b20b1b..e6be41ca673 100644 --- a/specifications/max-staleness/tests/ReplicaSetNoPrimary/ZeroMaxStaleness.yml +++ b/specifications/max-staleness/tests/ReplicaSetNoPrimary/ZeroMaxStaleness.yml @@ -8,14 +8,14 @@ topology_description: type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "2"}} - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "1"}} read_preference: mode: Nearest diff --git a/specifications/max-staleness/tests/ReplicaSetWithPrimary/DefaultNoMaxStaleness.json b/specifications/max-staleness/tests/ReplicaSetWithPrimary/DefaultNoMaxStaleness.json index 35eaa9d69d6..00137cf69e5 100644 --- a/specifications/max-staleness/tests/ReplicaSetWithPrimary/DefaultNoMaxStaleness.json +++ b/specifications/max-staleness/tests/ReplicaSetWithPrimary/DefaultNoMaxStaleness.json @@ -7,7 +7,7 @@ "type": "RSPrimary", "avg_rtt_ms": 50, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, 
"lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -19,7 +19,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -37,7 +37,7 @@ "type": "RSPrimary", "avg_rtt_ms": 50, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -49,7 +49,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -63,7 +63,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/specifications/max-staleness/tests/ReplicaSetWithPrimary/DefaultNoMaxStaleness.yml b/specifications/max-staleness/tests/ReplicaSetWithPrimary/DefaultNoMaxStaleness.yml index 67cc8949318..1ea9b209c8f 100644 --- a/specifications/max-staleness/tests/ReplicaSetWithPrimary/DefaultNoMaxStaleness.yml +++ b/specifications/max-staleness/tests/ReplicaSetWithPrimary/DefaultNoMaxStaleness.yml @@ -8,14 +8,14 @@ topology_description: type: RSPrimary avg_rtt_ms: 50 # Too far. lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "1000001"}} - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "1"}} # Very stale. 
read_preference: mode: Nearest diff --git a/specifications/max-staleness/tests/ReplicaSetWithPrimary/LastUpdateTime.json b/specifications/max-staleness/tests/ReplicaSetWithPrimary/LastUpdateTime.json index 18450beaedd..9d1db2de65d 100644 --- a/specifications/max-staleness/tests/ReplicaSetWithPrimary/LastUpdateTime.json +++ b/specifications/max-staleness/tests/ReplicaSetWithPrimary/LastUpdateTime.json @@ -13,7 +13,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "b:27017", @@ -25,7 +25,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "c:27017", @@ -37,7 +37,7 @@ "$numberLong": "1" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] }, @@ -56,7 +56,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "b:27017", @@ -68,7 +68,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], "in_latency_window": [ @@ -82,7 +82,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] } diff --git a/specifications/max-staleness/tests/ReplicaSetWithPrimary/LastUpdateTime.yml b/specifications/max-staleness/tests/ReplicaSetWithPrimary/LastUpdateTime.yml index 694178bbd74..e4fbe1ff7d1 100644 --- a/specifications/max-staleness/tests/ReplicaSetWithPrimary/LastUpdateTime.yml +++ b/specifications/max-staleness/tests/ReplicaSetWithPrimary/LastUpdateTime.yml @@ -8,7 +8,7 @@ topology_description: avg_rtt_ms: 50 # Too far. lastUpdateTime: 1 lastWrite: {lastWriteDate: {$numberLong: "2"}} - maxWireVersion: 6 + maxWireVersion: 21 - &2 address: b:27017 type: RSSecondary @@ -17,14 +17,14 @@ topology_description: # Updated 125 sec after primary, so 125 sec stale. # 125 sec stale + 25 sec heartbeat <= 150 sec maxStaleness. 
lastWrite: {lastWriteDate: {$numberLong: "2"}} - maxWireVersion: 6 + maxWireVersion: 21 - &3 address: c:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 125001 lastWrite: {lastWriteDate: {$numberLong: "1"}} # Too stale. - maxWireVersion: 6 + maxWireVersion: 21 read_preference: mode: Nearest maxStalenessSeconds: 150 diff --git a/specifications/max-staleness/tests/ReplicaSetWithPrimary/LongHeartbeat.json b/specifications/max-staleness/tests/ReplicaSetWithPrimary/LongHeartbeat.json index b9fb407f9e8..b0636236cc8 100644 --- a/specifications/max-staleness/tests/ReplicaSetWithPrimary/LongHeartbeat.json +++ b/specifications/max-staleness/tests/ReplicaSetWithPrimary/LongHeartbeat.json @@ -8,7 +8,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -20,7 +20,7 @@ "type": "RSSecondary", "avg_rtt_ms": 50, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -39,7 +39,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -51,7 +51,7 @@ "type": "RSSecondary", "avg_rtt_ms": 50, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -65,7 +65,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/specifications/max-staleness/tests/ReplicaSetWithPrimary/LongHeartbeat.yml b/specifications/max-staleness/tests/ReplicaSetWithPrimary/LongHeartbeat.yml index 89cab474442..1e479c32e09 100644 --- a/specifications/max-staleness/tests/ReplicaSetWithPrimary/LongHeartbeat.yml +++ b/specifications/max-staleness/tests/ReplicaSetWithPrimary/LongHeartbeat.yml @@ -10,14 +10,14 @@ topology_description: type: 
RSPrimary avg_rtt_ms: 5 lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "1"}} - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 50 # Too far. lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "1"}} read_preference: mode: Nearest diff --git a/specifications/max-staleness/tests/ReplicaSetWithPrimary/LongHeartbeat2.json b/specifications/max-staleness/tests/ReplicaSetWithPrimary/LongHeartbeat2.json index b695e1caeb2..76edfcb836a 100644 --- a/specifications/max-staleness/tests/ReplicaSetWithPrimary/LongHeartbeat2.json +++ b/specifications/max-staleness/tests/ReplicaSetWithPrimary/LongHeartbeat2.json @@ -8,7 +8,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -20,7 +20,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/specifications/max-staleness/tests/ReplicaSetWithPrimary/LongHeartbeat2.yml b/specifications/max-staleness/tests/ReplicaSetWithPrimary/LongHeartbeat2.yml index c849a0eff98..836467bf1f7 100644 --- a/specifications/max-staleness/tests/ReplicaSetWithPrimary/LongHeartbeat2.yml +++ b/specifications/max-staleness/tests/ReplicaSetWithPrimary/LongHeartbeat2.yml @@ -10,14 +10,14 @@ topology_description: type: RSPrimary avg_rtt_ms: 5 lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "1"}} - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "1"}} read_preference: mode: Nearest diff --git a/specifications/max-staleness/tests/ReplicaSetWithPrimary/MaxStalenessTooSmall.json b/specifications/max-staleness/tests/ReplicaSetWithPrimary/MaxStalenessTooSmall.json index 
9b798d37da1..aa936e3c67a 100644 --- a/specifications/max-staleness/tests/ReplicaSetWithPrimary/MaxStalenessTooSmall.json +++ b/specifications/max-staleness/tests/ReplicaSetWithPrimary/MaxStalenessTooSmall.json @@ -8,7 +8,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -20,7 +20,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/specifications/max-staleness/tests/ReplicaSetWithPrimary/MaxStalenessTooSmall.yml b/specifications/max-staleness/tests/ReplicaSetWithPrimary/MaxStalenessTooSmall.yml index 82c8f81ae55..9edd4199bf6 100644 --- a/specifications/max-staleness/tests/ReplicaSetWithPrimary/MaxStalenessTooSmall.yml +++ b/specifications/max-staleness/tests/ReplicaSetWithPrimary/MaxStalenessTooSmall.yml @@ -11,14 +11,14 @@ topology_description: type: RSPrimary avg_rtt_ms: 5 lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "1"}} - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "1"}} read_preference: mode: Nearest diff --git a/specifications/max-staleness/tests/ReplicaSetWithPrimary/MaxStalenessWithModePrimary.json b/specifications/max-staleness/tests/ReplicaSetWithPrimary/MaxStalenessWithModePrimary.json index 1fa7bb4dd06..c24752a7f12 100644 --- a/specifications/max-staleness/tests/ReplicaSetWithPrimary/MaxStalenessWithModePrimary.json +++ b/specifications/max-staleness/tests/ReplicaSetWithPrimary/MaxStalenessWithModePrimary.json @@ -7,7 +7,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -19,7 +19,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, 
"lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/specifications/max-staleness/tests/ReplicaSetWithPrimary/MaxStalenessWithModePrimary.yml b/specifications/max-staleness/tests/ReplicaSetWithPrimary/MaxStalenessWithModePrimary.yml index dd62017011e..92358a702dd 100644 --- a/specifications/max-staleness/tests/ReplicaSetWithPrimary/MaxStalenessWithModePrimary.yml +++ b/specifications/max-staleness/tests/ReplicaSetWithPrimary/MaxStalenessWithModePrimary.yml @@ -9,14 +9,14 @@ topology_description: type: RSPrimary avg_rtt_ms: 5 lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "1"}} - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "1"}} read_preference: maxStalenessSeconds: 120 diff --git a/specifications/max-staleness/tests/ReplicaSetWithPrimary/Nearest.json b/specifications/max-staleness/tests/ReplicaSetWithPrimary/Nearest.json index 198be4a681d..d3a9535b093 100644 --- a/specifications/max-staleness/tests/ReplicaSetWithPrimary/Nearest.json +++ b/specifications/max-staleness/tests/ReplicaSetWithPrimary/Nearest.json @@ -13,7 +13,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "b:27017", @@ -25,7 +25,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "c:27017", @@ -37,7 +37,7 @@ "$numberLong": "1" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] }, @@ -56,7 +56,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "b:27017", @@ -68,7 +68,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], "in_latency_window": [ @@ -82,7 +82,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] } diff --git 
a/specifications/max-staleness/tests/ReplicaSetWithPrimary/Nearest.yml b/specifications/max-staleness/tests/ReplicaSetWithPrimary/Nearest.yml index 96eeb4b5592..5ef6c720a5e 100644 --- a/specifications/max-staleness/tests/ReplicaSetWithPrimary/Nearest.yml +++ b/specifications/max-staleness/tests/ReplicaSetWithPrimary/Nearest.yml @@ -8,21 +8,21 @@ topology_description: avg_rtt_ms: 5 lastUpdateTime: 0 lastWrite: {lastWriteDate: {$numberLong: "125002"}} - maxWireVersion: 6 + maxWireVersion: 21 - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 50 # Too far. lastUpdateTime: 0 lastWrite: {lastWriteDate: {$numberLong: "2"}} # 125 sec stale + 25 sec heartbeat <= 150 sec maxStaleness. - maxWireVersion: 6 + maxWireVersion: 21 - &3 address: c:27017 avg_rtt_ms: 5 lastUpdateTime: 0 type: RSSecondary lastWrite: {lastWriteDate: {$numberLong: "1"}} # Too stale. - maxWireVersion: 6 + maxWireVersion: 21 read_preference: mode: Nearest maxStalenessSeconds: 150 diff --git a/specifications/max-staleness/tests/ReplicaSetWithPrimary/Nearest2.json b/specifications/max-staleness/tests/ReplicaSetWithPrimary/Nearest2.json index 3ae629c898b..f91706e8040 100644 --- a/specifications/max-staleness/tests/ReplicaSetWithPrimary/Nearest2.json +++ b/specifications/max-staleness/tests/ReplicaSetWithPrimary/Nearest2.json @@ -13,7 +13,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "b:27017", @@ -25,7 +25,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "c:27017", @@ -37,7 +37,7 @@ "$numberLong": "1" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] }, @@ -56,7 +56,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "b:27017", @@ -68,7 +68,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], "in_latency_window": [ @@ -82,7 +82,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] } diff --git 
a/specifications/max-staleness/tests/ReplicaSetWithPrimary/Nearest2.yml b/specifications/max-staleness/tests/ReplicaSetWithPrimary/Nearest2.yml index 3ee119c3a73..7d4f706fc7a 100644 --- a/specifications/max-staleness/tests/ReplicaSetWithPrimary/Nearest2.yml +++ b/specifications/max-staleness/tests/ReplicaSetWithPrimary/Nearest2.yml @@ -8,21 +8,21 @@ topology_description: avg_rtt_ms: 50 # Too far. lastUpdateTime: 0 lastWrite: {lastWriteDate: {$numberLong: "125002"}} - maxWireVersion: 6 + maxWireVersion: 21 - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 lastWrite: {lastWriteDate: {$numberLong: "2"}} # 125 sec stale + 25 sec heartbeat <= 150 sec maxStaleness. - maxWireVersion: 6 + maxWireVersion: 21 - &3 address: c:27017 avg_rtt_ms: 5 lastUpdateTime: 0 type: RSSecondary lastWrite: {lastWriteDate: {$numberLong: "1"}} # Too stale. - maxWireVersion: 6 + maxWireVersion: 21 read_preference: mode: Nearest maxStalenessSeconds: 150 diff --git a/specifications/max-staleness/tests/ReplicaSetWithPrimary/Nearest_tags.json b/specifications/max-staleness/tests/ReplicaSetWithPrimary/Nearest_tags.json index 675df82631b..4ed0b9ed2ea 100644 --- a/specifications/max-staleness/tests/ReplicaSetWithPrimary/Nearest_tags.json +++ b/specifications/max-staleness/tests/ReplicaSetWithPrimary/Nearest_tags.json @@ -13,7 +13,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "tokyo" } @@ -28,7 +28,7 @@ "$numberLong": "1" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "nyc" } @@ -58,7 +58,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "tokyo" } @@ -75,7 +75,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "tokyo" } diff --git a/specifications/max-staleness/tests/ReplicaSetWithPrimary/Nearest_tags.yml b/specifications/max-staleness/tests/ReplicaSetWithPrimary/Nearest_tags.yml 
index ce2c3ab3f07..6db5d4362b5 100644 --- a/specifications/max-staleness/tests/ReplicaSetWithPrimary/Nearest_tags.yml +++ b/specifications/max-staleness/tests/ReplicaSetWithPrimary/Nearest_tags.yml @@ -12,7 +12,7 @@ topology_description: avg_rtt_ms: 5 lastUpdateTime: 0 lastWrite: {lastWriteDate: {$numberLong: "125002"}} - maxWireVersion: 6 + maxWireVersion: 21 tags: data_center: tokyo - &2 @@ -21,7 +21,7 @@ topology_description: avg_rtt_ms: 5 lastUpdateTime: 0 lastWrite: {lastWriteDate: {$numberLong: "1"}} # Too stale. - maxWireVersion: 6 + maxWireVersion: 21 tags: data_center: nyc read_preference: diff --git a/specifications/max-staleness/tests/ReplicaSetWithPrimary/PrimaryPreferred.json b/specifications/max-staleness/tests/ReplicaSetWithPrimary/PrimaryPreferred.json index 795b47a1115..7945530e6a9 100644 --- a/specifications/max-staleness/tests/ReplicaSetWithPrimary/PrimaryPreferred.json +++ b/specifications/max-staleness/tests/ReplicaSetWithPrimary/PrimaryPreferred.json @@ -8,7 +8,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -20,7 +20,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -39,7 +39,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -53,7 +53,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/specifications/max-staleness/tests/ReplicaSetWithPrimary/PrimaryPreferred.yml b/specifications/max-staleness/tests/ReplicaSetWithPrimary/PrimaryPreferred.yml index c1594c410f8..e38f33d7c1a 100644 --- a/specifications/max-staleness/tests/ReplicaSetWithPrimary/PrimaryPreferred.yml +++ 
b/specifications/max-staleness/tests/ReplicaSetWithPrimary/PrimaryPreferred.yml @@ -9,14 +9,14 @@ topology_description: type: RSPrimary avg_rtt_ms: 5 lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "1"}} - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "1"}} read_preference: mode: PrimaryPreferred diff --git a/specifications/max-staleness/tests/ReplicaSetWithPrimary/SecondaryPreferred.json b/specifications/max-staleness/tests/ReplicaSetWithPrimary/SecondaryPreferred.json index 5455708a70b..b433d6a4303 100644 --- a/specifications/max-staleness/tests/ReplicaSetWithPrimary/SecondaryPreferred.json +++ b/specifications/max-staleness/tests/ReplicaSetWithPrimary/SecondaryPreferred.json @@ -7,7 +7,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -19,7 +19,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -38,7 +38,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -52,7 +52,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" diff --git a/specifications/max-staleness/tests/ReplicaSetWithPrimary/SecondaryPreferred.yml b/specifications/max-staleness/tests/ReplicaSetWithPrimary/SecondaryPreferred.yml index 33c25f41297..ff35b9bdabf 100644 --- a/specifications/max-staleness/tests/ReplicaSetWithPrimary/SecondaryPreferred.yml +++ b/specifications/max-staleness/tests/ReplicaSetWithPrimary/SecondaryPreferred.yml @@ -8,14 +8,14 @@ topology_description: 
type: RSPrimary avg_rtt_ms: 5 lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "1000001"}} - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "1"}} # Very stale. read_preference: mode: SecondaryPreferred diff --git a/specifications/max-staleness/tests/ReplicaSetWithPrimary/SecondaryPreferred_tags.json b/specifications/max-staleness/tests/ReplicaSetWithPrimary/SecondaryPreferred_tags.json index 6670b54c892..e594af78320 100644 --- a/specifications/max-staleness/tests/ReplicaSetWithPrimary/SecondaryPreferred_tags.json +++ b/specifications/max-staleness/tests/ReplicaSetWithPrimary/SecondaryPreferred_tags.json @@ -8,7 +8,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "125002" @@ -20,7 +20,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -35,7 +35,7 @@ "type": "RSSecondary", "avg_rtt_ms": 50, "lastUpdateTime": 1, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -50,7 +50,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -65,7 +65,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -92,7 +92,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -107,7 +107,7 @@ "type": "RSSecondary", "avg_rtt_ms": 50, "lastUpdateTime": 1, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { 
"lastWriteDate": { "$numberLong": "1000001" @@ -124,7 +124,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" diff --git a/specifications/max-staleness/tests/ReplicaSetWithPrimary/SecondaryPreferred_tags.yml b/specifications/max-staleness/tests/ReplicaSetWithPrimary/SecondaryPreferred_tags.yml index 05301d21095..4d17a4d7fd9 100644 --- a/specifications/max-staleness/tests/ReplicaSetWithPrimary/SecondaryPreferred_tags.yml +++ b/specifications/max-staleness/tests/ReplicaSetWithPrimary/SecondaryPreferred_tags.yml @@ -9,14 +9,14 @@ topology_description: type: RSPrimary avg_rtt_ms: 5 lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "125002"}} - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "2"}} # 125 sec stale + 25 sec heartbeat <= 150 sec maxStaleness. tags: data_center: nyc @@ -25,7 +25,7 @@ topology_description: type: RSSecondary avg_rtt_ms: 50 # Too far. lastUpdateTime: 1 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "1000001"}} # Not used in estimate since we have a primary. tags: data_center: nyc @@ -34,7 +34,7 @@ topology_description: type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "1"}} # Too stale. tags: data_center: nyc @@ -43,7 +43,7 @@ topology_description: type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "2"}} tags: data_center: tokyo # No match. 
diff --git a/specifications/max-staleness/tests/ReplicaSetWithPrimary/SecondaryPreferred_tags2.json b/specifications/max-staleness/tests/ReplicaSetWithPrimary/SecondaryPreferred_tags2.json index 642fee1fb30..bc0953c6574 100644 --- a/specifications/max-staleness/tests/ReplicaSetWithPrimary/SecondaryPreferred_tags2.json +++ b/specifications/max-staleness/tests/ReplicaSetWithPrimary/SecondaryPreferred_tags2.json @@ -13,7 +13,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "b:27017", @@ -25,7 +25,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "tokyo" } @@ -40,7 +40,7 @@ "$numberLong": "1" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "nyc" } @@ -70,7 +70,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "tokyo" } @@ -87,7 +87,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "tokyo" } diff --git a/specifications/max-staleness/tests/ReplicaSetWithPrimary/SecondaryPreferred_tags2.yml b/specifications/max-staleness/tests/ReplicaSetWithPrimary/SecondaryPreferred_tags2.yml index 00ed08c0af5..82c5a799f7c 100644 --- a/specifications/max-staleness/tests/ReplicaSetWithPrimary/SecondaryPreferred_tags2.yml +++ b/specifications/max-staleness/tests/ReplicaSetWithPrimary/SecondaryPreferred_tags2.yml @@ -12,14 +12,14 @@ topology_description: avg_rtt_ms: 5 lastUpdateTime: 0 lastWrite: {lastWriteDate: {$numberLong: "125002"}} - maxWireVersion: 6 + maxWireVersion: 21 - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 lastWrite: {lastWriteDate: {$numberLong: "2"}} - maxWireVersion: 6 + maxWireVersion: 21 tags: data_center: tokyo - &3 @@ -28,7 +28,7 @@ topology_description: avg_rtt_ms: 5 lastUpdateTime: 0 lastWrite: {lastWriteDate: {$numberLong: "1"}} # Too stale. 
- maxWireVersion: 6 + maxWireVersion: 21 tags: data_center: nyc read_preference: diff --git a/specifications/max-staleness/tests/ReplicaSetWithPrimary/Secondary_tags.json b/specifications/max-staleness/tests/ReplicaSetWithPrimary/Secondary_tags.json index 502120dce67..2817cf9225b 100644 --- a/specifications/max-staleness/tests/ReplicaSetWithPrimary/Secondary_tags.json +++ b/specifications/max-staleness/tests/ReplicaSetWithPrimary/Secondary_tags.json @@ -8,7 +8,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "125002" @@ -20,7 +20,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -35,7 +35,7 @@ "type": "RSSecondary", "avg_rtt_ms": 50, "lastUpdateTime": 1, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -50,7 +50,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -65,7 +65,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -92,7 +92,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -107,7 +107,7 @@ "type": "RSSecondary", "avg_rtt_ms": 50, "lastUpdateTime": 1, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1000001" @@ -124,7 +124,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" diff --git a/specifications/max-staleness/tests/ReplicaSetWithPrimary/Secondary_tags.yml 
b/specifications/max-staleness/tests/ReplicaSetWithPrimary/Secondary_tags.yml index 50650fea844..721cbc8a5db 100644 --- a/specifications/max-staleness/tests/ReplicaSetWithPrimary/Secondary_tags.yml +++ b/specifications/max-staleness/tests/ReplicaSetWithPrimary/Secondary_tags.yml @@ -9,14 +9,14 @@ topology_description: type: RSPrimary avg_rtt_ms: 5 lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "125002"}} - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "2"}} # 125 sec stale + 25 sec heartbeat <= 150 sec maxStaleness. tags: data_center: nyc @@ -25,7 +25,7 @@ topology_description: type: RSSecondary avg_rtt_ms: 50 # Too far. lastUpdateTime: 1 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "1000001"}} # Not used in estimate since we have a primary. tags: data_center: nyc @@ -34,7 +34,7 @@ topology_description: type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "1"}} # Too stale. tags: data_center: nyc @@ -43,7 +43,7 @@ topology_description: type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "2"}} tags: data_center: tokyo # No match. 
diff --git a/specifications/max-staleness/tests/ReplicaSetWithPrimary/Secondary_tags2.json b/specifications/max-staleness/tests/ReplicaSetWithPrimary/Secondary_tags2.json index 6978a1807b8..7aa487a078e 100644 --- a/specifications/max-staleness/tests/ReplicaSetWithPrimary/Secondary_tags2.json +++ b/specifications/max-staleness/tests/ReplicaSetWithPrimary/Secondary_tags2.json @@ -13,7 +13,7 @@ "$numberLong": "125002" } }, - "maxWireVersion": 6 + "maxWireVersion": 21 }, { "address": "b:27017", @@ -25,7 +25,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "tokyo" } @@ -40,7 +40,7 @@ "$numberLong": "1" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "nyc" } @@ -70,7 +70,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "tokyo" } @@ -87,7 +87,7 @@ "$numberLong": "2" } }, - "maxWireVersion": 6, + "maxWireVersion": 21, "tags": { "data_center": "tokyo" } diff --git a/specifications/max-staleness/tests/ReplicaSetWithPrimary/Secondary_tags2.yml b/specifications/max-staleness/tests/ReplicaSetWithPrimary/Secondary_tags2.yml index b48ae682bd5..669ec5bb560 100644 --- a/specifications/max-staleness/tests/ReplicaSetWithPrimary/Secondary_tags2.yml +++ b/specifications/max-staleness/tests/ReplicaSetWithPrimary/Secondary_tags2.yml @@ -12,14 +12,14 @@ topology_description: avg_rtt_ms: 5 lastUpdateTime: 0 lastWrite: {lastWriteDate: {$numberLong: "125002"}} - maxWireVersion: 6 + maxWireVersion: 21 - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 lastWrite: {lastWriteDate: {$numberLong: "2"}} - maxWireVersion: 6 + maxWireVersion: 21 tags: data_center: tokyo - &3 @@ -28,7 +28,7 @@ topology_description: avg_rtt_ms: 5 lastUpdateTime: 0 lastWrite: {lastWriteDate: {$numberLong: "1"}} # Too stale. 
- maxWireVersion: 6 + maxWireVersion: 21 tags: data_center: nyc read_preference: diff --git a/specifications/max-staleness/tests/ReplicaSetWithPrimary/ZeroMaxStaleness.json b/specifications/max-staleness/tests/ReplicaSetWithPrimary/ZeroMaxStaleness.json index e1e4a7ffb73..fff5609fcc6 100644 --- a/specifications/max-staleness/tests/ReplicaSetWithPrimary/ZeroMaxStaleness.json +++ b/specifications/max-staleness/tests/ReplicaSetWithPrimary/ZeroMaxStaleness.json @@ -7,7 +7,7 @@ "type": "RSPrimary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "2" @@ -19,7 +19,7 @@ "type": "RSSecondary", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/specifications/max-staleness/tests/ReplicaSetWithPrimary/ZeroMaxStaleness.yml b/specifications/max-staleness/tests/ReplicaSetWithPrimary/ZeroMaxStaleness.yml index 1c2caf8e00e..18790394e7e 100644 --- a/specifications/max-staleness/tests/ReplicaSetWithPrimary/ZeroMaxStaleness.yml +++ b/specifications/max-staleness/tests/ReplicaSetWithPrimary/ZeroMaxStaleness.yml @@ -8,14 +8,14 @@ topology_description: type: RSPrimary avg_rtt_ms: 5 lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "2"}} - &2 address: b:27017 type: RSSecondary avg_rtt_ms: 5 lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "1"}} read_preference: mode: Nearest diff --git a/specifications/max-staleness/tests/Sharded/SmallMaxStaleness.json b/specifications/max-staleness/tests/Sharded/SmallMaxStaleness.json index 91d89720d10..98e05be3635 100644 --- a/specifications/max-staleness/tests/Sharded/SmallMaxStaleness.json +++ b/specifications/max-staleness/tests/Sharded/SmallMaxStaleness.json @@ -8,7 +8,7 @@ "type": "Mongos", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + 
"maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -20,7 +20,7 @@ "type": "Mongos", "avg_rtt_ms": 50, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -39,7 +39,7 @@ "type": "Mongos", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -51,7 +51,7 @@ "type": "Mongos", "avg_rtt_ms": 50, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -65,7 +65,7 @@ "type": "Mongos", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/specifications/max-staleness/tests/Sharded/SmallMaxStaleness.yml b/specifications/max-staleness/tests/Sharded/SmallMaxStaleness.yml index 5ee1e079df3..9d5034df1e7 100644 --- a/specifications/max-staleness/tests/Sharded/SmallMaxStaleness.yml +++ b/specifications/max-staleness/tests/Sharded/SmallMaxStaleness.yml @@ -9,14 +9,14 @@ topology_description: type: Mongos avg_rtt_ms: 5 lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "1"}} - &2 address: b:27017 type: Mongos avg_rtt_ms: 50 # Too far. 
lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "1"}} read_preference: mode: Nearest diff --git a/specifications/max-staleness/tests/Single/SmallMaxStaleness.json b/specifications/max-staleness/tests/Single/SmallMaxStaleness.json index b8d2db24be7..d948739855f 100644 --- a/specifications/max-staleness/tests/Single/SmallMaxStaleness.json +++ b/specifications/max-staleness/tests/Single/SmallMaxStaleness.json @@ -8,7 +8,7 @@ "type": "Standalone", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -27,7 +27,7 @@ "type": "Standalone", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" @@ -41,7 +41,7 @@ "type": "Standalone", "avg_rtt_ms": 5, "lastUpdateTime": 0, - "maxWireVersion": 6, + "maxWireVersion": 21, "lastWrite": { "lastWriteDate": { "$numberLong": "1" diff --git a/specifications/max-staleness/tests/Single/SmallMaxStaleness.yml b/specifications/max-staleness/tests/Single/SmallMaxStaleness.yml index 40f63a82a5f..3cba2ae55cc 100644 --- a/specifications/max-staleness/tests/Single/SmallMaxStaleness.yml +++ b/specifications/max-staleness/tests/Single/SmallMaxStaleness.yml @@ -9,7 +9,7 @@ topology_description: type: Standalone avg_rtt_ms: 5 lastUpdateTime: 0 - maxWireVersion: 6 + maxWireVersion: 21 lastWrite: {lastWriteDate: {$numberLong: "1"}} read_preference: mode: Nearest diff --git a/specifications/max-staleness/tests/Unknown/SmallMaxStaleness.json b/specifications/max-staleness/tests/Unknown/SmallMaxStaleness.json index 8d69f46a1ea..0e609bcf940 100644 --- a/specifications/max-staleness/tests/Unknown/SmallMaxStaleness.json +++ b/specifications/max-staleness/tests/Unknown/SmallMaxStaleness.json @@ -6,7 +6,7 @@ { "address": "a:27017", "type": "Unknown", - "maxWireVersion": 6 + "maxWireVersion": 21 } ] }, diff --git 
a/specifications/max-staleness/tests/Unknown/SmallMaxStaleness.yml b/specifications/max-staleness/tests/Unknown/SmallMaxStaleness.yml index 59e14db3623..5d420637652 100644 --- a/specifications/max-staleness/tests/Unknown/SmallMaxStaleness.yml +++ b/specifications/max-staleness/tests/Unknown/SmallMaxStaleness.yml @@ -7,7 +7,7 @@ topology_description: - &1 address: a:27017 type: Unknown - maxWireVersion: 6 + maxWireVersion: 21 read_preference: mode: Nearest maxStalenessSeconds: 1 diff --git a/specifications/server-discovery-and-monitoring/tests/errors/pre-42-InterruptedAtShutdown.json b/specifications/server-discovery-and-monitoring/tests/errors/pre-42-InterruptedAtShutdown.json deleted file mode 100644 index 9f6ea212e5c..00000000000 --- a/specifications/server-discovery-and-monitoring/tests/errors/pre-42-InterruptedAtShutdown.json +++ /dev/null @@ -1,70 +0,0 @@ -{ - "description": "Pre-4.2 InterruptedAtShutdown error", - "uri": "mongodb://a/?replicaSet=rs", - "phases": [ - { - "description": "Primary A is discovered", - "responses": [ - [ - "a:27017", - { - "ok": 1, - "helloOk": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017" - ], - "setName": "rs", - "minWireVersion": 0, - "maxWireVersion": 7 - } - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "topologyVersion": null, - "pool": { - "generation": 0 - } - } - }, - "topologyType": "ReplicaSetWithPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - }, - { - "description": "Pre-4.2 InterruptedAtShutdown error marks server Unknown and clears the pool", - "applicationErrors": [ - { - "address": "a:27017", - "when": "afterHandshakeCompletes", - "maxWireVersion": 7, - "type": "command", - "response": { - "ok": 0, - "errmsg": "InterruptedAtShutdown", - "code": 11600 - } - } - ], - "outcome": { - "servers": { - "a:27017": { - "type": "Unknown", - "topologyVersion": null, - "pool": { - "generation": 1 - } - } - }, - "topologyType": 
"ReplicaSetNoPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - } - ] -} diff --git a/specifications/server-discovery-and-monitoring/tests/errors/pre-42-InterruptedAtShutdown.yml b/specifications/server-discovery-and-monitoring/tests/errors/pre-42-InterruptedAtShutdown.yml deleted file mode 100644 index 4996e9f25ff..00000000000 --- a/specifications/server-discovery-and-monitoring/tests/errors/pre-42-InterruptedAtShutdown.yml +++ /dev/null @@ -1,47 +0,0 @@ -# Autogenerated tests for SDAM error handling, see generate-error-tests.py -description: Pre-4.2 InterruptedAtShutdown error -uri: mongodb://a/?replicaSet=rs -phases: -- description: Primary A is discovered - responses: - - - a:27017 - - ok: 1 - helloOk: true - isWritablePrimary: true - hosts: - - a:27017 - setName: rs - minWireVersion: 0 - maxWireVersion: 7 - outcome: &outcome - servers: - a:27017: - type: RSPrimary - setName: rs - topologyVersion: null - pool: - generation: 0 - topologyType: ReplicaSetWithPrimary - logicalSessionTimeoutMinutes: null - setName: rs - -- description: Pre-4.2 InterruptedAtShutdown error marks server Unknown and clears the pool - applicationErrors: - - address: a:27017 - when: afterHandshakeCompletes - maxWireVersion: 7 - type: command - response: - ok: 0 - errmsg: InterruptedAtShutdown - code: 11600 - outcome: - servers: - a:27017: - type: Unknown - topologyVersion: null - pool: - generation: 1 - topologyType: ReplicaSetNoPrimary - logicalSessionTimeoutMinutes: null - setName: rs diff --git a/specifications/server-discovery-and-monitoring/tests/errors/pre-42-InterruptedDueToReplStateChange.json b/specifications/server-discovery-and-monitoring/tests/errors/pre-42-InterruptedDueToReplStateChange.json deleted file mode 100644 index 7e5f2357134..00000000000 --- a/specifications/server-discovery-and-monitoring/tests/errors/pre-42-InterruptedDueToReplStateChange.json +++ /dev/null @@ -1,70 +0,0 @@ -{ - "description": "Pre-4.2 InterruptedDueToReplStateChange error", 
- "uri": "mongodb://a/?replicaSet=rs", - "phases": [ - { - "description": "Primary A is discovered", - "responses": [ - [ - "a:27017", - { - "ok": 1, - "helloOk": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017" - ], - "setName": "rs", - "minWireVersion": 0, - "maxWireVersion": 7 - } - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "topologyVersion": null, - "pool": { - "generation": 0 - } - } - }, - "topologyType": "ReplicaSetWithPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - }, - { - "description": "Pre-4.2 InterruptedDueToReplStateChange error marks server Unknown and clears the pool", - "applicationErrors": [ - { - "address": "a:27017", - "when": "afterHandshakeCompletes", - "maxWireVersion": 7, - "type": "command", - "response": { - "ok": 0, - "errmsg": "InterruptedDueToReplStateChange", - "code": 11602 - } - } - ], - "outcome": { - "servers": { - "a:27017": { - "type": "Unknown", - "topologyVersion": null, - "pool": { - "generation": 1 - } - } - }, - "topologyType": "ReplicaSetNoPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - } - ] -} diff --git a/specifications/server-discovery-and-monitoring/tests/errors/pre-42-InterruptedDueToReplStateChange.yml b/specifications/server-discovery-and-monitoring/tests/errors/pre-42-InterruptedDueToReplStateChange.yml deleted file mode 100644 index ada5f1ff736..00000000000 --- a/specifications/server-discovery-and-monitoring/tests/errors/pre-42-InterruptedDueToReplStateChange.yml +++ /dev/null @@ -1,47 +0,0 @@ -# Autogenerated tests for SDAM error handling, see generate-error-tests.py -description: Pre-4.2 InterruptedDueToReplStateChange error -uri: mongodb://a/?replicaSet=rs -phases: -- description: Primary A is discovered - responses: - - - a:27017 - - ok: 1 - helloOk: true - isWritablePrimary: true - hosts: - - a:27017 - setName: rs - minWireVersion: 0 - maxWireVersion: 7 - outcome: &outcome - servers: - 
a:27017: - type: RSPrimary - setName: rs - topologyVersion: null - pool: - generation: 0 - topologyType: ReplicaSetWithPrimary - logicalSessionTimeoutMinutes: null - setName: rs - -- description: Pre-4.2 InterruptedDueToReplStateChange error marks server Unknown and clears the pool - applicationErrors: - - address: a:27017 - when: afterHandshakeCompletes - maxWireVersion: 7 - type: command - response: - ok: 0 - errmsg: InterruptedDueToReplStateChange - code: 11602 - outcome: - servers: - a:27017: - type: Unknown - topologyVersion: null - pool: - generation: 1 - topologyType: ReplicaSetNoPrimary - logicalSessionTimeoutMinutes: null - setName: rs diff --git a/specifications/server-discovery-and-monitoring/tests/errors/pre-42-LegacyNotPrimary.json b/specifications/server-discovery-and-monitoring/tests/errors/pre-42-LegacyNotPrimary.json deleted file mode 100644 index 1635f1a8568..00000000000 --- a/specifications/server-discovery-and-monitoring/tests/errors/pre-42-LegacyNotPrimary.json +++ /dev/null @@ -1,70 +0,0 @@ -{ - "description": "Pre-4.2 LegacyNotPrimary error", - "uri": "mongodb://a/?replicaSet=rs", - "phases": [ - { - "description": "Primary A is discovered", - "responses": [ - [ - "a:27017", - { - "ok": 1, - "helloOk": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017" - ], - "setName": "rs", - "minWireVersion": 0, - "maxWireVersion": 7 - } - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "topologyVersion": null, - "pool": { - "generation": 0 - } - } - }, - "topologyType": "ReplicaSetWithPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - }, - { - "description": "Pre-4.2 LegacyNotPrimary error marks server Unknown and clears the pool", - "applicationErrors": [ - { - "address": "a:27017", - "when": "afterHandshakeCompletes", - "maxWireVersion": 7, - "type": "command", - "response": { - "ok": 0, - "errmsg": "LegacyNotPrimary", - "code": 10058 - } - } - ], - "outcome": { - "servers": 
{ - "a:27017": { - "type": "Unknown", - "topologyVersion": null, - "pool": { - "generation": 1 - } - } - }, - "topologyType": "ReplicaSetNoPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - } - ] -} diff --git a/specifications/server-discovery-and-monitoring/tests/errors/pre-42-LegacyNotPrimary.yml b/specifications/server-discovery-and-monitoring/tests/errors/pre-42-LegacyNotPrimary.yml deleted file mode 100644 index e2b37b4f5f1..00000000000 --- a/specifications/server-discovery-and-monitoring/tests/errors/pre-42-LegacyNotPrimary.yml +++ /dev/null @@ -1,47 +0,0 @@ -# Autogenerated tests for SDAM error handling, see generate-error-tests.py -description: Pre-4.2 LegacyNotPrimary error -uri: mongodb://a/?replicaSet=rs -phases: -- description: Primary A is discovered - responses: - - - a:27017 - - ok: 1 - helloOk: true - isWritablePrimary: true - hosts: - - a:27017 - setName: rs - minWireVersion: 0 - maxWireVersion: 7 - outcome: &outcome - servers: - a:27017: - type: RSPrimary - setName: rs - topologyVersion: null - pool: - generation: 0 - topologyType: ReplicaSetWithPrimary - logicalSessionTimeoutMinutes: null - setName: rs - -- description: Pre-4.2 LegacyNotPrimary error marks server Unknown and clears the pool - applicationErrors: - - address: a:27017 - when: afterHandshakeCompletes - maxWireVersion: 7 - type: command - response: - ok: 0 - errmsg: LegacyNotPrimary - code: 10058 - outcome: - servers: - a:27017: - type: Unknown - topologyVersion: null - pool: - generation: 1 - topologyType: ReplicaSetNoPrimary - logicalSessionTimeoutMinutes: null - setName: rs diff --git a/specifications/server-discovery-and-monitoring/tests/errors/pre-42-NotPrimaryNoSecondaryOk.json b/specifications/server-discovery-and-monitoring/tests/errors/pre-42-NotPrimaryNoSecondaryOk.json deleted file mode 100644 index 0e70ede02c9..00000000000 --- a/specifications/server-discovery-and-monitoring/tests/errors/pre-42-NotPrimaryNoSecondaryOk.json +++ /dev/null @@ -1,70 +0,0 
@@ -{ - "description": "Pre-4.2 NotPrimaryNoSecondaryOk error", - "uri": "mongodb://a/?replicaSet=rs", - "phases": [ - { - "description": "Primary A is discovered", - "responses": [ - [ - "a:27017", - { - "ok": 1, - "helloOk": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017" - ], - "setName": "rs", - "minWireVersion": 0, - "maxWireVersion": 7 - } - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "topologyVersion": null, - "pool": { - "generation": 0 - } - } - }, - "topologyType": "ReplicaSetWithPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - }, - { - "description": "Pre-4.2 NotPrimaryNoSecondaryOk error marks server Unknown and clears the pool", - "applicationErrors": [ - { - "address": "a:27017", - "when": "afterHandshakeCompletes", - "maxWireVersion": 7, - "type": "command", - "response": { - "ok": 0, - "errmsg": "NotPrimaryNoSecondaryOk", - "code": 13435 - } - } - ], - "outcome": { - "servers": { - "a:27017": { - "type": "Unknown", - "topologyVersion": null, - "pool": { - "generation": 1 - } - } - }, - "topologyType": "ReplicaSetNoPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - } - ] -} diff --git a/specifications/server-discovery-and-monitoring/tests/errors/pre-42-NotPrimaryNoSecondaryOk.yml b/specifications/server-discovery-and-monitoring/tests/errors/pre-42-NotPrimaryNoSecondaryOk.yml deleted file mode 100644 index 867b7d3dc15..00000000000 --- a/specifications/server-discovery-and-monitoring/tests/errors/pre-42-NotPrimaryNoSecondaryOk.yml +++ /dev/null @@ -1,47 +0,0 @@ -# Autogenerated tests for SDAM error handling, see generate-error-tests.py -description: Pre-4.2 NotPrimaryNoSecondaryOk error -uri: mongodb://a/?replicaSet=rs -phases: -- description: Primary A is discovered - responses: - - - a:27017 - - ok: 1 - helloOk: true - isWritablePrimary: true - hosts: - - a:27017 - setName: rs - minWireVersion: 0 - maxWireVersion: 7 - outcome: &outcome - 
servers: - a:27017: - type: RSPrimary - setName: rs - topologyVersion: null - pool: - generation: 0 - topologyType: ReplicaSetWithPrimary - logicalSessionTimeoutMinutes: null - setName: rs - -- description: Pre-4.2 NotPrimaryNoSecondaryOk error marks server Unknown and clears the pool - applicationErrors: - - address: a:27017 - when: afterHandshakeCompletes - maxWireVersion: 7 - type: command - response: - ok: 0 - errmsg: NotPrimaryNoSecondaryOk - code: 13435 - outcome: - servers: - a:27017: - type: Unknown - topologyVersion: null - pool: - generation: 1 - topologyType: ReplicaSetNoPrimary - logicalSessionTimeoutMinutes: null - setName: rs diff --git a/specifications/server-discovery-and-monitoring/tests/errors/pre-42-NotPrimaryOrSecondary.json b/specifications/server-discovery-and-monitoring/tests/errors/pre-42-NotPrimaryOrSecondary.json deleted file mode 100644 index 3fefb216634..00000000000 --- a/specifications/server-discovery-and-monitoring/tests/errors/pre-42-NotPrimaryOrSecondary.json +++ /dev/null @@ -1,70 +0,0 @@ -{ - "description": "Pre-4.2 NotPrimaryOrSecondary error", - "uri": "mongodb://a/?replicaSet=rs", - "phases": [ - { - "description": "Primary A is discovered", - "responses": [ - [ - "a:27017", - { - "ok": 1, - "helloOk": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017" - ], - "setName": "rs", - "minWireVersion": 0, - "maxWireVersion": 7 - } - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "topologyVersion": null, - "pool": { - "generation": 0 - } - } - }, - "topologyType": "ReplicaSetWithPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - }, - { - "description": "Pre-4.2 NotPrimaryOrSecondary error marks server Unknown and clears the pool", - "applicationErrors": [ - { - "address": "a:27017", - "when": "afterHandshakeCompletes", - "maxWireVersion": 7, - "type": "command", - "response": { - "ok": 0, - "errmsg": "NotPrimaryOrSecondary", - "code": 13436 - } - } - ], - 
"outcome": { - "servers": { - "a:27017": { - "type": "Unknown", - "topologyVersion": null, - "pool": { - "generation": 1 - } - } - }, - "topologyType": "ReplicaSetNoPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - } - ] -} diff --git a/specifications/server-discovery-and-monitoring/tests/errors/pre-42-NotPrimaryOrSecondary.yml b/specifications/server-discovery-and-monitoring/tests/errors/pre-42-NotPrimaryOrSecondary.yml deleted file mode 100644 index 705e2339c01..00000000000 --- a/specifications/server-discovery-and-monitoring/tests/errors/pre-42-NotPrimaryOrSecondary.yml +++ /dev/null @@ -1,47 +0,0 @@ -# Autogenerated tests for SDAM error handling, see generate-error-tests.py -description: Pre-4.2 NotPrimaryOrSecondary error -uri: mongodb://a/?replicaSet=rs -phases: -- description: Primary A is discovered - responses: - - - a:27017 - - ok: 1 - helloOk: true - isWritablePrimary: true - hosts: - - a:27017 - setName: rs - minWireVersion: 0 - maxWireVersion: 7 - outcome: &outcome - servers: - a:27017: - type: RSPrimary - setName: rs - topologyVersion: null - pool: - generation: 0 - topologyType: ReplicaSetWithPrimary - logicalSessionTimeoutMinutes: null - setName: rs - -- description: Pre-4.2 NotPrimaryOrSecondary error marks server Unknown and clears the pool - applicationErrors: - - address: a:27017 - when: afterHandshakeCompletes - maxWireVersion: 7 - type: command - response: - ok: 0 - errmsg: NotPrimaryOrSecondary - code: 13436 - outcome: - servers: - a:27017: - type: Unknown - topologyVersion: null - pool: - generation: 1 - topologyType: ReplicaSetNoPrimary - logicalSessionTimeoutMinutes: null - setName: rs diff --git a/specifications/server-discovery-and-monitoring/tests/errors/pre-42-NotWritablePrimary.json b/specifications/server-discovery-and-monitoring/tests/errors/pre-42-NotWritablePrimary.json deleted file mode 100644 index d010da0a5bc..00000000000 --- 
a/specifications/server-discovery-and-monitoring/tests/errors/pre-42-NotWritablePrimary.json +++ /dev/null @@ -1,70 +0,0 @@ -{ - "description": "Pre-4.2 NotWritablePrimary error", - "uri": "mongodb://a/?replicaSet=rs", - "phases": [ - { - "description": "Primary A is discovered", - "responses": [ - [ - "a:27017", - { - "ok": 1, - "helloOk": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017" - ], - "setName": "rs", - "minWireVersion": 0, - "maxWireVersion": 7 - } - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "topologyVersion": null, - "pool": { - "generation": 0 - } - } - }, - "topologyType": "ReplicaSetWithPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - }, - { - "description": "Pre-4.2 NotWritablePrimary error marks server Unknown and clears the pool", - "applicationErrors": [ - { - "address": "a:27017", - "when": "afterHandshakeCompletes", - "maxWireVersion": 7, - "type": "command", - "response": { - "ok": 0, - "errmsg": "NotWritablePrimary", - "code": 10107 - } - } - ], - "outcome": { - "servers": { - "a:27017": { - "type": "Unknown", - "topologyVersion": null, - "pool": { - "generation": 1 - } - } - }, - "topologyType": "ReplicaSetNoPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - } - ] -} diff --git a/specifications/server-discovery-and-monitoring/tests/errors/pre-42-NotWritablePrimary.yml b/specifications/server-discovery-and-monitoring/tests/errors/pre-42-NotWritablePrimary.yml deleted file mode 100644 index 29669ff336e..00000000000 --- a/specifications/server-discovery-and-monitoring/tests/errors/pre-42-NotWritablePrimary.yml +++ /dev/null @@ -1,47 +0,0 @@ -# Autogenerated tests for SDAM error handling, see generate-error-tests.py -description: Pre-4.2 NotWritablePrimary error -uri: mongodb://a/?replicaSet=rs -phases: -- description: Primary A is discovered - responses: - - - a:27017 - - ok: 1 - helloOk: true - isWritablePrimary: true - hosts: - - 
a:27017 - setName: rs - minWireVersion: 0 - maxWireVersion: 7 - outcome: &outcome - servers: - a:27017: - type: RSPrimary - setName: rs - topologyVersion: null - pool: - generation: 0 - topologyType: ReplicaSetWithPrimary - logicalSessionTimeoutMinutes: null - setName: rs - -- description: Pre-4.2 NotWritablePrimary error marks server Unknown and clears the pool - applicationErrors: - - address: a:27017 - when: afterHandshakeCompletes - maxWireVersion: 7 - type: command - response: - ok: 0 - errmsg: NotWritablePrimary - code: 10107 - outcome: - servers: - a:27017: - type: Unknown - topologyVersion: null - pool: - generation: 1 - topologyType: ReplicaSetNoPrimary - logicalSessionTimeoutMinutes: null - setName: rs diff --git a/specifications/server-discovery-and-monitoring/tests/errors/pre-42-PrimarySteppedDown.json b/specifications/server-discovery-and-monitoring/tests/errors/pre-42-PrimarySteppedDown.json deleted file mode 100644 index 02956d201dd..00000000000 --- a/specifications/server-discovery-and-monitoring/tests/errors/pre-42-PrimarySteppedDown.json +++ /dev/null @@ -1,70 +0,0 @@ -{ - "description": "Pre-4.2 PrimarySteppedDown error", - "uri": "mongodb://a/?replicaSet=rs", - "phases": [ - { - "description": "Primary A is discovered", - "responses": [ - [ - "a:27017", - { - "ok": 1, - "helloOk": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017" - ], - "setName": "rs", - "minWireVersion": 0, - "maxWireVersion": 7 - } - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "topologyVersion": null, - "pool": { - "generation": 0 - } - } - }, - "topologyType": "ReplicaSetWithPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - }, - { - "description": "Pre-4.2 PrimarySteppedDown error marks server Unknown and clears the pool", - "applicationErrors": [ - { - "address": "a:27017", - "when": "afterHandshakeCompletes", - "maxWireVersion": 7, - "type": "command", - "response": { - "ok": 0, - 
"errmsg": "PrimarySteppedDown", - "code": 189 - } - } - ], - "outcome": { - "servers": { - "a:27017": { - "type": "Unknown", - "topologyVersion": null, - "pool": { - "generation": 1 - } - } - }, - "topologyType": "ReplicaSetNoPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - } - ] -} diff --git a/specifications/server-discovery-and-monitoring/tests/errors/pre-42-PrimarySteppedDown.yml b/specifications/server-discovery-and-monitoring/tests/errors/pre-42-PrimarySteppedDown.yml deleted file mode 100644 index 9176ee25c3f..00000000000 --- a/specifications/server-discovery-and-monitoring/tests/errors/pre-42-PrimarySteppedDown.yml +++ /dev/null @@ -1,47 +0,0 @@ -# Autogenerated tests for SDAM error handling, see generate-error-tests.py -description: Pre-4.2 PrimarySteppedDown error -uri: mongodb://a/?replicaSet=rs -phases: -- description: Primary A is discovered - responses: - - - a:27017 - - ok: 1 - helloOk: true - isWritablePrimary: true - hosts: - - a:27017 - setName: rs - minWireVersion: 0 - maxWireVersion: 7 - outcome: &outcome - servers: - a:27017: - type: RSPrimary - setName: rs - topologyVersion: null - pool: - generation: 0 - topologyType: ReplicaSetWithPrimary - logicalSessionTimeoutMinutes: null - setName: rs - -- description: Pre-4.2 PrimarySteppedDown error marks server Unknown and clears the pool - applicationErrors: - - address: a:27017 - when: afterHandshakeCompletes - maxWireVersion: 7 - type: command - response: - ok: 0 - errmsg: PrimarySteppedDown - code: 189 - outcome: - servers: - a:27017: - type: Unknown - topologyVersion: null - pool: - generation: 1 - topologyType: ReplicaSetNoPrimary - logicalSessionTimeoutMinutes: null - setName: rs diff --git a/specifications/server-discovery-and-monitoring/tests/errors/pre-42-ShutdownInProgress.json b/specifications/server-discovery-and-monitoring/tests/errors/pre-42-ShutdownInProgress.json deleted file mode 100644 index fc3a5aa6fee..00000000000 --- 
a/specifications/server-discovery-and-monitoring/tests/errors/pre-42-ShutdownInProgress.json +++ /dev/null @@ -1,70 +0,0 @@ -{ - "description": "Pre-4.2 ShutdownInProgress error", - "uri": "mongodb://a/?replicaSet=rs", - "phases": [ - { - "description": "Primary A is discovered", - "responses": [ - [ - "a:27017", - { - "ok": 1, - "helloOk": true, - "isWritablePrimary": true, - "hosts": [ - "a:27017" - ], - "setName": "rs", - "minWireVersion": 0, - "maxWireVersion": 7 - } - ] - ], - "outcome": { - "servers": { - "a:27017": { - "type": "RSPrimary", - "setName": "rs", - "topologyVersion": null, - "pool": { - "generation": 0 - } - } - }, - "topologyType": "ReplicaSetWithPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - }, - { - "description": "Pre-4.2 ShutdownInProgress error marks server Unknown and clears the pool", - "applicationErrors": [ - { - "address": "a:27017", - "when": "afterHandshakeCompletes", - "maxWireVersion": 7, - "type": "command", - "response": { - "ok": 0, - "errmsg": "ShutdownInProgress", - "code": 91 - } - } - ], - "outcome": { - "servers": { - "a:27017": { - "type": "Unknown", - "topologyVersion": null, - "pool": { - "generation": 1 - } - } - }, - "topologyType": "ReplicaSetNoPrimary", - "logicalSessionTimeoutMinutes": null, - "setName": "rs" - } - } - ] -} diff --git a/specifications/server-discovery-and-monitoring/tests/errors/pre-42-ShutdownInProgress.yml b/specifications/server-discovery-and-monitoring/tests/errors/pre-42-ShutdownInProgress.yml deleted file mode 100644 index 5c64050e591..00000000000 --- a/specifications/server-discovery-and-monitoring/tests/errors/pre-42-ShutdownInProgress.yml +++ /dev/null @@ -1,47 +0,0 @@ -# Autogenerated tests for SDAM error handling, see generate-error-tests.py -description: Pre-4.2 ShutdownInProgress error -uri: mongodb://a/?replicaSet=rs -phases: -- description: Primary A is discovered - responses: - - - a:27017 - - ok: 1 - helloOk: true - isWritablePrimary: true - hosts: - - 
a:27017 - setName: rs - minWireVersion: 0 - maxWireVersion: 7 - outcome: &outcome - servers: - a:27017: - type: RSPrimary - setName: rs - topologyVersion: null - pool: - generation: 0 - topologyType: ReplicaSetWithPrimary - logicalSessionTimeoutMinutes: null - setName: rs - -- description: Pre-4.2 ShutdownInProgress error marks server Unknown and clears the pool - applicationErrors: - - address: a:27017 - when: afterHandshakeCompletes - maxWireVersion: 7 - type: command - response: - ok: 0 - errmsg: ShutdownInProgress - code: 91 - outcome: - servers: - a:27017: - type: Unknown - topologyVersion: null - pool: - generation: 1 - topologyType: ReplicaSetNoPrimary - logicalSessionTimeoutMinutes: null - setName: rs diff --git a/specifications/server-discovery-and-monitoring/tests/errors/pre-42.yml.template b/specifications/server-discovery-and-monitoring/tests/errors/pre-42.yml.template deleted file mode 100644 index 7449f68c163..00000000000 --- a/specifications/server-discovery-and-monitoring/tests/errors/pre-42.yml.template +++ /dev/null @@ -1,47 +0,0 @@ -# Autogenerated tests for SDAM error handling, see generate-error-tests.py -description: Pre-4.2 {error_name} error -uri: mongodb://a/?replicaSet=rs -phases: -- description: Primary A is discovered - responses: - - - a:27017 - - ok: 1 - helloOk: true - isWritablePrimary: true - hosts: - - a:27017 - setName: rs - minWireVersion: 0 - maxWireVersion: 7 - outcome: &outcome - servers: - a:27017: - type: RSPrimary - setName: rs - topologyVersion: null - pool: - generation: 0 - topologyType: ReplicaSetWithPrimary - logicalSessionTimeoutMinutes: null - setName: rs - -- description: Pre-4.2 {error_name} error marks server Unknown and clears the pool - applicationErrors: - - address: a:27017 - when: afterHandshakeCompletes - maxWireVersion: 7 - type: command - response: - ok: 0 - errmsg: {error_name} - code: {error_code} - outcome: - servers: - a:27017: - type: Unknown - topologyVersion: null - pool: - generation: 1 - 
topologyType: ReplicaSetNoPrimary - logicalSessionTimeoutMinutes: null - setName: rs diff --git a/specifications/server-discovery-and-monitoring/tests/monitoring/discovered_standalone.json b/specifications/server-discovery-and-monitoring/tests/monitoring/discovered_standalone.json index dd8f7fc51ea..097203694ea 100644 --- a/specifications/server-discovery-and-monitoring/tests/monitoring/discovered_standalone.json +++ b/specifications/server-discovery-and-monitoring/tests/monitoring/discovered_standalone.json @@ -11,7 +11,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/monitoring/discovered_standalone.yml b/specifications/server-discovery-and-monitoring/tests/monitoring/discovered_standalone.yml index 5d808f2600e..1de96154ccc 100644 --- a/specifications/server-discovery-and-monitoring/tests/monitoring/discovered_standalone.yml +++ b/specifications/server-discovery-and-monitoring/tests/monitoring/discovered_standalone.yml @@ -5,7 +5,7 @@ phases: responses: - - "a:27017" - - { ok: 1, helloOk: true, isWritablePrimary: true, minWireVersion: 0, maxWireVersion: 6 } + - { ok: 1, helloOk: true, isWritablePrimary: true, minWireVersion: 0, maxWireVersion: 21 } outcome: events: diff --git a/specifications/server-discovery-and-monitoring/tests/monitoring/replica_set_with_no_primary.json b/specifications/server-discovery-and-monitoring/tests/monitoring/replica_set_with_no_primary.json index 950e32efe11..41d048729da 100644 --- a/specifications/server-discovery-and-monitoring/tests/monitoring/replica_set_with_no_primary.json +++ b/specifications/server-discovery-and-monitoring/tests/monitoring/replica_set_with_no_primary.json @@ -19,7 +19,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/monitoring/replica_set_with_no_primary.yml 
b/specifications/server-discovery-and-monitoring/tests/monitoring/replica_set_with_no_primary.yml index ce9cdf46e64..e5ca9cd90d6 100644 --- a/specifications/server-discovery-and-monitoring/tests/monitoring/replica_set_with_no_primary.yml +++ b/specifications/server-discovery-and-monitoring/tests/monitoring/replica_set_with_no_primary.yml @@ -17,7 +17,7 @@ phases: - "a:27017" - "b:27017" minWireVersion: 0 - maxWireVersion: 6 + maxWireVersion: 21 outcome: events: - diff --git a/specifications/server-discovery-and-monitoring/tests/monitoring/replica_set_with_primary.json b/specifications/server-discovery-and-monitoring/tests/monitoring/replica_set_with_primary.json index 2ad94d6e6af..3ccc127d1d6 100644 --- a/specifications/server-discovery-and-monitoring/tests/monitoring/replica_set_with_primary.json +++ b/specifications/server-discovery-and-monitoring/tests/monitoring/replica_set_with_primary.json @@ -18,7 +18,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/monitoring/replica_set_with_primary.yml b/specifications/server-discovery-and-monitoring/tests/monitoring/replica_set_with_primary.yml index 2c789190637..256895bb28a 100644 --- a/specifications/server-discovery-and-monitoring/tests/monitoring/replica_set_with_primary.yml +++ b/specifications/server-discovery-and-monitoring/tests/monitoring/replica_set_with_primary.yml @@ -16,7 +16,7 @@ phases: - "a:27017" - "b:27017" minWireVersion: 0 - maxWireVersion: 6 + maxWireVersion: 21 outcome: events: - diff --git a/specifications/server-discovery-and-monitoring/tests/monitoring/replica_set_with_removal.json b/specifications/server-discovery-and-monitoring/tests/monitoring/replica_set_with_removal.json index ae28faa30cc..dc6fbe7e7d8 100644 --- a/specifications/server-discovery-and-monitoring/tests/monitoring/replica_set_with_removal.json +++ 
b/specifications/server-discovery-and-monitoring/tests/monitoring/replica_set_with_removal.json @@ -69,7 +69,7 @@ "a:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ diff --git a/specifications/server-discovery-and-monitoring/tests/monitoring/replica_set_with_removal.yml b/specifications/server-discovery-and-monitoring/tests/monitoring/replica_set_with_removal.yml index 2471f47680c..25c006861a4 100644 --- a/specifications/server-discovery-and-monitoring/tests/monitoring/replica_set_with_removal.yml +++ b/specifications/server-discovery-and-monitoring/tests/monitoring/replica_set_with_removal.yml @@ -50,7 +50,7 @@ phases: primary: "a:27017", hosts: [ "a:27017" ], minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 } - - "b:27017" diff --git a/specifications/server-discovery-and-monitoring/tests/monitoring/required_replica_set.json b/specifications/server-discovery-and-monitoring/tests/monitoring/required_replica_set.json index 401c5d99c56..1f4e5c1d715 100644 --- a/specifications/server-discovery-and-monitoring/tests/monitoring/required_replica_set.json +++ b/specifications/server-discovery-and-monitoring/tests/monitoring/required_replica_set.json @@ -18,7 +18,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/monitoring/required_replica_set.yml b/specifications/server-discovery-and-monitoring/tests/monitoring/required_replica_set.yml index 7a060128f37..69d0500de1e 100644 --- a/specifications/server-discovery-and-monitoring/tests/monitoring/required_replica_set.yml +++ b/specifications/server-discovery-and-monitoring/tests/monitoring/required_replica_set.yml @@ -14,7 +14,7 @@ phases: primary: "a:27017", hosts: [ "a:27017", "b:27017" ], minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 } outcome: events: diff --git a/specifications/server-discovery-and-monitoring/tests/monitoring/standalone.json 
b/specifications/server-discovery-and-monitoring/tests/monitoring/standalone.json index 821a1525d41..f375a383ca2 100644 --- a/specifications/server-discovery-and-monitoring/tests/monitoring/standalone.json +++ b/specifications/server-discovery-and-monitoring/tests/monitoring/standalone.json @@ -11,7 +11,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/monitoring/standalone.yml b/specifications/server-discovery-and-monitoring/tests/monitoring/standalone.yml index d9f6bcfaf31..0c3ed64601f 100644 --- a/specifications/server-discovery-and-monitoring/tests/monitoring/standalone.yml +++ b/specifications/server-discovery-and-monitoring/tests/monitoring/standalone.yml @@ -5,7 +5,7 @@ phases: responses: - - "a:27017" - - { ok: 1, helloOk: true, isWritablePrimary: true, minWireVersion: 0, maxWireVersion: 6 } + - { ok: 1, helloOk: true, isWritablePrimary: true, minWireVersion: 0, maxWireVersion: 21 } outcome: events: diff --git a/specifications/server-discovery-and-monitoring/tests/monitoring/standalone_suppress_equal_description_changes.json b/specifications/server-discovery-and-monitoring/tests/monitoring/standalone_suppress_equal_description_changes.json index 5958e2d26c7..4d046ff8ed6 100644 --- a/specifications/server-discovery-and-monitoring/tests/monitoring/standalone_suppress_equal_description_changes.json +++ b/specifications/server-discovery-and-monitoring/tests/monitoring/standalone_suppress_equal_description_changes.json @@ -11,7 +11,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -21,7 +21,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/monitoring/standalone_suppress_equal_description_changes.yml 
b/specifications/server-discovery-and-monitoring/tests/monitoring/standalone_suppress_equal_description_changes.yml index f2f83ffbace..255ec2dd434 100644 --- a/specifications/server-discovery-and-monitoring/tests/monitoring/standalone_suppress_equal_description_changes.yml +++ b/specifications/server-discovery-and-monitoring/tests/monitoring/standalone_suppress_equal_description_changes.yml @@ -5,10 +5,10 @@ phases: responses: - - "a:27017" - - { ok: 1, helloOk: true, isWritablePrimary: true, minWireVersion: 0, maxWireVersion: 6 } + - { ok: 1, helloOk: true, isWritablePrimary: true, minWireVersion: 0, maxWireVersion: 21 } - - "a:27017" - - { ok: 1, helloOk: true, isWritablePrimary: true, minWireVersion: 0, maxWireVersion: 6 } + - { ok: 1, helloOk: true, isWritablePrimary: true, minWireVersion: 0, maxWireVersion: 21 } outcome: events: diff --git a/specifications/server-discovery-and-monitoring/tests/rs/discover_arbiters.json b/specifications/server-discovery-and-monitoring/tests/rs/discover_arbiters.json index 53709b0cee9..803462b1561 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/discover_arbiters.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/discover_arbiters.json @@ -18,7 +18,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/discover_arbiters.yml b/specifications/server-discovery-and-monitoring/tests/rs/discover_arbiters.yml index 67d29eadb54..5334bc82413 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/discover_arbiters.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/discover_arbiters.yml @@ -16,7 +16,7 @@ phases: [ arbiters: ["b:27017"], setName: "rs", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/discover_arbiters_replicaset.json 
b/specifications/server-discovery-and-monitoring/tests/rs/discover_arbiters_replicaset.json index 64fb49f4fcd..e58d7c7fb4c 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/discover_arbiters_replicaset.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/discover_arbiters_replicaset.json @@ -18,7 +18,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/discover_arbiters_replicaset.yml b/specifications/server-discovery-and-monitoring/tests/rs/discover_arbiters_replicaset.yml index c7edb52b018..882dbb0f896 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/discover_arbiters_replicaset.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/discover_arbiters_replicaset.yml @@ -16,7 +16,7 @@ phases: [ arbiters: ["b:27017"], setName: "rs", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/discover_ghost.json b/specifications/server-discovery-and-monitoring/tests/rs/discover_ghost.json index 2e24c83e0b7..3b7fc836ec1 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/discover_ghost.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/discover_ghost.json @@ -12,7 +12,7 @@ "isWritablePrimary": false, "isreplicaset": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/discover_ghost.yml b/specifications/server-discovery-and-monitoring/tests/rs/discover_ghost.yml index e613f684b46..7a9cbd55594 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/discover_ghost.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/discover_ghost.yml @@ -14,7 +14,7 @@ phases: [ isWritablePrimary: false, isreplicaset: true, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] 
], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/discover_ghost_replicaset.json b/specifications/server-discovery-and-monitoring/tests/rs/discover_ghost_replicaset.json index cf5fe83a542..1a8457983b2 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/discover_ghost_replicaset.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/discover_ghost_replicaset.json @@ -12,7 +12,7 @@ "isWritablePrimary": false, "isreplicaset": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/discover_ghost_replicaset.yml b/specifications/server-discovery-and-monitoring/tests/rs/discover_ghost_replicaset.yml index 75ec3e64a9b..61ba1eab56a 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/discover_ghost_replicaset.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/discover_ghost_replicaset.yml @@ -14,7 +14,7 @@ phases: [ isWritablePrimary: false, isreplicaset: true, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/discover_hidden.json b/specifications/server-discovery-and-monitoring/tests/rs/discover_hidden.json index e4a90f1f9cd..10bd51edebd 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/discover_hidden.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/discover_hidden.json @@ -18,7 +18,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/discover_hidden.yml b/specifications/server-discovery-and-monitoring/tests/rs/discover_hidden.yml index 64ed1d82b96..f483255802a 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/discover_hidden.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/discover_hidden.yml @@ -17,7 +17,7 @@ phases: [ hosts: 
["c:27017", "d:27017"], setName: "rs", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }], ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/discover_hidden_replicaset.json b/specifications/server-discovery-and-monitoring/tests/rs/discover_hidden_replicaset.json index 04420596f00..63cf5586757 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/discover_hidden_replicaset.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/discover_hidden_replicaset.json @@ -18,7 +18,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/discover_hidden_replicaset.yml b/specifications/server-discovery-and-monitoring/tests/rs/discover_hidden_replicaset.yml index 614a39d4826..8d496544d27 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/discover_hidden_replicaset.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/discover_hidden_replicaset.yml @@ -17,7 +17,7 @@ phases: [ hosts: ["c:27017", "d:27017"], setName: "rs", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }], ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/discover_passives.json b/specifications/server-discovery-and-monitoring/tests/rs/discover_passives.json index 30258409f64..0a292c675c2 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/discover_passives.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/discover_passives.json @@ -18,7 +18,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -56,7 +56,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/discover_passives.yml b/specifications/server-discovery-and-monitoring/tests/rs/discover_passives.yml index 637d86d7f15..25419733cf4 100644 
--- a/specifications/server-discovery-and-monitoring/tests/rs/discover_passives.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/discover_passives.yml @@ -16,7 +16,7 @@ phases: [ passives: ["b:27017"], setName: "rs", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], @@ -55,7 +55,7 @@ phases: [ passives: ["b:27017"], setName: "rs", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/discover_passives_replicaset.json b/specifications/server-discovery-and-monitoring/tests/rs/discover_passives_replicaset.json index 266eaa52344..c48fd476251 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/discover_passives_replicaset.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/discover_passives_replicaset.json @@ -18,7 +18,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -56,7 +56,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/discover_passives_replicaset.yml b/specifications/server-discovery-and-monitoring/tests/rs/discover_passives_replicaset.yml index a5ced995d31..d981280a2ff 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/discover_passives_replicaset.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/discover_passives_replicaset.yml @@ -16,7 +16,7 @@ phases: [ passives: ["b:27017"], setName: "rs", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], @@ -55,7 +55,7 @@ phases: [ passives: ["b:27017"], setName: "rs", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/discover_primary.json b/specifications/server-discovery-and-monitoring/tests/rs/discover_primary.json index 2d1292bbd47..04e7a4984c8 100644 --- 
a/specifications/server-discovery-and-monitoring/tests/rs/discover_primary.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/discover_primary.json @@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/discover_primary.yml b/specifications/server-discovery-and-monitoring/tests/rs/discover_primary.yml index eaedf130ba1..3c11e3e4f20 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/discover_primary.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/discover_primary.yml @@ -15,7 +15,7 @@ phases: [ setName: "rs", hosts: ["a:27017", "b:27017"], minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/discover_primary_replicaset.json b/specifications/server-discovery-and-monitoring/tests/rs/discover_primary_replicaset.json index 54dfefba5fd..3cdcfdcee2d 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/discover_primary_replicaset.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/discover_primary_replicaset.json @@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/discover_primary_replicaset.yml b/specifications/server-discovery-and-monitoring/tests/rs/discover_primary_replicaset.yml index 7879701bb25..47831fa60ab 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/discover_primary_replicaset.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/discover_primary_replicaset.yml @@ -15,7 +15,7 @@ phases: [ setName: "rs", hosts: ["a:27017", "b:27017"], minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/discover_rsother.json 
b/specifications/server-discovery-and-monitoring/tests/rs/discover_rsother.json index 4ab25667f01..9c3b8d8b7dd 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/discover_rsother.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/discover_rsother.json @@ -17,7 +17,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/discover_rsother.yml b/specifications/server-discovery-and-monitoring/tests/rs/discover_rsother.yml index d78e70c5d72..11c65863f41 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/discover_rsother.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/discover_rsother.yml @@ -16,7 +16,7 @@ phases: [ hosts: ["c:27017", "d:27017"], setName: "rs", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/discover_rsother_replicaset.json b/specifications/server-discovery-and-monitoring/tests/rs/discover_rsother_replicaset.json index e3958d70adb..3da9efb0660 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/discover_rsother_replicaset.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/discover_rsother_replicaset.json @@ -18,7 +18,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -34,7 +34,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/discover_rsother_replicaset.yml b/specifications/server-discovery-and-monitoring/tests/rs/discover_rsother_replicaset.yml index 19159d1efe7..c25d33ab2b0 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/discover_rsother_replicaset.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/discover_rsother_replicaset.yml @@ -17,7 +17,7 @@ phases: [ 
hosts: ["c:27017", "d:27017"], setName: "rs", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }], ["b:27017", { @@ -28,7 +28,7 @@ phases: [ hosts: ["c:27017", "d:27017"], setName: "rs", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/discover_secondary.json b/specifications/server-discovery-and-monitoring/tests/rs/discover_secondary.json index 22325d4e03a..64a1ce31e31 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/discover_secondary.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/discover_secondary.json @@ -17,7 +17,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/discover_secondary.yml b/specifications/server-discovery-and-monitoring/tests/rs/discover_secondary.yml index 184849d3ff4..83af822edad 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/discover_secondary.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/discover_secondary.yml @@ -16,7 +16,7 @@ phases: [ setName: "rs", hosts: ["a:27017", "b:27017"], minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/discover_secondary_replicaset.json b/specifications/server-discovery-and-monitoring/tests/rs/discover_secondary_replicaset.json index d903b6444d7..d230f976a2c 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/discover_secondary_replicaset.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/discover_secondary_replicaset.json @@ -17,7 +17,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/discover_secondary_replicaset.yml 
b/specifications/server-discovery-and-monitoring/tests/rs/discover_secondary_replicaset.yml index c73a535f691..71229387ccd 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/discover_secondary_replicaset.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/discover_secondary_replicaset.yml @@ -16,7 +16,7 @@ phases: [ setName: "rs", hosts: ["a:27017", "b:27017"], minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/discovery.json b/specifications/server-discovery-and-monitoring/tests/rs/discovery.json index 50e1269223a..e9deaa7587f 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/discovery.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/discovery.json @@ -18,7 +18,7 @@ "c:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -59,7 +59,7 @@ "d:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -103,7 +103,7 @@ "e:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -147,7 +147,7 @@ "c:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/discovery.yml b/specifications/server-discovery-and-monitoring/tests/rs/discovery.yml index 30c82e0cef9..19be04897bf 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/discovery.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/discovery.yml @@ -17,7 +17,7 @@ phases: [ setName: "rs", hosts: ["a:27017", "b:27017", "c:27017"], minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], @@ -63,7 +63,7 @@ phases: [ primary: "d:27017", hosts: ["b:27017", "c:27017", "d:27017"], minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], @@ -113,7 +113,7 @@ phases: [ setName: "rs", hosts: ["b:27017", "c:27017", "d:27017", "e:27017"], minWireVersion: 0, 
- maxWireVersion: 6 + maxWireVersion: 21 }] ], @@ -165,7 +165,7 @@ phases: [ setName: "rs", hosts: ["a:27017", "b:27017", "c:27017"], minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/equal_electionids.json b/specifications/server-discovery-and-monitoring/tests/rs/equal_electionids.json index 17df3207fa8..f1deedf9f42 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/equal_electionids.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/equal_electionids.json @@ -20,7 +20,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -39,7 +39,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/equal_electionids.yml b/specifications/server-discovery-and-monitoring/tests/rs/equal_electionids.yml index 48bb683d64e..17e8aa2c11e 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/equal_electionids.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/equal_electionids.yml @@ -16,7 +16,7 @@ phases: [ setVersion: 1, electionId: {"$oid": "000000000000000000000001"}, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }], ["b:27017", { ok: 1, @@ -27,7 +27,7 @@ phases: [ setVersion: 1, electionId: {"$oid": "000000000000000000000001"}, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/hosts_differ_from_seeds.json b/specifications/server-discovery-and-monitoring/tests/rs/hosts_differ_from_seeds.json index 4e02304c619..085e81e2663 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/hosts_differ_from_seeds.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/hosts_differ_from_seeds.json @@ -15,7 +15,7 @@ "b:27017" ], 
"minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/hosts_differ_from_seeds.yml b/specifications/server-discovery-and-monitoring/tests/rs/hosts_differ_from_seeds.yml index e97fbca05bb..2ecd27ec7c0 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/hosts_differ_from_seeds.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/hosts_differ_from_seeds.yml @@ -15,7 +15,7 @@ phases: [ setName: "rs", hosts: ["b:27017"], minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/incompatible_arbiter.json b/specifications/server-discovery-and-monitoring/tests/rs/incompatible_arbiter.json index f0539cb3373..bda18d9f6f7 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/incompatible_arbiter.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/incompatible_arbiter.json @@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ diff --git a/specifications/server-discovery-and-monitoring/tests/rs/incompatible_arbiter.yml b/specifications/server-discovery-and-monitoring/tests/rs/incompatible_arbiter.yml index e4928f191d4..2e46ff29514 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/incompatible_arbiter.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/incompatible_arbiter.yml @@ -12,7 +12,7 @@ phases: setName: "rs" hosts: ["a:27017", "b:27017"] minWireVersion: 0 - maxWireVersion: 6 + maxWireVersion: 21 - - "b:27017" - ok: 1 diff --git a/specifications/server-discovery-and-monitoring/tests/rs/incompatible_ghost.json b/specifications/server-discovery-and-monitoring/tests/rs/incompatible_ghost.json index 824e953f906..9d82e316822 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/incompatible_ghost.json +++ 
b/specifications/server-discovery-and-monitoring/tests/rs/incompatible_ghost.json @@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ diff --git a/specifications/server-discovery-and-monitoring/tests/rs/incompatible_ghost.yml b/specifications/server-discovery-and-monitoring/tests/rs/incompatible_ghost.yml index da1db790fe4..c99badf4a39 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/incompatible_ghost.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/incompatible_ghost.yml @@ -12,7 +12,7 @@ phases: setName: "rs" hosts: ["a:27017", "b:27017"] minWireVersion: 0 - maxWireVersion: 6 + maxWireVersion: 21 - - "b:27017" - ok: 1 diff --git a/specifications/server-discovery-and-monitoring/tests/rs/incompatible_other.json b/specifications/server-discovery-and-monitoring/tests/rs/incompatible_other.json index 6f301ef5de3..149ba01142a 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/incompatible_other.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/incompatible_other.json @@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ diff --git a/specifications/server-discovery-and-monitoring/tests/rs/incompatible_other.yml b/specifications/server-discovery-and-monitoring/tests/rs/incompatible_other.yml index 98061c0a1fb..16452e7b02e 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/incompatible_other.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/incompatible_other.yml @@ -12,7 +12,7 @@ phases: setName: "rs" hosts: ["a:27017", "b:27017"] minWireVersion: 0 - maxWireVersion: 6 + maxWireVersion: 21 - - "b:27017" - ok: 1 diff --git a/specifications/server-discovery-and-monitoring/tests/rs/ls_timeout.json b/specifications/server-discovery-and-monitoring/tests/rs/ls_timeout.json index 96389d3b769..c68790ddfda 100644 --- 
a/specifications/server-discovery-and-monitoring/tests/rs/ls_timeout.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/ls_timeout.json @@ -20,7 +20,7 @@ "setName": "rs", "logicalSessionTimeoutMinutes": 3, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -58,7 +58,7 @@ "isWritablePrimary": false, "isreplicaset": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -104,7 +104,7 @@ "setName": "rs", "arbiterOnly": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -152,7 +152,7 @@ "setName": "rs", "logicalSessionTimeoutMinutes": 2, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -194,7 +194,7 @@ "hidden": true, "logicalSessionTimeoutMinutes": 1, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -244,7 +244,7 @@ "setName": "rs", "logicalSessionTimeoutMinutes": null, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/ls_timeout.yml b/specifications/server-discovery-and-monitoring/tests/rs/ls_timeout.yml index 26679f339ed..2cea00e99eb 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/ls_timeout.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/ls_timeout.yml @@ -14,7 +14,7 @@ phases: [ setName: "rs", logicalSessionTimeoutMinutes: 3, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }], ], outcome: { @@ -51,7 +51,7 @@ phases: [ isWritablePrimary: false, isreplicaset: true, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }], ], outcome: { @@ -90,7 +90,7 @@ phases: [ setName: "rs", arbiterOnly: true, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], outcome: { @@ -131,7 +131,7 @@ phases: [ setName: "rs", logicalSessionTimeoutMinutes: 2, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }], ], outcome: { @@ -172,7 +172,7 @@ phases: [ hidden: true, 
logicalSessionTimeoutMinutes: 1, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }], ], outcome: { @@ -214,7 +214,7 @@ phases: [ setName: "rs", logicalSessionTimeoutMinutes: null, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/member_reconfig.json b/specifications/server-discovery-and-monitoring/tests/rs/member_reconfig.json index 0e2c2c462ea..a05fed0efb3 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/member_reconfig.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/member_reconfig.json @@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -49,7 +49,7 @@ "a:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/member_reconfig.yml b/specifications/server-discovery-and-monitoring/tests/rs/member_reconfig.yml index 37e63bc0597..a43a88d53fc 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/member_reconfig.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/member_reconfig.yml @@ -15,7 +15,7 @@ phases: [ setName: "rs", hosts: ["a:27017", "b:27017"], minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], @@ -52,7 +52,7 @@ phases: [ setName: "rs", hosts: ["a:27017"], minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/member_standalone.json b/specifications/server-discovery-and-monitoring/tests/rs/member_standalone.json index 0756003a89f..db100db9f37 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/member_standalone.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/member_standalone.json @@ -11,7 +11,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ 
-40,7 +40,7 @@ "a:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/member_standalone.yml b/specifications/server-discovery-and-monitoring/tests/rs/member_standalone.yml index 50c0056650f..1be000ea4ee 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/member_standalone.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/member_standalone.yml @@ -13,7 +13,7 @@ phases: [ helloOk: true, isWritablePrimary: true, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], @@ -44,7 +44,7 @@ phases: [ setName: "rs", hosts: ["a:27017"], minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/new_primary.json b/specifications/server-discovery-and-monitoring/tests/rs/new_primary.json index ed1a6245f96..1a84c69c919 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/new_primary.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/new_primary.json @@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -50,7 +50,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/new_primary.yml b/specifications/server-discovery-and-monitoring/tests/rs/new_primary.yml index 736dd06c5f3..f2485a18633 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/new_primary.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/new_primary.yml @@ -15,7 +15,7 @@ phases: [ setName: "rs", hosts: ["a:27017", "b:27017"], minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], @@ -52,7 +52,7 @@ phases: [ setName: "rs", hosts: ["a:27017", "b:27017"], minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git 
a/specifications/server-discovery-and-monitoring/tests/rs/new_primary_new_electionid.json b/specifications/server-discovery-and-monitoring/tests/rs/new_primary_new_electionid.json index ccb3a41f757..509720d445a 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/new_primary_new_electionid.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/new_primary_new_electionid.json @@ -20,7 +20,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -67,7 +67,7 @@ "$oid": "000000000000000000000002" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -114,7 +114,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/new_primary_new_electionid.yml b/specifications/server-discovery-and-monitoring/tests/rs/new_primary_new_electionid.yml index dfebbd8561f..5641cfda954 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/new_primary_new_electionid.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/new_primary_new_electionid.yml @@ -16,7 +16,7 @@ phases: [ setVersion: 1, electionId: {"$oid": "000000000000000000000001"}, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], @@ -54,7 +54,7 @@ phases: [ setVersion: 1, electionId: {"$oid": "000000000000000000000002"}, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], @@ -92,7 +92,7 @@ phases: [ setVersion: 1, electionId: {"$oid": "000000000000000000000001"}, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], outcome: { diff --git a/specifications/server-discovery-and-monitoring/tests/rs/new_primary_new_setversion.json b/specifications/server-discovery-and-monitoring/tests/rs/new_primary_new_setversion.json index 415a0f66aa9..96533c61ee2 100644 --- 
a/specifications/server-discovery-and-monitoring/tests/rs/new_primary_new_setversion.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/new_primary_new_setversion.json @@ -20,7 +20,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -67,7 +67,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -114,7 +114,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/new_primary_new_setversion.yml b/specifications/server-discovery-and-monitoring/tests/rs/new_primary_new_setversion.yml index 3ebc798b61e..f2697971125 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/new_primary_new_setversion.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/new_primary_new_setversion.yml @@ -16,7 +16,7 @@ phases: [ setVersion: 1, electionId: {"$oid": "000000000000000000000001"}, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], @@ -54,7 +54,7 @@ phases: [ setVersion: 2, electionId: {"$oid": "000000000000000000000001"}, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], @@ -92,7 +92,7 @@ phases: [ setVersion: 1, electionId: {"$oid": "000000000000000000000001"}, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], outcome: { diff --git a/specifications/server-discovery-and-monitoring/tests/rs/new_primary_wrong_set_name.json b/specifications/server-discovery-and-monitoring/tests/rs/new_primary_wrong_set_name.json index d7b19cfe8f1..774b3a57364 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/new_primary_wrong_set_name.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/new_primary_wrong_set_name.json @@ -16,7 +16,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] 
], @@ -49,7 +49,7 @@ ], "setName": "wrong", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/new_primary_wrong_set_name.yml b/specifications/server-discovery-and-monitoring/tests/rs/new_primary_wrong_set_name.yml index ca6303cda97..dc6df9cd8b4 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/new_primary_wrong_set_name.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/new_primary_wrong_set_name.yml @@ -16,7 +16,7 @@ phases: [ hosts: ["a:27017", "b:27017"], setName: "rs", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], @@ -55,7 +55,7 @@ phases: [ hosts: ["a:27017"], setName: "wrong", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/non_rs_member.json b/specifications/server-discovery-and-monitoring/tests/rs/non_rs_member.json index 538077ef099..6bf10bd628f 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/non_rs_member.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/non_rs_member.json @@ -10,7 +10,7 @@ "ok": 1, "helloOk": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/non_rs_member.yml b/specifications/server-discovery-and-monitoring/tests/rs/non_rs_member.yml index c18b27ee3fa..4e2f0b1be26 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/non_rs_member.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/non_rs_member.yml @@ -11,7 +11,7 @@ phases: [ ok: 1, helloOk: true, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/normalize_case.json b/specifications/server-discovery-and-monitoring/tests/rs/normalize_case.json index 96a944f0c35..62915495e0c 100644 --- 
a/specifications/server-discovery-and-monitoring/tests/rs/normalize_case.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/normalize_case.json @@ -21,7 +21,7 @@ "C:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/normalize_case.yml b/specifications/server-discovery-and-monitoring/tests/rs/normalize_case.yml index d8003ee3754..a543ab72af1 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/normalize_case.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/normalize_case.yml @@ -17,7 +17,7 @@ phases: [ passives: ["B:27017"], arbiters: ["C:27017"], minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/normalize_case_me.json b/specifications/server-discovery-and-monitoring/tests/rs/normalize_case_me.json index ab1720cefc0..0d9ba6213e2 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/normalize_case_me.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/normalize_case_me.json @@ -22,7 +22,7 @@ "C:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -67,7 +67,7 @@ "C:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/normalize_case_me.yml b/specifications/server-discovery-and-monitoring/tests/rs/normalize_case_me.yml index a0df3351a51..03ac43f314e 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/normalize_case_me.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/normalize_case_me.yml @@ -18,7 +18,7 @@ phases: [ passives: ["B:27017"], arbiters: ["C:27017"], minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], @@ -66,7 +66,7 @@ phases: [ passives: ["B:27017"], arbiters: ["C:27017"], minWireVersion: 0, - maxWireVersion: 6 + 
maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/null_election_id-pre-6.0.json b/specifications/server-discovery-and-monitoring/tests/rs/null_election_id-pre-6.0.json index f1fa2e252e1..9e7ccc6e7f2 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/null_election_id-pre-6.0.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/null_election_id-pre-6.0.json @@ -18,7 +18,7 @@ "setVersion": 1, "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 7 } ] ], @@ -66,7 +66,7 @@ "$oid": "000000000000000000000002" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -116,7 +116,7 @@ "setVersion": 1, "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 7 } ] ], @@ -167,7 +167,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 7 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/null_election_id-pre-6.0.yml b/specifications/server-discovery-and-monitoring/tests/rs/null_election_id-pre-6.0.yml index 7e9bd55021a..63f9699f228 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/null_election_id-pre-6.0.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/null_election_id-pre-6.0.yml @@ -15,7 +15,7 @@ phases: [ setVersion: 1, setName: "rs", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 7 }] ], @@ -57,7 +57,7 @@ phases: [ setVersion: 1, electionId: {"$oid": "000000000000000000000002"}, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 7 }] ], @@ -99,7 +99,7 @@ phases: [ setVersion: 1, setName: "rs", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 7 }] ], outcome: { @@ -142,7 +142,7 @@ phases: [ setVersion: 1, electionId: {"$oid": "000000000000000000000001"}, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 7 }] ], outcome: { diff --git 
a/specifications/server-discovery-and-monitoring/tests/rs/primary_becomes_ghost.json b/specifications/server-discovery-and-monitoring/tests/rs/primary_becomes_ghost.json index 9c54b39856e..e34280e88c1 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/primary_becomes_ghost.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/primary_becomes_ghost.json @@ -15,7 +15,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -41,7 +41,7 @@ "isWritablePrimary": false, "isreplicaset": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/primary_becomes_ghost.yml b/specifications/server-discovery-and-monitoring/tests/rs/primary_becomes_ghost.yml index dbf5b9289d9..ee0158db0d0 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/primary_becomes_ghost.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/primary_becomes_ghost.yml @@ -15,7 +15,7 @@ phases: [ hosts: ["a:27017"], setName: "rs", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], @@ -43,7 +43,7 @@ phases: [ isWritablePrimary: false, isreplicaset: true, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/primary_becomes_mongos.json b/specifications/server-discovery-and-monitoring/tests/rs/primary_becomes_mongos.json index ac416e57d5f..79510d9399b 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/primary_becomes_mongos.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/primary_becomes_mongos.json @@ -15,7 +15,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -41,7 +41,7 @@ "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git 
a/specifications/server-discovery-and-monitoring/tests/rs/primary_becomes_mongos.yml b/specifications/server-discovery-and-monitoring/tests/rs/primary_becomes_mongos.yml index 6cdb0771068..251f1552776 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/primary_becomes_mongos.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/primary_becomes_mongos.yml @@ -15,7 +15,7 @@ phases: [ hosts: ["a:27017"], setName: "rs", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], @@ -43,7 +43,7 @@ phases: [ isWritablePrimary: true, msg: "isdbgrid", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/primary_becomes_standalone.json b/specifications/server-discovery-and-monitoring/tests/rs/primary_becomes_standalone.json index a64524d0ca4..abcc1e2d012 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/primary_becomes_standalone.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/primary_becomes_standalone.json @@ -15,7 +15,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -38,7 +38,7 @@ { "ok": 1, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/primary_becomes_standalone.yml b/specifications/server-discovery-and-monitoring/tests/rs/primary_becomes_standalone.yml index abcc7fcfe3e..9f6a0817bf7 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/primary_becomes_standalone.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/primary_becomes_standalone.yml @@ -15,7 +15,7 @@ phases: [ hosts: ["a:27017"], setName: "rs", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], @@ -40,7 +40,7 @@ phases: [ ["a:27017", { ok: 1, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git 
a/specifications/server-discovery-and-monitoring/tests/rs/primary_changes_set_name.json b/specifications/server-discovery-and-monitoring/tests/rs/primary_changes_set_name.json index bf70ca3014b..3b564d2c931 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/primary_changes_set_name.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/primary_changes_set_name.json @@ -15,7 +15,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -44,7 +44,7 @@ ], "setName": "wrong", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/primary_changes_set_name.yml b/specifications/server-discovery-and-monitoring/tests/rs/primary_changes_set_name.yml index 00ed1c0a13e..e49aa249116 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/primary_changes_set_name.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/primary_changes_set_name.yml @@ -16,7 +16,7 @@ phases: [ hosts: ["a:27017"], setName: "rs", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], @@ -48,7 +48,7 @@ phases: [ hosts: ["a:27017"], setName: "wrong", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/primary_disconnect.json b/specifications/server-discovery-and-monitoring/tests/rs/primary_disconnect.json index 3db854f0859..73a01a82a9f 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/primary_disconnect.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/primary_disconnect.json @@ -15,7 +15,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/primary_disconnect.yml b/specifications/server-discovery-and-monitoring/tests/rs/primary_disconnect.yml index 9a49656549a..84c88daa671 100644 --- 
a/specifications/server-discovery-and-monitoring/tests/rs/primary_disconnect.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/primary_disconnect.yml @@ -15,7 +15,7 @@ phases: [ hosts: ["a:27017"], setName: "rs", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/primary_disconnect_electionid.json b/specifications/server-discovery-and-monitoring/tests/rs/primary_disconnect_electionid.json index 3a80b150fe3..5a91188ea8d 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/primary_disconnect_electionid.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/primary_disconnect_electionid.json @@ -20,7 +20,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -39,7 +39,7 @@ "$oid": "000000000000000000000002" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -115,7 +115,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -159,7 +159,7 @@ "$oid": "000000000000000000000003" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -203,7 +203,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/primary_disconnect_electionid.yml b/specifications/server-discovery-and-monitoring/tests/rs/primary_disconnect_electionid.yml index 0d7d294f974..391ec31213f 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/primary_disconnect_electionid.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/primary_disconnect_electionid.yml @@ -16,7 +16,7 @@ phases: [ setVersion: 1, electionId: {"$oid": "000000000000000000000001"}, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }], ["b:27017", { ok: 1, @@ -27,7 +27,7 @@ phases: [ setVersion: 
1, electionId: {"$oid": "000000000000000000000002"}, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], @@ -91,7 +91,7 @@ phases: [ setVersion: 1, electionId: {"$oid": "000000000000000000000001"}, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], outcome: { @@ -127,7 +127,7 @@ phases: [ setVersion: 1, electionId: {"$oid": "000000000000000000000003"}, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], outcome: { @@ -163,7 +163,7 @@ phases: [ hosts: ["a:27017", "b:27017"], setName: "rs", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], outcome: { diff --git a/specifications/server-discovery-and-monitoring/tests/rs/primary_disconnect_setversion.json b/specifications/server-discovery-and-monitoring/tests/rs/primary_disconnect_setversion.json index 32e03fb7d4b..f7417ad77bc 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/primary_disconnect_setversion.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/primary_disconnect_setversion.json @@ -20,7 +20,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -39,7 +39,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -115,7 +115,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -159,7 +159,7 @@ "$oid": "000000000000000000000002" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -203,7 +203,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/primary_disconnect_setversion.yml b/specifications/server-discovery-and-monitoring/tests/rs/primary_disconnect_setversion.yml index 41f2f8d7b73..57eeb573e48 100644 --- 
a/specifications/server-discovery-and-monitoring/tests/rs/primary_disconnect_setversion.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/primary_disconnect_setversion.yml @@ -16,7 +16,7 @@ phases: [ setVersion: 1, electionId: {"$oid": "000000000000000000000001"}, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }], ["b:27017", { ok: 1, @@ -27,7 +27,7 @@ phases: [ setVersion: 2, electionId: {"$oid": "000000000000000000000001"}, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], @@ -91,7 +91,7 @@ phases: [ setVersion: 1, electionId: {"$oid": "000000000000000000000001"}, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], outcome: { @@ -127,7 +127,7 @@ phases: [ setVersion: 2, electionId: {"$oid": "000000000000000000000002"}, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], outcome: { @@ -163,7 +163,7 @@ phases: [ hosts: ["a:27017", "b:27017"], setName: "rs", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], outcome: { diff --git a/specifications/server-discovery-and-monitoring/tests/rs/primary_hint_from_secondary_with_mismatched_me.json b/specifications/server-discovery-and-monitoring/tests/rs/primary_hint_from_secondary_with_mismatched_me.json index bc02cc95712..1ca72225a29 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/primary_hint_from_secondary_with_mismatched_me.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/primary_hint_from_secondary_with_mismatched_me.json @@ -18,7 +18,7 @@ "setName": "rs", "primary": "b:27017", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -48,7 +48,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/primary_hint_from_secondary_with_mismatched_me.yml b/specifications/server-discovery-and-monitoring/tests/rs/primary_hint_from_secondary_with_mismatched_me.yml index 
f5b536c01f4..cebbe7d0833 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/primary_hint_from_secondary_with_mismatched_me.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/primary_hint_from_secondary_with_mismatched_me.yml @@ -17,7 +17,7 @@ phases: [ setName: "rs", primary: "b:27017", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], @@ -45,7 +45,7 @@ phases: [ hosts: ["b:27017"], setName: "rs", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/primary_mismatched_me.json b/specifications/server-discovery-and-monitoring/tests/rs/primary_mismatched_me.json index 2d2c0f40d8d..6bb6226f8a7 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/primary_mismatched_me.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/primary_mismatched_me.json @@ -31,7 +31,7 @@ "ok": 1, "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ] diff --git a/specifications/server-discovery-and-monitoring/tests/rs/primary_mismatched_me.yml b/specifications/server-discovery-and-monitoring/tests/rs/primary_mismatched_me.yml index c24fd1a990e..25ca1722e0e 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/primary_mismatched_me.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/primary_mismatched_me.yml @@ -22,6 +22,6 @@ phases: ok: 1 setName: rs minWireVersion: 0 - maxWireVersion: 6 + maxWireVersion: 21 uri: 'mongodb://localhost:27017/?replicaSet=rs' diff --git a/specifications/server-discovery-and-monitoring/tests/rs/primary_reports_new_member.json b/specifications/server-discovery-and-monitoring/tests/rs/primary_reports_new_member.json index ac0d9374f0d..ed28c48c871 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/primary_reports_new_member.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/primary_reports_new_member.json @@ -17,7 
+17,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -51,7 +51,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -86,7 +86,7 @@ "c:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -127,7 +127,7 @@ "c:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/primary_reports_new_member.yml b/specifications/server-discovery-and-monitoring/tests/rs/primary_reports_new_member.yml index de3f528fe9e..aa2878d6374 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/primary_reports_new_member.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/primary_reports_new_member.yml @@ -17,7 +17,7 @@ phases: [ setName: "rs", hosts: ["a:27017", "b:27017"], minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], @@ -55,7 +55,7 @@ phases: [ setName: "rs", hosts: ["a:27017", "b:27017"], minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], @@ -93,7 +93,7 @@ phases: [ setName: "rs", hosts: ["a:27017", "b:27017", "c:27017"], minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], @@ -140,7 +140,7 @@ phases: [ primary: "b:27017", hosts: ["a:27017", "b:27017", "c:27017"], minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/primary_to_no_primary_mismatched_me.json b/specifications/server-discovery-and-monitoring/tests/rs/primary_to_no_primary_mismatched_me.json index 6dbd73dadc1..798a648d196 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/primary_to_no_primary_mismatched_me.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/primary_to_no_primary_mismatched_me.json @@ -17,7 +17,7 @@ "me": "a:27017", "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -52,7 
+52,7 @@ "me": "c:27017", "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/primary_to_no_primary_mismatched_me.yml b/specifications/server-discovery-and-monitoring/tests/rs/primary_to_no_primary_mismatched_me.yml index a78982c28e1..e8bdb00f0ff 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/primary_to_no_primary_mismatched_me.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/primary_to_no_primary_mismatched_me.yml @@ -16,7 +16,7 @@ phases: [ me: "a:27017", setName: "rs", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], @@ -53,7 +53,7 @@ phases: [ me : "c:27017", setName: "rs", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/primary_wrong_set_name.json b/specifications/server-discovery-and-monitoring/tests/rs/primary_wrong_set_name.json index cc0691fb8c7..1366e389969 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/primary_wrong_set_name.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/primary_wrong_set_name.json @@ -15,7 +15,7 @@ ], "setName": "wrong", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/primary_wrong_set_name.yml b/specifications/server-discovery-and-monitoring/tests/rs/primary_wrong_set_name.yml index d4434522a98..3e463333e6d 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/primary_wrong_set_name.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/primary_wrong_set_name.yml @@ -15,7 +15,7 @@ phases: [ hosts: ["a:27017"], setName: "wrong", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/repeated.json 
b/specifications/server-discovery-and-monitoring/tests/rs/repeated.json index 610aeae0ac7..3ce0948ab82 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/repeated.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/repeated.json @@ -18,7 +18,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -49,7 +49,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -84,7 +84,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -120,7 +120,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/repeated.yml b/specifications/server-discovery-and-monitoring/tests/rs/repeated.yml index f651005babe..b9e14ed9828 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/repeated.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/repeated.yml @@ -15,7 +15,7 @@ phases: hosts: ["a:27017", "c:27017"] setName: "rs" minWireVersion: 0 - maxWireVersion: 6 + maxWireVersion: 21 outcome: servers: "a:27017": @@ -39,7 +39,7 @@ phases: helloOk: true isWritablePrimary: true minWireVersion: 0 - maxWireVersion: 6 + maxWireVersion: 21 outcome: servers: "a:27017": @@ -64,7 +64,7 @@ phases: hosts: ["a:27017", "c:27017"] setName: "rs" minWireVersion: 0 - maxWireVersion: 6 + maxWireVersion: 21 outcome: servers: "a:27017": @@ -90,7 +90,7 @@ phases: hosts: ["a:27017", "c:27017"] setName: "rs" minWireVersion: 0 - maxWireVersion: 6 + maxWireVersion: 21 outcome: servers: "a:27017": diff --git a/specifications/server-discovery-and-monitoring/tests/rs/replicaset_rsnp.json b/specifications/server-discovery-and-monitoring/tests/rs/replicaset_rsnp.json index 3148e1c141f..1cd732b82f1 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/replicaset_rsnp.json +++ 
b/specifications/server-discovery-and-monitoring/tests/rs/replicaset_rsnp.json @@ -11,7 +11,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/replicaset_rsnp.yml b/specifications/server-discovery-and-monitoring/tests/rs/replicaset_rsnp.yml index 87e80bdb319..6bdf8cbe2d8 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/replicaset_rsnp.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/replicaset_rsnp.yml @@ -11,7 +11,7 @@ phases: helloOk: true isWritablePrimary: true minWireVersion: 0 - maxWireVersion: 6 + maxWireVersion: 21 outcome: # Server is removed because it's a standalone and the driver # started in RSNP topology diff --git a/specifications/server-discovery-and-monitoring/tests/rs/response_from_removed.json b/specifications/server-discovery-and-monitoring/tests/rs/response_from_removed.json index 87a66d9e728..fa46a14ceb3 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/response_from_removed.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/response_from_removed.json @@ -15,7 +15,7 @@ "a:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -46,7 +46,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/response_from_removed.yml b/specifications/server-discovery-and-monitoring/tests/rs/response_from_removed.yml index 6ec66c87501..fc9961ce364 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/response_from_removed.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/response_from_removed.yml @@ -15,7 +15,7 @@ phases: [ setName: "rs", hosts: ["a:27017"], minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], @@ -47,7 +47,7 @@ phases: [ setName: "rs", hosts: ["a:27017", 
"b:27017"], minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/sec_not_auth.json b/specifications/server-discovery-and-monitoring/tests/rs/sec_not_auth.json index a39855e654a..ccbe7a08af9 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/sec_not_auth.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/sec_not_auth.json @@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -32,7 +32,7 @@ "c:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/sec_not_auth.yml b/specifications/server-discovery-and-monitoring/tests/rs/sec_not_auth.yml index 09c75f9c78c..507eb9d2927 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/sec_not_auth.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/sec_not_auth.yml @@ -15,7 +15,7 @@ phases: [ setName: "rs", hosts: ["a:27017", "b:27017"], minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }], ["b:27017", { @@ -27,7 +27,7 @@ phases: [ setName: "rs", hosts: ["b:27017", "c:27017"], minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/secondary_ignore_ok_0-pre-6.0.json b/specifications/server-discovery-and-monitoring/tests/rs/secondary_ignore_ok_0-pre-6.0.json index 054425c84c0..f27060533cd 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/secondary_ignore_ok_0-pre-6.0.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/secondary_ignore_ok_0-pre-6.0.json @@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -32,7 +32,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -59,7 +59,7 @@ { "ok": 0, "minWireVersion": 0, - 
"maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/secondary_ignore_ok_0-pre-6.0.yml b/specifications/server-discovery-and-monitoring/tests/rs/secondary_ignore_ok_0-pre-6.0.yml index de9048d4012..b63c43f2277 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/secondary_ignore_ok_0-pre-6.0.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/secondary_ignore_ok_0-pre-6.0.yml @@ -15,7 +15,7 @@ phases: [ setName: "rs", hosts: ["a:27017", "b:27017"], minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }], ["b:27017", { @@ -26,7 +26,7 @@ phases: [ setName: "rs", hosts: ["a:27017", "b:27017"], minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], @@ -59,7 +59,7 @@ phases: [ ok: 0, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/secondary_ignore_ok_0.json b/specifications/server-discovery-and-monitoring/tests/rs/secondary_ignore_ok_0.json index ee9519930b4..9ffff58ef05 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/secondary_ignore_ok_0.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/secondary_ignore_ok_0.json @@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -32,7 +32,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -59,7 +59,7 @@ { "ok": 0, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/secondary_ignore_ok_0.yml b/specifications/server-discovery-and-monitoring/tests/rs/secondary_ignore_ok_0.yml index d94fafaf992..796e7f668e1 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/secondary_ignore_ok_0.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/secondary_ignore_ok_0.yml @@ -15,7 +15,7 @@ phases: [ 
setName: "rs", hosts: ["a:27017", "b:27017"], minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }], ["b:27017", { @@ -26,7 +26,7 @@ phases: [ setName: "rs", hosts: ["a:27017", "b:27017"], minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], @@ -59,7 +59,7 @@ phases: [ ok: 0, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/secondary_ipv6_literal.json b/specifications/server-discovery-and-monitoring/tests/rs/secondary_ipv6_literal.json new file mode 100644 index 00000000000..c23d8dc4c95 --- /dev/null +++ b/specifications/server-discovery-and-monitoring/tests/rs/secondary_ipv6_literal.json @@ -0,0 +1,38 @@ +{ + "description": "Secondary with IPv6 literal", + "uri": "mongodb://[::1]/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "[::1]:27017", + { + "ok": 1, + "helloOk": true, + "isWritablePrimary": false, + "secondary": true, + "setName": "rs", + "me": "[::1]:27017", + "hosts": [ + "[::1]:27017" + ], + "minWireVersion": 0, + "maxWireVersion": 26 + } + ] + ], + "outcome": { + "servers": { + "[::1]:27017": { + "type": "RSSecondary", + "setName": "rs" + } + }, + "topologyType": "ReplicaSetNoPrimary", + "setName": "rs", + "logicalSessionTimeoutMinutes": null, + "compatible": true + } + } + ] +} diff --git a/specifications/server-discovery-and-monitoring/tests/rs/secondary_ipv6_literal.yml b/specifications/server-discovery-and-monitoring/tests/rs/secondary_ipv6_literal.yml new file mode 100644 index 00000000000..bac34969687 --- /dev/null +++ b/specifications/server-discovery-and-monitoring/tests/rs/secondary_ipv6_literal.yml @@ -0,0 +1,25 @@ +# Regression test for bug discovered in HELP-68823. 
+description: Secondary with IPv6 literal +uri: mongodb://[::1]/?replicaSet=rs +phases: +- responses: + - - "[::1]:27017" + - ok: 1 + helloOk: true + isWritablePrimary: false + secondary: true + setName: rs + me: "[::1]:27017" + hosts: + - "[::1]:27017" + minWireVersion: 0 + maxWireVersion: 26 + outcome: + servers: + "[::1]:27017": + type: RSSecondary + setName: rs + topologyType: ReplicaSetNoPrimary + setName: rs + logicalSessionTimeoutMinutes: null + compatible: true diff --git a/specifications/server-discovery-and-monitoring/tests/rs/secondary_mismatched_me.json b/specifications/server-discovery-and-monitoring/tests/rs/secondary_mismatched_me.json index 6f1b9b59866..790e4bfca84 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/secondary_mismatched_me.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/secondary_mismatched_me.json @@ -32,7 +32,7 @@ "ok": 1, "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ] diff --git a/specifications/server-discovery-and-monitoring/tests/rs/secondary_mismatched_me.yml b/specifications/server-discovery-and-monitoring/tests/rs/secondary_mismatched_me.yml index e5f0f9aceb6..d359609d838 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/secondary_mismatched_me.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/secondary_mismatched_me.yml @@ -25,4 +25,4 @@ phases: ok: 1 setName: rs minWireVersion: 0 - maxWireVersion: 6 + maxWireVersion: 21 diff --git a/specifications/server-discovery-and-monitoring/tests/rs/secondary_wrong_set_name.json b/specifications/server-discovery-and-monitoring/tests/rs/secondary_wrong_set_name.json index 8d2f152f594..1f86b505437 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/secondary_wrong_set_name.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/secondary_wrong_set_name.json @@ -16,7 +16,7 @@ ], "setName": "wrong", "minWireVersion": 0, - "maxWireVersion": 6 + 
"maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/secondary_wrong_set_name.yml b/specifications/server-discovery-and-monitoring/tests/rs/secondary_wrong_set_name.yml index 0121d631ce4..60823815f00 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/secondary_wrong_set_name.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/secondary_wrong_set_name.yml @@ -16,7 +16,7 @@ phases: [ hosts: ["a:27017"], setName: "wrong", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/secondary_wrong_set_name_with_primary.json b/specifications/server-discovery-and-monitoring/tests/rs/secondary_wrong_set_name_with_primary.json index b7ef2d6d6ab..6b899141514 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/secondary_wrong_set_name_with_primary.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/secondary_wrong_set_name_with_primary.json @@ -16,7 +16,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -51,7 +51,7 @@ ], "setName": "wrong", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/secondary_wrong_set_name_with_primary.yml b/specifications/server-discovery-and-monitoring/tests/rs/secondary_wrong_set_name_with_primary.yml index acd471e78ba..8d1990363cb 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/secondary_wrong_set_name_with_primary.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/secondary_wrong_set_name_with_primary.yml @@ -15,7 +15,7 @@ phases: [ hosts: ["a:27017", "b:27017"], setName: "rs", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], @@ -53,7 +53,7 @@ phases: [ hosts: ["a:27017", "b:27017"], setName: "wrong", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff 
--git a/specifications/server-discovery-and-monitoring/tests/rs/setversion_without_electionid-pre-6.0.json b/specifications/server-discovery-and-monitoring/tests/rs/setversion_without_electionid-pre-6.0.json index c2e2fe5b9ba..e62c6963ed3 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/setversion_without_electionid-pre-6.0.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/setversion_without_electionid-pre-6.0.json @@ -17,7 +17,7 @@ "setName": "rs", "setVersion": 2, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 7 } ] ], @@ -56,7 +56,7 @@ "setName": "rs", "setVersion": 1, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 7 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/setversion_without_electionid-pre-6.0.yml b/specifications/server-discovery-and-monitoring/tests/rs/setversion_without_electionid-pre-6.0.yml index 54bfa4d2563..0fe6819aa71 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/setversion_without_electionid-pre-6.0.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/setversion_without_electionid-pre-6.0.yml @@ -15,7 +15,7 @@ phases: [ setName: "rs", setVersion: 2, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 7 }] ], @@ -52,7 +52,7 @@ phases: [ setName: "rs", setVersion: 1, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 7 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/stepdown_change_set_name.json b/specifications/server-discovery-and-monitoring/tests/rs/stepdown_change_set_name.json index e9075f97f22..6de995518d3 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/stepdown_change_set_name.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/stepdown_change_set_name.json @@ -15,7 +15,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -45,7 +45,7 @@ ], "setName": "wrong", "minWireVersion": 0, - 
"maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/stepdown_change_set_name.yml b/specifications/server-discovery-and-monitoring/tests/rs/stepdown_change_set_name.yml index 9c4140925d5..2e1c1a551e7 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/stepdown_change_set_name.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/stepdown_change_set_name.yml @@ -16,7 +16,7 @@ phases: [ hosts: ["a:27017"], setName: "rs", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], @@ -50,7 +50,7 @@ phases: [ hosts: ["a:27017"], setName: "wrong", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/too_new.json b/specifications/server-discovery-and-monitoring/tests/rs/too_new.json index 0433d27a368..696246f8e10 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/too_new.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/too_new.json @@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ diff --git a/specifications/server-discovery-and-monitoring/tests/rs/too_new.yml b/specifications/server-discovery-and-monitoring/tests/rs/too_new.yml index 52912826beb..121286554ed 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/too_new.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/too_new.yml @@ -12,7 +12,7 @@ phases: [ setName: "rs", hosts: ["a:27017", "b:27017"], minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }], ["b:27017", { ok: 1, diff --git a/specifications/server-discovery-and-monitoring/tests/rs/too_old.json b/specifications/server-discovery-and-monitoring/tests/rs/too_old.json index 461d00acc4c..dc8a5b2b9c4 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/too_old.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/too_old.json 
@@ -16,7 +16,7 @@ "b:27017" ], "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -30,7 +30,9 @@ "hosts": [ "a:27017", "b:27017" - ] + ], + "minWireVersion": 999, + "maxWireVersion": 1000 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/too_old.yml b/specifications/server-discovery-and-monitoring/tests/rs/too_old.yml index ab238dbab9b..f4d4bd8b55a 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/too_old.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/too_old.yml @@ -10,7 +10,7 @@ phases: [ setName: "rs", hosts: ["a:27017", "b:27017"], minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }], ["b:27017", { ok: 1, @@ -18,7 +18,9 @@ phases: [ isWritablePrimary: false, secondary: true, setName: "rs", - hosts: ["a:27017", "b:27017"] + hosts: ["a:27017", "b:27017"], + minWireVersion: 999, + maxWireVersion: 1000 }] ], outcome: { diff --git a/specifications/server-discovery-and-monitoring/tests/rs/unexpected_mongos.json b/specifications/server-discovery-and-monitoring/tests/rs/unexpected_mongos.json index cc19a961f2c..c6ffb321cae 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/unexpected_mongos.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/unexpected_mongos.json @@ -12,7 +12,7 @@ "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/unexpected_mongos.yml b/specifications/server-discovery-and-monitoring/tests/rs/unexpected_mongos.yml index d0d8547f960..a9c5a24ee63 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/unexpected_mongos.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/unexpected_mongos.yml @@ -14,7 +14,7 @@ phases: [ isWritablePrimary: true, msg: "isdbgrid", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git 
a/specifications/server-discovery-and-monitoring/tests/rs/use_setversion_without_electionid-pre-6.0.json b/specifications/server-discovery-and-monitoring/tests/rs/use_setversion_without_electionid-pre-6.0.json index 5c58b656145..2f9b567b850 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/use_setversion_without_electionid-pre-6.0.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/use_setversion_without_electionid-pre-6.0.json @@ -20,7 +20,7 @@ "$oid": "000000000000000000000001" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 7 } ] ], @@ -64,7 +64,7 @@ "setName": "rs", "setVersion": 2, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 7 } ] ], @@ -108,7 +108,7 @@ "$oid": "000000000000000000000002" }, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 7 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/use_setversion_without_electionid-pre-6.0.yml b/specifications/server-discovery-and-monitoring/tests/rs/use_setversion_without_electionid-pre-6.0.yml index a0434eb45c0..24d6accbe0a 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/use_setversion_without_electionid-pre-6.0.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/use_setversion_without_electionid-pre-6.0.yml @@ -16,7 +16,7 @@ phases: [ setVersion: 1, electionId: {"$oid": "000000000000000000000001"}, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 7 }] ], @@ -53,7 +53,7 @@ phases: [ setName: "rs", setVersion: 2, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 7 }] ], @@ -91,7 +91,7 @@ phases: [ setVersion: 1, electionId: {"$oid": "000000000000000000000002"}, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 7 }] ], outcome: { diff --git a/specifications/server-discovery-and-monitoring/tests/rs/wrong_set_name.json b/specifications/server-discovery-and-monitoring/tests/rs/wrong_set_name.json index 9654ff7b79b..d0764d24dc3 100644 --- 
a/specifications/server-discovery-and-monitoring/tests/rs/wrong_set_name.json +++ b/specifications/server-discovery-and-monitoring/tests/rs/wrong_set_name.json @@ -17,7 +17,7 @@ ], "setName": "wrong", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/rs/wrong_set_name.yml b/specifications/server-discovery-and-monitoring/tests/rs/wrong_set_name.yml index ae75d6f7e44..e7d3ace92dc 100644 --- a/specifications/server-discovery-and-monitoring/tests/rs/wrong_set_name.yml +++ b/specifications/server-discovery-and-monitoring/tests/rs/wrong_set_name.yml @@ -16,7 +16,7 @@ phases: [ hosts: ["b:27017", "c:27017"], setName: "wrong", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/sharded/discover_single_mongos.json b/specifications/server-discovery-and-monitoring/tests/sharded/discover_single_mongos.json index 9e877a0840d..bf7e57521c0 100644 --- a/specifications/server-discovery-and-monitoring/tests/sharded/discover_single_mongos.json +++ b/specifications/server-discovery-and-monitoring/tests/sharded/discover_single_mongos.json @@ -12,7 +12,7 @@ "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/sharded/discover_single_mongos.yml b/specifications/server-discovery-and-monitoring/tests/sharded/discover_single_mongos.yml index f44a2970dd2..1e8adc6c5c0 100644 --- a/specifications/server-discovery-and-monitoring/tests/sharded/discover_single_mongos.yml +++ b/specifications/server-discovery-and-monitoring/tests/sharded/discover_single_mongos.yml @@ -13,7 +13,7 @@ phases: isWritablePrimary: true msg: "isdbgrid" minWireVersion: 0 - maxWireVersion: 6 + maxWireVersion: 21 outcome: servers: diff --git a/specifications/server-discovery-and-monitoring/tests/sharded/ls_timeout_mongos.json 
b/specifications/server-discovery-and-monitoring/tests/sharded/ls_timeout_mongos.json index 93fa398d52e..3da0f84ca23 100644 --- a/specifications/server-discovery-and-monitoring/tests/sharded/ls_timeout_mongos.json +++ b/specifications/server-discovery-and-monitoring/tests/sharded/ls_timeout_mongos.json @@ -13,7 +13,7 @@ "msg": "isdbgrid", "logicalSessionTimeoutMinutes": 1, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -25,7 +25,7 @@ "msg": "isdbgrid", "logicalSessionTimeoutMinutes": 2, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -56,7 +56,7 @@ "msg": "isdbgrid", "logicalSessionTimeoutMinutes": 1, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -67,7 +67,7 @@ "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/sharded/ls_timeout_mongos.yml b/specifications/server-discovery-and-monitoring/tests/sharded/ls_timeout_mongos.yml index 7f78d0d6c6f..62ae97f0571 100644 --- a/specifications/server-discovery-and-monitoring/tests/sharded/ls_timeout_mongos.yml +++ b/specifications/server-discovery-and-monitoring/tests/sharded/ls_timeout_mongos.yml @@ -15,7 +15,7 @@ phases: [ msg: "isdbgrid", logicalSessionTimeoutMinutes: 1, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }], ["b:27017", { @@ -26,7 +26,7 @@ phases: [ msg: "isdbgrid", logicalSessionTimeoutMinutes: 2, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], @@ -63,7 +63,7 @@ phases: [ msg: "isdbgrid", logicalSessionTimeoutMinutes: 1, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }], ["b:27017", { @@ -73,7 +73,7 @@ phases: [ isWritablePrimary: true, msg: "isdbgrid", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/sharded/mongos_disconnect.json 
b/specifications/server-discovery-and-monitoring/tests/sharded/mongos_disconnect.json index 50a93eda5ff..29b33518695 100644 --- a/specifications/server-discovery-and-monitoring/tests/sharded/mongos_disconnect.json +++ b/specifications/server-discovery-and-monitoring/tests/sharded/mongos_disconnect.json @@ -12,7 +12,7 @@ "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -23,7 +23,7 @@ "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -76,7 +76,7 @@ "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/sharded/mongos_disconnect.yml b/specifications/server-discovery-and-monitoring/tests/sharded/mongos_disconnect.yml index c4393d85b10..c384b3be115 100644 --- a/specifications/server-discovery-and-monitoring/tests/sharded/mongos_disconnect.yml +++ b/specifications/server-discovery-and-monitoring/tests/sharded/mongos_disconnect.yml @@ -14,7 +14,7 @@ phases: [ isWritablePrimary: true, msg: "isdbgrid", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }], ["b:27017", { @@ -24,7 +24,7 @@ phases: [ isWritablePrimary: true, msg: "isdbgrid", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], @@ -85,7 +85,7 @@ phases: [ isWritablePrimary: true, msg: "isdbgrid", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }], ], diff --git a/specifications/server-discovery-and-monitoring/tests/sharded/multiple_mongoses.json b/specifications/server-discovery-and-monitoring/tests/sharded/multiple_mongoses.json index 311592d715a..ae0c2d9cdef 100644 --- a/specifications/server-discovery-and-monitoring/tests/sharded/multiple_mongoses.json +++ b/specifications/server-discovery-and-monitoring/tests/sharded/multiple_mongoses.json @@ -12,7 +12,7 @@ "isWritablePrimary": true, "msg": "isdbgrid", 
"minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -23,7 +23,7 @@ "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/sharded/multiple_mongoses.yml b/specifications/server-discovery-and-monitoring/tests/sharded/multiple_mongoses.yml index 0a49d642499..6311605a896 100644 --- a/specifications/server-discovery-and-monitoring/tests/sharded/multiple_mongoses.yml +++ b/specifications/server-discovery-and-monitoring/tests/sharded/multiple_mongoses.yml @@ -14,7 +14,7 @@ phases: [ isWritablePrimary: true, msg: "isdbgrid", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }], ["b:27017", { @@ -24,7 +24,7 @@ phases: [ isWritablePrimary: true, msg: "isdbgrid", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/sharded/non_mongos_removed.json b/specifications/server-discovery-and-monitoring/tests/sharded/non_mongos_removed.json index d74375ebbfd..4698f576d5a 100644 --- a/specifications/server-discovery-and-monitoring/tests/sharded/non_mongos_removed.json +++ b/specifications/server-discovery-and-monitoring/tests/sharded/non_mongos_removed.json @@ -12,7 +12,7 @@ "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -26,7 +26,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/sharded/non_mongos_removed.yml b/specifications/server-discovery-and-monitoring/tests/sharded/non_mongos_removed.yml index ab25349bda8..aa604f11001 100644 --- a/specifications/server-discovery-and-monitoring/tests/sharded/non_mongos_removed.yml +++ b/specifications/server-discovery-and-monitoring/tests/sharded/non_mongos_removed.yml @@ -14,7 +14,7 @@ phases: [ isWritablePrimary: true, 
msg: "isdbgrid", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }], ["b:27017", { @@ -25,7 +25,7 @@ phases: [ hosts: ["b:27017"], setName: "rs", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/sharded/too_old.json b/specifications/server-discovery-and-monitoring/tests/sharded/too_old.json index 688e1db0f5a..b918715ada8 100644 --- a/specifications/server-discovery-and-monitoring/tests/sharded/too_old.json +++ b/specifications/server-discovery-and-monitoring/tests/sharded/too_old.json @@ -12,7 +12,7 @@ "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 2, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ diff --git a/specifications/server-discovery-and-monitoring/tests/sharded/too_old.yml b/specifications/server-discovery-and-monitoring/tests/sharded/too_old.yml index 925a8f55b8e..a709934c2f6 100644 --- a/specifications/server-discovery-and-monitoring/tests/sharded/too_old.yml +++ b/specifications/server-discovery-and-monitoring/tests/sharded/too_old.yml @@ -9,7 +9,7 @@ phases: [ isWritablePrimary: true, msg: "isdbgrid", minWireVersion: 2, - maxWireVersion: 6 + maxWireVersion: 21 }], ["b:27017", { ok: 1, diff --git a/specifications/server-discovery-and-monitoring/tests/single/direct_connection_external_ip.json b/specifications/server-discovery-and-monitoring/tests/single/direct_connection_external_ip.json index 90676a8f9b0..1461b4c4694 100644 --- a/specifications/server-discovery-and-monitoring/tests/single/direct_connection_external_ip.json +++ b/specifications/server-discovery-and-monitoring/tests/single/direct_connection_external_ip.json @@ -15,7 +15,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/single/direct_connection_external_ip.yml b/specifications/server-discovery-and-monitoring/tests/single/direct_connection_external_ip.yml index 
18c01226a9a..0cb73c50e2a 100644 --- a/specifications/server-discovery-and-monitoring/tests/single/direct_connection_external_ip.yml +++ b/specifications/server-discovery-and-monitoring/tests/single/direct_connection_external_ip.yml @@ -15,7 +15,7 @@ phases: [ hosts: ["b:27017"], # Internal IP. setName: "rs", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/single/direct_connection_mongos.json b/specifications/server-discovery-and-monitoring/tests/single/direct_connection_mongos.json index 25fe9651856..72be020862b 100644 --- a/specifications/server-discovery-and-monitoring/tests/single/direct_connection_mongos.json +++ b/specifications/server-discovery-and-monitoring/tests/single/direct_connection_mongos.json @@ -12,7 +12,7 @@ "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/single/direct_connection_mongos.yml b/specifications/server-discovery-and-monitoring/tests/single/direct_connection_mongos.yml index 853ce57c17e..e81c9aef9b5 100644 --- a/specifications/server-discovery-and-monitoring/tests/single/direct_connection_mongos.yml +++ b/specifications/server-discovery-and-monitoring/tests/single/direct_connection_mongos.yml @@ -14,7 +14,7 @@ phases: [ isWritablePrimary: true, msg: "isdbgrid", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/single/direct_connection_replicaset.json b/specifications/server-discovery-and-monitoring/tests/single/direct_connection_replicaset.json index cd8660888a0..82a51d390eb 100644 --- a/specifications/server-discovery-and-monitoring/tests/single/direct_connection_replicaset.json +++ b/specifications/server-discovery-and-monitoring/tests/single/direct_connection_replicaset.json @@ -12,7 +12,7 @@ "isWritablePrimary": true, "setName": "rs", 
"minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/single/direct_connection_replicaset.yml b/specifications/server-discovery-and-monitoring/tests/single/direct_connection_replicaset.yml index 21e565fd9cb..9a0f3996ca1 100644 --- a/specifications/server-discovery-and-monitoring/tests/single/direct_connection_replicaset.yml +++ b/specifications/server-discovery-and-monitoring/tests/single/direct_connection_replicaset.yml @@ -12,7 +12,7 @@ phases: isWritablePrimary: true setName: rs minWireVersion: 0 - maxWireVersion: 6 + maxWireVersion: 21 outcome: servers: "a:27017": diff --git a/specifications/server-discovery-and-monitoring/tests/single/direct_connection_rsarbiter.json b/specifications/server-discovery-and-monitoring/tests/single/direct_connection_rsarbiter.json index e2049560566..e06d2843645 100644 --- a/specifications/server-discovery-and-monitoring/tests/single/direct_connection_rsarbiter.json +++ b/specifications/server-discovery-and-monitoring/tests/single/direct_connection_rsarbiter.json @@ -17,7 +17,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/single/direct_connection_rsarbiter.yml b/specifications/server-discovery-and-monitoring/tests/single/direct_connection_rsarbiter.yml index 7e262c9dbe8..d9fa876655e 100644 --- a/specifications/server-discovery-and-monitoring/tests/single/direct_connection_rsarbiter.yml +++ b/specifications/server-discovery-and-monitoring/tests/single/direct_connection_rsarbiter.yml @@ -16,7 +16,7 @@ phases: [ hosts: ["a:27017", "b:27017"], setName: "rs", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/single/direct_connection_rsprimary.json b/specifications/server-discovery-and-monitoring/tests/single/direct_connection_rsprimary.json index 
409e8502b3c..45eb1602fb7 100644 --- a/specifications/server-discovery-and-monitoring/tests/single/direct_connection_rsprimary.json +++ b/specifications/server-discovery-and-monitoring/tests/single/direct_connection_rsprimary.json @@ -16,7 +16,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/single/direct_connection_rsprimary.yml b/specifications/server-discovery-and-monitoring/tests/single/direct_connection_rsprimary.yml index 4ea0b1551fe..85dcb30a7c5 100644 --- a/specifications/server-discovery-and-monitoring/tests/single/direct_connection_rsprimary.yml +++ b/specifications/server-discovery-and-monitoring/tests/single/direct_connection_rsprimary.yml @@ -15,7 +15,7 @@ phases: [ hosts: ["a:27017", "b:27017"], setName: "rs", minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/single/direct_connection_rssecondary.json b/specifications/server-discovery-and-monitoring/tests/single/direct_connection_rssecondary.json index 305f283b527..b1bef8a49f4 100644 --- a/specifications/server-discovery-and-monitoring/tests/single/direct_connection_rssecondary.json +++ b/specifications/server-discovery-and-monitoring/tests/single/direct_connection_rssecondary.json @@ -17,7 +17,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/single/direct_connection_rssecondary.yml b/specifications/server-discovery-and-monitoring/tests/single/direct_connection_rssecondary.yml index b0c4d1f2181..2d7da65e41e 100644 --- a/specifications/server-discovery-and-monitoring/tests/single/direct_connection_rssecondary.yml +++ b/specifications/server-discovery-and-monitoring/tests/single/direct_connection_rssecondary.yml @@ -16,7 +16,7 @@ phases: [ hosts: ["a:27017", "b:27017"], setName: "rs", 
minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/single/direct_connection_standalone.json b/specifications/server-discovery-and-monitoring/tests/single/direct_connection_standalone.json index b47278482a7..e71ba07e740 100644 --- a/specifications/server-discovery-and-monitoring/tests/single/direct_connection_standalone.json +++ b/specifications/server-discovery-and-monitoring/tests/single/direct_connection_standalone.json @@ -11,7 +11,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/single/direct_connection_standalone.yml b/specifications/server-discovery-and-monitoring/tests/single/direct_connection_standalone.yml index cd71087e4d6..ab4cb02ad44 100644 --- a/specifications/server-discovery-and-monitoring/tests/single/direct_connection_standalone.yml +++ b/specifications/server-discovery-and-monitoring/tests/single/direct_connection_standalone.yml @@ -13,7 +13,7 @@ phases: [ helloOk: true, isWritablePrimary: true, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/single/direct_connection_wrong_set_name.json b/specifications/server-discovery-and-monitoring/tests/single/direct_connection_wrong_set_name.json index 71080e6810e..8014a0a5337 100644 --- a/specifications/server-discovery-and-monitoring/tests/single/direct_connection_wrong_set_name.json +++ b/specifications/server-discovery-and-monitoring/tests/single/direct_connection_wrong_set_name.json @@ -16,7 +16,7 @@ ], "setName": "wrong", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], @@ -45,7 +45,7 @@ ], "setName": "rs", "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git 
a/specifications/server-discovery-and-monitoring/tests/single/direct_connection_wrong_set_name.yml b/specifications/server-discovery-and-monitoring/tests/single/direct_connection_wrong_set_name.yml index f1e48dc4194..46b476d0498 100644 --- a/specifications/server-discovery-and-monitoring/tests/single/direct_connection_wrong_set_name.yml +++ b/specifications/server-discovery-and-monitoring/tests/single/direct_connection_wrong_set_name.yml @@ -11,7 +11,7 @@ phases: - b:27017 setName: wrong minWireVersion: 0 - maxWireVersion: 6 + maxWireVersion: 21 outcome: servers: a:27017: @@ -29,7 +29,7 @@ phases: - b:27017 setName: rs minWireVersion: 0 - maxWireVersion: 6 + maxWireVersion: 21 outcome: servers: a:27017: diff --git a/specifications/server-discovery-and-monitoring/tests/single/discover_standalone.json b/specifications/server-discovery-and-monitoring/tests/single/discover_standalone.json index 858cbdaf638..d78c81654b2 100644 --- a/specifications/server-discovery-and-monitoring/tests/single/discover_standalone.json +++ b/specifications/server-discovery-and-monitoring/tests/single/discover_standalone.json @@ -11,7 +11,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/single/discover_standalone.yml b/specifications/server-discovery-and-monitoring/tests/single/discover_standalone.yml index 49ebb142091..bc112d4eac5 100644 --- a/specifications/server-discovery-and-monitoring/tests/single/discover_standalone.yml +++ b/specifications/server-discovery-and-monitoring/tests/single/discover_standalone.yml @@ -13,7 +13,7 @@ phases: [ helloOk: true, isWritablePrimary: true, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/single/ls_timeout_standalone.json b/specifications/server-discovery-and-monitoring/tests/single/ls_timeout_standalone.json index 
87b3e4e8a10..236eabe00ab 100644 --- a/specifications/server-discovery-and-monitoring/tests/single/ls_timeout_standalone.json +++ b/specifications/server-discovery-and-monitoring/tests/single/ls_timeout_standalone.json @@ -12,7 +12,7 @@ "isWritablePrimary": true, "logicalSessionTimeoutMinutes": 7, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/single/ls_timeout_standalone.yml b/specifications/server-discovery-and-monitoring/tests/single/ls_timeout_standalone.yml index 2926d95a9dd..4747af6770b 100644 --- a/specifications/server-discovery-and-monitoring/tests/single/ls_timeout_standalone.yml +++ b/specifications/server-discovery-and-monitoring/tests/single/ls_timeout_standalone.yml @@ -14,7 +14,7 @@ phases: [ isWritablePrimary: true, logicalSessionTimeoutMinutes: 7, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/single/not_ok_response.json b/specifications/server-discovery-and-monitoring/tests/single/not_ok_response.json index 8e7c2a10e37..cfaac3564ac 100644 --- a/specifications/server-discovery-and-monitoring/tests/single/not_ok_response.json +++ b/specifications/server-discovery-and-monitoring/tests/single/not_ok_response.json @@ -11,7 +11,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ], [ @@ -21,7 +21,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/single/not_ok_response.yml b/specifications/server-discovery-and-monitoring/tests/single/not_ok_response.yml index 64103a59024..c1ae7d98754 100644 --- a/specifications/server-discovery-and-monitoring/tests/single/not_ok_response.yml +++ b/specifications/server-discovery-and-monitoring/tests/single/not_ok_response.yml @@ -13,7 +13,7 @@ 
phases: [ helloOk: true, isWritablePrimary: true, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }], ["a:27017", { @@ -22,7 +22,7 @@ phases: [ helloOk: true, isWritablePrimary: true, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/single/standalone_removed.json b/specifications/server-discovery-and-monitoring/tests/single/standalone_removed.json index 57f8f861b18..675cdbb0083 100644 --- a/specifications/server-discovery-and-monitoring/tests/single/standalone_removed.json +++ b/specifications/server-discovery-and-monitoring/tests/single/standalone_removed.json @@ -11,7 +11,7 @@ "helloOk": true, "isWritablePrimary": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/single/standalone_removed.yml b/specifications/server-discovery-and-monitoring/tests/single/standalone_removed.yml index 59b44b162e7..c8404463f73 100644 --- a/specifications/server-discovery-and-monitoring/tests/single/standalone_removed.yml +++ b/specifications/server-discovery-and-monitoring/tests/single/standalone_removed.yml @@ -13,7 +13,7 @@ phases: [ helloOk: true, isWritablePrimary: true, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/single/standalone_using_legacy_hello.json b/specifications/server-discovery-and-monitoring/tests/single/standalone_using_legacy_hello.json index 46660fa8de5..488cac49181 100644 --- a/specifications/server-discovery-and-monitoring/tests/single/standalone_using_legacy_hello.json +++ b/specifications/server-discovery-and-monitoring/tests/single/standalone_using_legacy_hello.json @@ -10,7 +10,7 @@ "ok": 1, "ismaster": true, "minWireVersion": 0, - "maxWireVersion": 6 + "maxWireVersion": 21 } ] ], diff --git a/specifications/server-discovery-and-monitoring/tests/single/standalone_using_legacy_hello.yml 
b/specifications/server-discovery-and-monitoring/tests/single/standalone_using_legacy_hello.yml index a18ecb70fc3..82c6ccfa7b1 100644 --- a/specifications/server-discovery-and-monitoring/tests/single/standalone_using_legacy_hello.yml +++ b/specifications/server-discovery-and-monitoring/tests/single/standalone_using_legacy_hello.yml @@ -12,7 +12,7 @@ phases: [ ok: 1, ismaster: true, minWireVersion: 0, - maxWireVersion: 6 + maxWireVersion: 21 }] ], diff --git a/specifications/server-discovery-and-monitoring/tests/unified/pool-clear-application-error.json b/specifications/server-discovery-and-monitoring/tests/unified/pool-clear-application-error.json new file mode 100644 index 00000000000..b8fd95fee39 --- /dev/null +++ b/specifications/server-discovery-and-monitoring/tests/unified/pool-clear-application-error.json @@ -0,0 +1,149 @@ +{ + "description": "pool-clear-application-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "initialData": [ + { + "collectionName": "find-network-error", + "databaseName": "sdam-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "tests": [ + { + "description": "Pool is cleared before application connection is checked into the pool", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "closeConnection": true, + "appName": "findNetworkErrorTest" + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "poolClearedEvent", + 
"connectionCheckedInEvent" + ], + "uriOptions": { + "retryWrites": false, + "retryReads": false, + "appname": "findNetworkErrorTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "sdam-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "find-network-error" + } + } + ] + } + }, + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "connectionCheckedInEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "poolClearedEvent": {} + }, + { + "connectionCheckedInEvent": {} + } + ] + } + ] + } + ] +} diff --git a/specifications/server-discovery-and-monitoring/tests/unified/pool-clear-application-error.yml b/specifications/server-discovery-and-monitoring/tests/unified/pool-clear-application-error.yml new file mode 100644 index 00000000000..43e425711e2 --- /dev/null +++ b/specifications/server-discovery-and-monitoring/tests/unified/pool-clear-application-error.yml @@ -0,0 +1,88 @@ +--- +description: pool-clear-application-error + +schemaVersion: "1.4" + +runOnRequirements: + # failCommand appName requirements + - minServerVersion: "4.4" + serverless: forbid + topologies: [ single, replicaset, sharded ] + +createEntities: + - client: + id: &setupClient setupClient + useMultipleMongoses: false + +initialData: &initialData + - collectionName: &collectionName find-network-error + databaseName: &databaseName sdam-tests + documents: + - _id: 1 + - _id: 2 + +tests: + - description: Pool is cleared before application connection is checked into the pool 
+ operations: + - name: failPoint + object: testRunner + arguments: + client: *setupClient + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: + - find + closeConnection: true + appName: findNetworkErrorTest + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + - poolClearedEvent + - connectionCheckedInEvent + uriOptions: + retryWrites: false + retryReads: false + appname: findNetworkErrorTest + - database: + id: &database database + client: *client + databaseName: *databaseName + - collection: + id: &collection collection + database: *database + collectionName: *collectionName + - name: find + object: *collection + arguments: + filter: + _id: 1 + expectError: + isError: true + - name: waitForEvent + object: testRunner + arguments: + client: *client + event: + poolClearedEvent: {} + count: 1 + - name: waitForEvent + object: testRunner + arguments: + client: *client + event: + connectionCheckedInEvent: {} + count: 1 + expectEvents: + - client: *client + eventType: cmap + events: + - poolClearedEvent: {} + - connectionCheckedInEvent: {} diff --git a/specifications/server-discovery-and-monitoring/tests/unified/pool-clear-checkout-error.json b/specifications/server-discovery-and-monitoring/tests/unified/pool-clear-checkout-error.json new file mode 100644 index 00000000000..126ee545333 --- /dev/null +++ b/specifications/server-discovery-and-monitoring/tests/unified/pool-clear-checkout-error.json @@ -0,0 +1,296 @@ +{ + "description": "pool-clear-on-error-checkout", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single", + "replicaset", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "tests": [ + { + "description": "Pool is cleared before connection is closed 
(authentication error)", + "runOnRequirements": [ + { + "auth": true + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "appName": "authErrorTest", + "errorCode": 18 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "connectionCheckOutStartedEvent", + "poolClearedEvent", + "connectionClosedEvent" + ], + "uriOptions": { + "retryWrites": false, + "appname": "authErrorTest" + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "bar" + } + } + ] + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "connectionClosedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "poolClearedEvent": {} + }, + { + "connectionClosedEvent": {} + } + ] + } + ] + }, + { + "description": "Pool is cleared before connection is closed (handshake error)", + "runOnRequirements": [ + { + "topologies": [ + "single" + ] + } + ], + "operations": [ + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + 
"useMultipleMongoses": false, + "observeEvents": [ + "connectionCheckOutStartedEvent", + "poolClearedEvent", + "connectionClosedEvent", + "topologyDescriptionChangedEvent" + ], + "uriOptions": { + "retryWrites": false, + "appname": "authErrorTest", + "minPoolSize": 0, + "serverMonitoringMode": "poll", + "heartbeatFrequencyMS": 1000000 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "foo" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "bar" + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": { + "previousDescription": { + "type": "Unknown" + }, + "newDescription": { + "type": "Single" + } + } + }, + "count": 1 + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "authErrorTest", + "closeConnection": true + } + } + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "expectError": { + "isError": true + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "connectionClosedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "poolClearedEvent": {} + }, + { + "connectionClosedEvent": {} + } + ] + } + ] + } + ] +} diff --git a/specifications/server-discovery-and-monitoring/tests/unified/pool-clear-checkout-error.yml 
b/specifications/server-discovery-and-monitoring/tests/unified/pool-clear-checkout-error.yml new file mode 100644 index 00000000000..8df74b6a6f5 --- /dev/null +++ b/specifications/server-discovery-and-monitoring/tests/unified/pool-clear-checkout-error.yml @@ -0,0 +1,176 @@ +--- +description: pool-clear-on-error-checkout + +schemaVersion: "1.4" + +runOnRequirements: + # failCommand appName requirements + - minServerVersion: "4.4" + serverless: forbid + topologies: [ single, replicaset, sharded ] + +createEntities: + - client: + id: &setupClient setupClient + useMultipleMongoses: false + +tests: + - description: Pool is cleared before connection is closed (authentication error) + runOnRequirements: + - auth: true + + operations: + - name: failPoint + object: testRunner + arguments: + client: *setupClient + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: + - saslContinue + appName: authErrorTest + errorCode: 18 + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + - connectionCheckOutStartedEvent + - poolClearedEvent + - connectionClosedEvent + uriOptions: + retryWrites: false + appname: authErrorTest + - database: + id: &database database + client: *client + databaseName: foo + - collection: + id: &collection collection + database: *database + collectionName: bar + - name: insertMany + object: *collection + arguments: + documents: + - _id: 3 + - _id: 4 + expectError: + isError: true + - name: waitForEvent + object: testRunner + arguments: + client: *client + event: + poolClearedEvent: {} + count: 1 + - name: waitForEvent + object: testRunner + arguments: + client: *client + event: + connectionClosedEvent: {} + count: 1 + expectEvents: + - client: *client + eventType: cmap + events: + - connectionCheckOutStartedEvent: {} + - poolClearedEvent: {} + - connectionClosedEvent: {} + + - description: Pool is cleared before connection is 
closed (handshake error) + runOnRequirements: + - topologies: [ single ] + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + useMultipleMongoses: false + observeEvents: + - connectionCheckOutStartedEvent + - poolClearedEvent + - connectionClosedEvent + - topologyDescriptionChangedEvent + uriOptions: + retryWrites: false + appname: authErrorTest + minPoolSize: 0 + # ensure that once we've connected to the server, the failCommand won't + # be triggered by monitors and will only be triggered by handshakes + serverMonitoringMode: poll + heartbeatFrequencyMS: 1000000 + - database: + id: &database database + client: *client + databaseName: foo + - collection: + id: &collection collection + database: *database + collectionName: bar + - name: waitForEvent + object: testRunner + arguments: + client: *client + event: + topologyDescriptionChangedEvent: + previousDescription: + type: "Unknown" + newDescription: + type: "Single" + count: 1 + + - name: failPoint + object: testRunner + arguments: + client: *setupClient + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: + - hello + - isMaster + appName: authErrorTest + closeConnection: true + + - name: insertMany + object: *collection + arguments: + documents: + - _id: 3 + - _id: 4 + expectError: + isError: true + - name: waitForEvent + object: testRunner + arguments: + client: *client + event: + poolClearedEvent: {} + count: 1 + - name: waitForEvent + object: testRunner + arguments: + client: *client + event: + connectionClosedEvent: {} + count: 1 + expectEvents: + - client: *client + eventType: cmap + events: + - connectionCheckOutStartedEvent: {} + - poolClearedEvent: {} + - connectionClosedEvent: {} + diff --git a/specifications/server-discovery-and-monitoring/tests/unified/pool-clear-min-pool-size-error.json b/specifications/server-discovery-and-monitoring/tests/unified/pool-clear-min-pool-size-error.json new file mode 
100644 index 00000000000..11c6be5bc16 --- /dev/null +++ b/specifications/server-discovery-and-monitoring/tests/unified/pool-clear-min-pool-size-error.json @@ -0,0 +1,230 @@ +{ + "description": "pool-cleared-on-min-pool-size-population-error", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "4.4", + "serverless": "forbid", + "topologies": [ + "single" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "setupClient", + "useMultipleMongoses": false + } + } + ], + "tests": [ + { + "description": "Pool is cleared on authentication error during minPoolSize population", + "runOnRequirements": [ + { + "auth": true + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "appName": "authErrorTest", + "errorCode": 18 + } + } + } + }, + { + "name": "createEntities", + "object": "testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "connectionCreatedEvent", + "poolClearedEvent", + "connectionClosedEvent" + ], + "uriOptions": { + "appname": "authErrorTest", + "minPoolSize": 1 + } + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "connectionClosedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCreatedEvent": {} + }, + { + "poolClearedEvent": {} + }, + { + "connectionClosedEvent": {} + } + ] + } + ] + }, + { + "description": "Pool is cleared on handshake error during minPoolSize population", + "operations": [ + { + "name": "createEntities", + "object": 
"testRunner", + "arguments": { + "entities": [ + { + "client": { + "id": "client", + "observeEvents": [ + "topologyDescriptionChangedEvent", + "connectionCreatedEvent", + "poolClearedEvent", + "connectionClosedEvent", + "connectionReadyEvent" + ], + "uriOptions": { + "appname": "authErrorTest", + "minPoolSize": 5, + "maxConnecting": 1, + "serverMonitoringMode": "poll", + "heartbeatFrequencyMS": 1000000 + } + } + } + ] + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": { + "previousDescription": { + "type": "Unknown" + }, + "newDescription": { + "type": "Single" + } + } + }, + "count": 1 + } + }, + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "setupClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "hello", + "isMaster" + ], + "appName": "authErrorTest", + "closeConnection": true + } + } + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "poolClearedEvent": {} + }, + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "connectionClosedEvent": {} + }, + "count": 1 + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCreatedEvent": {} + }, + { + "connectionReadyEvent": {} + }, + { + "connectionCreatedEvent": {} + }, + { + "poolClearedEvent": {} + }, + { + "connectionClosedEvent": {} + } + ] + } + ] + } + ] +} diff --git a/specifications/server-discovery-and-monitoring/tests/unified/pool-clear-min-pool-size-error.yml b/specifications/server-discovery-and-monitoring/tests/unified/pool-clear-min-pool-size-error.yml new file mode 100644 index 00000000000..7e7ef0c590b --- /dev/null +++ 
b/specifications/server-discovery-and-monitoring/tests/unified/pool-clear-min-pool-size-error.yml @@ -0,0 +1,144 @@ +--- +description: pool-cleared-on-min-pool-size-population-error + +schemaVersion: "1.4" + +runOnRequirements: + # failCommand appName requirements + - minServerVersion: "4.4" + serverless: forbid + topologies: [ single ] + +createEntities: + - client: + id: &setupClient setupClient + useMultipleMongoses: false + +tests: + - description: Pool is cleared on authentication error during minPoolSize population + runOnRequirements: + # failCommand appName requirements + - auth: true + operations: + - name: failPoint + object: testRunner + arguments: + client: *setupClient + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: + - saslContinue + appName: authErrorTest + errorCode: 18 + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + observeEvents: + - connectionCreatedEvent + - poolClearedEvent + - connectionClosedEvent + uriOptions: + appname: authErrorTest + minPoolSize: 1 + - name: waitForEvent + object: testRunner + arguments: + client: *client + event: + poolClearedEvent: {} + count: 1 + - name: waitForEvent + object: testRunner + arguments: + client: *client + event: + connectionClosedEvent: {} + count: 1 + expectEvents: + - client: *client + eventType: cmap + events: + - connectionCreatedEvent: {} + - poolClearedEvent: {} + - connectionClosedEvent: {} + + - description: Pool is cleared on handshake error during minPoolSize population + operations: + - name: createEntities + object: testRunner + arguments: + entities: + - client: + id: &client client + observeEvents: + - topologyDescriptionChangedEvent + - connectionCreatedEvent + - poolClearedEvent + - connectionClosedEvent + - connectionReadyEvent + uriOptions: + appname: authErrorTest + minPoolSize: 5 + maxConnecting: 1 + # ensure that once we've connected to the server, the failCommand won't + # be 
triggered by monitors and will only be triggered by handshakes + serverMonitoringMode: poll + heartbeatFrequencyMS: 1000000 + + - name: waitForEvent + object: testRunner + arguments: + client: *client + event: + topologyDescriptionChangedEvent: + previousDescription: + type: "Unknown" + newDescription: + type: "Single" + count: 1 + + - name: failPoint + object: testRunner + arguments: + client: *setupClient + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: + - hello + - isMaster + appName: authErrorTest + closeConnection: true + + - name: waitForEvent + object: testRunner + arguments: + client: *client + event: + poolClearedEvent: {} + count: 1 + - name: waitForEvent + object: testRunner + arguments: + client: *client + event: + connectionClosedEvent: {} + count: 1 + expectEvents: + - client: *client + eventType: cmap + events: + - connectionCreatedEvent: {} + - connectionReadyEvent: {} + - connectionCreatedEvent: {} + - poolClearedEvent: {} + - connectionClosedEvent: {} + diff --git a/specifications/unified-test-format/tests/valid-pass/expectedError-isClientError.json b/specifications/unified-test-format/tests/valid-pass/expectedError-isClientError.json new file mode 100644 index 00000000000..9c6beda5882 --- /dev/null +++ b/specifications/unified-test-format/tests/valid-pass/expectedError-isClientError.json @@ -0,0 +1,74 @@ +{ + "description": "expectedError-isClientError", + "schemaVersion": "1.3", + "runOnRequirements": [ + { + "minServerVersion": "4.0", + "topologies": [ + "single", + "replicaset" + ] + }, + { + "minServerVersion": "4.1.7", + "topologies": [ + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + } + ], + "tests": [ + { + "description": "isClientError considers network errors", + "operations": [ + { + "name": "failPoint", + 
"object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "ping" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database0", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isClientError": true + } + } + ] + } + ] +} diff --git a/specifications/unified-test-format/tests/valid-pass/expectedError-isClientError.yml b/specifications/unified-test-format/tests/valid-pass/expectedError-isClientError.yml new file mode 100644 index 00000000000..3bc12e73f9e --- /dev/null +++ b/specifications/unified-test-format/tests/valid-pass/expectedError-isClientError.yml @@ -0,0 +1,39 @@ +description: "expectedError-isClientError" + +schemaVersion: "1.3" + +runOnRequirements: + - minServerVersion: "4.0" + topologies: [single, replicaset] + - minServerVersion: "4.1.7" + topologies: [sharded, load-balanced] + +createEntities: + - client: + id: &client0 client0 + useMultipleMongoses: false + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name test + +tests: + - description: "isClientError considers network errors" + operations: + - name: failPoint + object: testRunner + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: [ ping ] + closeConnection: true + - name: runCommand + object: *database0 + arguments: + commandName: ping + command: { ping: 1 } + expectError: + isClientError: true diff --git a/specifications/unified-test-format/tests/valid-pass/operator-type-number_alias.json b/specifications/unified-test-format/tests/valid-pass/operator-type-number_alias.json new file mode 100644 index 00000000000..e628d0d7773 --- /dev/null +++ b/specifications/unified-test-format/tests/valid-pass/operator-type-number_alias.json @@ -0,0 +1,174 @@ +{ + "description": 
"operator-type-number_alias", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "test" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "test", + "documents": [] + } + ], + "tests": [ + { + "description": "type number alias matches int32", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "x": { + "$numberInt": "2147483647" + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "x": { + "$$type": "number" + } + } + ] + } + ] + }, + { + "description": "type number alias matches int64", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "x": { + "$numberLong": "9223372036854775807" + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "x": { + "$$type": "number" + } + } + ] + } + ] + }, + { + "description": "type number alias matches double", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "x": { + "$numberDouble": "2.71828" + } + } + } + }, + { + "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "x": { + "$$type": "number" + } + } + ] + } + ] + }, + { + "description": "type number alias matches decimal128", + "operations": [ + { + "name": "insertOne", + "object": "collection0", + "arguments": { + "document": { + "_id": 1, + "x": { + "$numberDecimal": "3.14159" + } + } + } + }, + { 
+ "name": "find", + "object": "collection0", + "arguments": { + "filter": { + "_id": 1 + }, + "limit": 1 + }, + "expectResult": [ + { + "_id": 1, + "x": { + "$$type": "number" + } + } + ] + } + ] + } + ] +} diff --git a/specifications/unified-test-format/tests/valid-pass/operator-type-number_alias.yml b/specifications/unified-test-format/tests/valid-pass/operator-type-number_alias.yml new file mode 100644 index 00000000000..04357a0242c --- /dev/null +++ b/specifications/unified-test-format/tests/valid-pass/operator-type-number_alias.yml @@ -0,0 +1,61 @@ +description: operator-type-number_alias + +schemaVersion: "1.0" + +createEntities: + - client: + id: &client0 client0 + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name test + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: [] + +tests: + - + description: type number alias matches int32 + operations: + - name: insertOne + object: *collection0 + arguments: + document: { _id: 1, x: { $numberInt: "2147483647" } } + - &find + name: find + object: *collection0 + arguments: + filter: { _id: 1 } + limit: 1 + expectResult: + - { _id: 1, x: { $$type: "number" } } + - + description: type number alias matches int64 + operations: + - name: insertOne + object: *collection0 + arguments: + document: { _id: 1, x: { $numberLong: "9223372036854775807" } } + - *find + - + description: type number alias matches double + operations: + - name: insertOne + object: *collection0 + arguments: + document: { _id: 1, x: { $numberDouble: "2.71828" } } + - *find + - + description: type number alias matches decimal128 + operations: + - name: insertOne + object: *collection0 + arguments: + document: { _id: 1, x: { $numberDecimal: "3.14159" } } + - *find diff --git a/specifications/unified-test-format/tests/valid-pass/poc-queryable-encryption.json 
b/specifications/unified-test-format/tests/valid-pass/poc-queryable-encryption.json new file mode 100644 index 00000000000..309d1d3b4be --- /dev/null +++ b/specifications/unified-test-format/tests/valid-pass/poc-queryable-encryption.json @@ -0,0 +1,193 @@ +{ + "description": "poc-queryable-encryption", + "schemaVersion": "1.23", + "runOnRequirements": [ + { + "minServerVersion": "7.0", + "csfle": true, + "topologies": [ + "replicaset", + "load-balanced", + "sharded" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "autoEncryptOpts": { + "keyVaultNamespace": "keyvault.datakeys", + "kmsProviders": { + "local": { + "key": "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk" + } + } + } + } + }, + { + "database": { + "id": "encryptedDB", + "client": "client0", + "databaseName": "poc-queryable-encryption" + } + }, + { + "collection": { + "id": "encryptedColl", + "database": "encryptedDB", + "collectionName": "encrypted" + } + }, + { + "client": { + "id": "client1" + } + }, + { + "database": { + "id": "unencryptedDB", + "client": "client1", + "databaseName": "poc-queryable-encryption" + } + }, + { + "collection": { + "id": "unencryptedColl", + "database": "unencryptedDB", + "collectionName": "encrypted" + } + } + ], + "initialData": [ + { + "databaseName": "keyvault", + "collectionName": "datakeys", + "documents": [ + { + "_id": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1641024000000" + } + }, + "status": 1, + 
"masterKey": { + "provider": "local" + } + } + ] + }, + { + "databaseName": "poc-queryable-encryption", + "collectionName": "encrypted", + "documents": [], + "createOptions": { + "encryptedFields": { + "fields": [ + { + "keyId": { + "$binary": { + "base64": "EjRWeBI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "path": "encryptedInt", + "bsonType": "int", + "queries": { + "queryType": "equality", + "contention": { + "$numberLong": "0" + } + } + } + ] + } + } + } + ], + "tests": [ + { + "description": "insert, replace, and find with queryable encryption", + "operations": [ + { + "object": "encryptedColl", + "name": "insertOne", + "arguments": { + "document": { + "_id": 1, + "encryptedInt": 11 + } + } + }, + { + "object": "encryptedColl", + "name": "replaceOne", + "arguments": { + "filter": { + "encryptedInt": 11 + }, + "replacement": { + "encryptedInt": 22 + } + } + }, + { + "object": "encryptedColl", + "name": "find", + "arguments": { + "filter": { + "encryptedInt": 22 + } + }, + "expectResult": [ + { + "_id": 1, + "encryptedInt": 22 + } + ] + }, + { + "object": "unencryptedColl", + "name": "find", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "encryptedInt": { + "$$type": "binData" + }, + "__safeContent__": [ + { + "$binary": { + "base64": "rhS16TJojgDDBtbluxBokvcotP1mQTGeYpNt8xd3MJQ=", + "subType": "00" + } + } + ] + } + ] + } + ] + } + ] +} diff --git a/specifications/unified-test-format/tests/valid-pass/poc-queryable-encryption.yml b/specifications/unified-test-format/tests/valid-pass/poc-queryable-encryption.yml new file mode 100644 index 00000000000..797904ee959 --- /dev/null +++ b/specifications/unified-test-format/tests/valid-pass/poc-queryable-encryption.yml @@ -0,0 +1,86 @@ +description: poc-queryable-encryption + +schemaVersion: "1.23" + +runOnRequirements: + - minServerVersion: "7.0" + csfle: true + # QE is not supported on standalone servers + topologies: [ replicaset, load-balanced, sharded ] + +createEntities: + - 
client: + id: &client0 client0 + autoEncryptOpts: + keyVaultNamespace: keyvault.datakeys + kmsProviders: + local: + key: Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk + - database: + id: &encryptedDB encryptedDB + client: *client0 + databaseName: &encryptedDBName poc-queryable-encryption + - collection: + id: &encryptedColl encryptedColl + database: *encryptedDB + collectionName: &encryptedCollName encrypted + - client: + id: &client1 client1 + - database: + id: &unencryptedDB unencryptedDB + client: *client1 + databaseName: *encryptedDBName + - collection: + id: &unencryptedColl unencryptedColl + database: *unencryptedDB + collectionName: *encryptedCollName + +initialData: + - databaseName: keyvault + collectionName: datakeys + documents: + - _id: &keyid { $binary: { base64: EjRWeBI0mHYSNBI0VniQEg==, subType: "04" } } + keyMaterial: { $binary: { base64: sHe0kz57YW7v8g9VP9sf/+K1ex4JqKc5rf/URX3n3p8XdZ6+15uXPaSayC6adWbNxkFskuMCOifDoTT+rkqMtFkDclOy884RuGGtUysq3X7zkAWYTKi8QAfKkajvVbZl2y23UqgVasdQu3OVBQCrH/xY00nNAs/52e958nVjBuzQkSb1T8pKJAyjZsHJ60+FtnfafDZSTAIBJYn7UWBCwQ==, subType: "00" } } + creationDate: { $date: { $numberLong: "1641024000000" } } + updateDate: { $date: { $numberLong: "1641024000000" } } + status: 1 + masterKey: + provider: local + - databaseName: *encryptedDBName + collectionName: *encryptedCollName + documents: [] + createOptions: + encryptedFields: + fields: + - keyId: *keyid + path: 'encryptedInt' + bsonType: 'int' + queries: {'queryType': 'equality', 'contention': {'$numberLong': '0'}} + +tests: + - description: insert, replace, and find with queryable encryption + operations: + - object: *encryptedColl + name: insertOne + arguments: + document: + _id: 1 + encryptedInt: 11 + - object: *encryptedColl + name: replaceOne + arguments: + filter: { encryptedInt: 11 } + replacement: { encryptedInt: 22 } + - object: *encryptedColl + name: find + arguments: + filter: { 
encryptedInt: 22 } + expectResult: + - _id: 1 + encryptedInt: 22 + - object: *unencryptedColl + name: find + arguments: + filter: {} + expectResult: + - { _id: 1, encryptedInt: { $$type: binData }, __safeContent__: [ { "$binary" : { "base64" : "rhS16TJojgDDBtbluxBokvcotP1mQTGeYpNt8xd3MJQ=", "subType" : "00" } } ] } \ No newline at end of file diff --git a/specifications/uri-options/tests/proxy-options.json b/specifications/uri-options/tests/proxy-options.json new file mode 100644 index 00000000000..585546ead7f --- /dev/null +++ b/specifications/uri-options/tests/proxy-options.json @@ -0,0 +1,139 @@ +{ + "tests": [ + { + "description": "proxyPort without proxyHost", + "uri": "mongodb://localhost/?proxyPort=1080", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "proxyUsername without proxyHost", + "uri": "mongodb://localhost/?proxyUsername=abc", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "proxyPassword without proxyHost", + "uri": "mongodb://localhost/?proxyPassword=def", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "all other proxy options without proxyHost", + "uri": "mongodb://localhost/?proxyPort=1080&proxyUsername=abc&proxyPassword=def", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "proxyUsername without proxyPassword", + "uri": "mongodb://localhost/?proxyHost=localhost&proxyUsername=abc", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "proxyPassword without proxyUsername", + "uri": "mongodb://localhost/?proxyHost=localhost&proxyPassword=def", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "multiple proxyHost parameters", + "uri": 
"mongodb://localhost/?proxyHost=localhost&proxyHost=localhost2", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "multiple proxyPort parameters", + "uri": "mongodb://localhost/?proxyHost=localhost&proxyPort=1234&proxyPort=12345", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "multiple proxyUsername parameters", + "uri": "mongodb://localhost/?proxyHost=localhost&proxyUsername=abc&proxyUsername=def&proxyPassword=123", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "multiple proxyPassword parameters", + "uri": "mongodb://localhost/?proxyHost=localhost&proxyUsername=abc&proxyPassword=123&proxyPassword=456", + "valid": false, + "warning": false, + "hosts": null, + "auth": null, + "options": null + }, + { + "description": "only host present", + "uri": "mongodb://localhost/?proxyHost=localhost", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "host and default port present", + "uri": "mongodb://localhost/?proxyHost=localhost&proxyPort=1080", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "host and non-default port present", + "uri": "mongodb://localhost/?proxyHost=localhost&proxyPort=12345", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "replicaset, host and non-default port present", + "uri": "mongodb://rs1,rs2,rs3/?proxyHost=localhost&proxyPort=12345", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": {} + }, + { + "description": "all options present", + "uri": "mongodb://rs1,rs2,rs3/?proxyHost=localhost&proxyPort=12345&proxyUsername=asdf&proxyPassword=qwerty", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": {} 
+ } + ] +} diff --git a/specifications/uri-options/tests/proxy-options.yml b/specifications/uri-options/tests/proxy-options.yml new file mode 100644 index 00000000000..a97863dd599 --- /dev/null +++ b/specifications/uri-options/tests/proxy-options.yml @@ -0,0 +1,121 @@ +tests: + - + description: "proxyPort without proxyHost" + uri: "mongodb://localhost/?proxyPort=1080" + valid: false + warning: false + hosts: ~ + auth: ~ + options: ~ + - + description: "proxyUsername without proxyHost" + uri: "mongodb://localhost/?proxyUsername=abc" + valid: false + warning: false + hosts: ~ + auth: ~ + options: ~ + - + description: "proxyPassword without proxyHost" + uri: "mongodb://localhost/?proxyPassword=def" + valid: false + warning: false + hosts: ~ + auth: ~ + options: ~ + - + description: "all other proxy options without proxyHost" + uri: "mongodb://localhost/?proxyPort=1080&proxyUsername=abc&proxyPassword=def" + valid: false + warning: false + hosts: ~ + auth: ~ + options: ~ + - + description: "proxyUsername without proxyPassword" + uri: "mongodb://localhost/?proxyHost=localhost&proxyUsername=abc" + valid: false + warning: false + hosts: ~ + auth: ~ + options: ~ + - + description: "proxyPassword without proxyUsername" + uri: "mongodb://localhost/?proxyHost=localhost&proxyPassword=def" + valid: false + warning: false + hosts: ~ + auth: ~ + options: ~ + - + description: "multiple proxyHost parameters" + uri: "mongodb://localhost/?proxyHost=localhost&proxyHost=localhost2" + valid: false + warning: false + hosts: ~ + auth: ~ + options: ~ + - + description: "multiple proxyPort parameters" + uri: "mongodb://localhost/?proxyHost=localhost&proxyPort=1234&proxyPort=12345" + valid: false + warning: false + hosts: ~ + auth: ~ + options: ~ + - + description: "multiple proxyUsername parameters" + uri: "mongodb://localhost/?proxyHost=localhost&proxyUsername=abc&proxyUsername=def&proxyPassword=123" + valid: false + warning: false + hosts: ~ + auth: ~ + options: ~ + - + description: 
"multiple proxyPassword parameters" + uri: "mongodb://localhost/?proxyHost=localhost&proxyUsername=abc&proxyPassword=123&proxyPassword=456" + valid: false + warning: false + hosts: ~ + auth: ~ + options: ~ + - + description: "only host present" + uri: "mongodb://localhost/?proxyHost=localhost" + valid: true + warning: false + hosts: ~ + auth: ~ + options: {} + - + description: "host and default port present" + uri: "mongodb://localhost/?proxyHost=localhost&proxyPort=1080" + valid: true + warning: false + hosts: ~ + auth: ~ + options: {} + - + description: "host and non-default port present" + uri: "mongodb://localhost/?proxyHost=localhost&proxyPort=12345" + valid: true + warning: false + hosts: ~ + auth: ~ + options: {} + - + description: "replicaset, host and non-default port present" + uri: "mongodb://rs1,rs2,rs3/?proxyHost=localhost&proxyPort=12345" + valid: true + warning: false + hosts: ~ + auth: ~ + options: {} + - + description: "all options present" + uri: "mongodb://rs1,rs2,rs3/?proxyHost=localhost&proxyPort=12345&proxyUsername=asdf&proxyPassword=qwerty" + valid: true + warning: false + hosts: ~ + auth: ~ + options: {} diff --git a/specifications/uri-options/tests/read-preference-options.json b/specifications/uri-options/tests/read-preference-options.json index 054f5e089ed..abbf0d0cc6e 100644 --- a/specifications/uri-options/tests/read-preference-options.json +++ b/specifications/uri-options/tests/read-preference-options.json @@ -36,6 +36,21 @@ ] } }, + { + "description": "Read preference tags are case sensitive", + "uri": "mongodb://example.com/?readPreference=secondary&readPreferenceTags=dc:NY", + "valid": true, + "warning": false, + "hosts": null, + "auth": null, + "options": { + "readPreferenceTags": [ + { + "dc": "NY" + } + ] + } + }, { "description": "Invalid readPreferenceTags causes a warning", "uri": "mongodb://example.com/?readPreferenceTags=invalid", diff --git a/specifications/uri-options/tests/read-preference-options.yml 
b/specifications/uri-options/tests/read-preference-options.yml index c7f18fe72f2..267454c0ea0 100644 --- a/specifications/uri-options/tests/read-preference-options.yml +++ b/specifications/uri-options/tests/read-preference-options.yml @@ -26,6 +26,17 @@ tests: readPreferenceTags: - dc: "ny" + - + description: "Read preference tags are case sensitive" + uri: "mongodb://example.com/?readPreference=secondary&readPreferenceTags=dc:NY" + valid: true + warning: false + hosts: ~ + auth: ~ + options: + readPreferenceTags: + - + dc: "NY" - description: "Invalid readPreferenceTags causes a warning" uri: "mongodb://example.com/?readPreferenceTags=invalid" diff --git a/src/MongoDB.Bson/Exceptions/DuplicateBsonMemberMapAttributeException.cs b/src/MongoDB.Bson/Exceptions/DuplicateBsonMemberMapAttributeException.cs index caefa7ec10f..07fd875534f 100644 --- a/src/MongoDB.Bson/Exceptions/DuplicateBsonMemberMapAttributeException.cs +++ b/src/MongoDB.Bson/Exceptions/DuplicateBsonMemberMapAttributeException.cs @@ -23,7 +23,7 @@ namespace MongoDB.Bson /// </summary> public class DuplicateBsonMemberMapAttributeException : BsonException { - // constructors + // constructors /// <summary> /// Initializes a new instance of the <see cref="DuplicateBsonMemberMapAttributeException" /> class. 
/// </summary> diff --git a/src/MongoDB.Bson/IO/BinaryPrimitivesCompat.cs b/src/MongoDB.Bson/IO/BinaryPrimitivesCompat.cs index 323649f6d08..41f3f90b67a 100644 --- a/src/MongoDB.Bson/IO/BinaryPrimitivesCompat.cs +++ b/src/MongoDB.Bson/IO/BinaryPrimitivesCompat.cs @@ -15,6 +15,7 @@ using System; using System.Buffers.Binary; +using System.Runtime.InteropServices; namespace MongoDB.Bson.IO { @@ -31,5 +32,55 @@ public static void WriteDoubleLittleEndian(Span<byte> destination, double value) { BinaryPrimitives.WriteInt64LittleEndian(destination, BitConverter.DoubleToInt64Bits(value)); } + + public static float ReadSingleLittleEndian(ReadOnlySpan<byte> source) + { + if (source.Length < 4) + { + throw new ArgumentOutOfRangeException(nameof(source.Length), "Source span is too small to contain a float."); + } + +#if NET6_0_OR_GREATER + return BinaryPrimitives.ReadSingleLittleEndian(source); +#else + // Constructs a 32-bit float from 4 Little Endian bytes in a platform-agnostic way. + // Ensures correct bit pattern regardless of system endianness. + int intValue = + source[0] | + (source[1] << 8) | + (source[2] << 16) | + (source[3] << 24); + + // This struct emulates BitConverter.Int32BitsToSingle for platforms like net472. + return new FloatIntUnion { IntValue = intValue }.FloatValue; +#endif + } + + public static void WriteSingleLittleEndian(Span<byte> destination, float value) + { + if (destination.Length < 4) + { + throw new ArgumentOutOfRangeException(nameof(destination.Length), "Destination span is too small to hold a float."); + } + +#if NET6_0_OR_GREATER + BinaryPrimitives.WriteSingleLittleEndian(destination, value); +#else + // This struct emulates BitConverter.SingleToInt32Bits for platforms like net472. 
+ int intValue = new FloatIntUnion { FloatValue = value }.IntValue; + + destination[0] = (byte)(intValue); + destination[1] = (byte)(intValue >> 8); + destination[2] = (byte)(intValue >> 16); + destination[3] = (byte)(intValue >> 24); +#endif + } + + [StructLayout(LayoutKind.Explicit)] + private struct FloatIntUnion + { + [FieldOffset(0)] public float FloatValue; + [FieldOffset(0)] public int IntValue; + } } } diff --git a/src/MongoDB.Bson/IO/BsonBinaryReaderSettings.cs b/src/MongoDB.Bson/IO/BsonBinaryReaderSettings.cs index 72305ed0169..1c510ed71e3 100644 --- a/src/MongoDB.Bson/IO/BsonBinaryReaderSettings.cs +++ b/src/MongoDB.Bson/IO/BsonBinaryReaderSettings.cs @@ -76,7 +76,7 @@ public UTF8Encoding Encoding } /// <summary> - /// Gets or sets whether to fix occurrences of the old binary subtype on input. + /// Gets or sets whether to fix occurrences of the old binary subtype on input. /// </summary> public bool FixOldBinarySubTypeOnInput { @@ -89,7 +89,7 @@ public bool FixOldBinarySubTypeOnInput } /// <summary> - /// Gets or sets whether to fix occurrences of the old representation of DateTime.MaxValue on input. + /// Gets or sets whether to fix occurrences of the old representation of DateTime.MaxValue on input. 
/// </summary> public bool FixOldDateTimeMaxValueOnInput { diff --git a/src/MongoDB.Bson/IO/ByteBufferStream.cs b/src/MongoDB.Bson/IO/ByteBufferStream.cs index eddc84df2bf..6c04117dded 100644 --- a/src/MongoDB.Bson/IO/ByteBufferStream.cs +++ b/src/MongoDB.Bson/IO/ByteBufferStream.cs @@ -572,7 +572,7 @@ public override void WriteCString(string value) { // Compare to 128 to preserve original behavior const int maxLengthToUseCStringUtf8EncodingWith = 128; - + if (maxLength <= maxLengthToUseCStringUtf8EncodingWith) { using var rentedBuffer = ThreadStaticBuffer.RentBuffer(maxLengthToUseCStringUtf8EncodingWith); diff --git a/src/MongoDB.Bson/IO/InputBufferChunkSource.cs b/src/MongoDB.Bson/IO/InputBufferChunkSource.cs index 6997de9d818..470a4910fcb 100644 --- a/src/MongoDB.Bson/IO/InputBufferChunkSource.cs +++ b/src/MongoDB.Bson/IO/InputBufferChunkSource.cs @@ -132,7 +132,7 @@ public int MaxUnpooledChunkSize get { return _maxUnpooledChunkSize; } } - // methods + // methods /// <inheritdoc/> public void Dispose() { diff --git a/src/MongoDB.Bson/ObjectModel/BsonTypeExtensions.cs b/src/MongoDB.Bson/ObjectModel/BsonTypeExtensions.cs new file mode 100644 index 00000000000..d05b85bda32 --- /dev/null +++ b/src/MongoDB.Bson/ObjectModel/BsonTypeExtensions.cs @@ -0,0 +1,57 @@ +/* Copyright 2010-present MongoDB Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +using System; + +namespace MongoDB.Bson +{ + /// <summary> + /// A static class containing extension methods for <see cref="BsonType"/>. + /// </summary> + public static class BsonTypeExtensions + { + /// <summary> + /// Maps a <see cref="BsonType"/> to its corresponding server string representation. + /// </summary> + /// <param name="type">The input type to map.</param> + public static string ToServerString(this BsonType type) + { + return type switch + { + BsonType.Array => "array", + BsonType.Binary => "binData", + BsonType.Boolean => "bool", + BsonType.DateTime => "date", + BsonType.Decimal128 => "decimal", + BsonType.Document => "object", + BsonType.Double => "double", + BsonType.Int32 => "int", + BsonType.Int64 => "long", + BsonType.JavaScript => "javascript", + BsonType.JavaScriptWithScope => "javascriptWithScope", + BsonType.MaxKey => "maxKey", + BsonType.MinKey => "minKey", + BsonType.Null => "null", + BsonType.ObjectId => "objectId", + BsonType.RegularExpression => "regex", + BsonType.String => "string", + BsonType.Symbol => "symbol", + BsonType.Timestamp => "timestamp", + BsonType.Undefined => "undefined", + _ => throw new ArgumentException($"Unexpected BSON type: {type}.", nameof(type)) + }; + } + } +} diff --git a/src/MongoDB.Bson/ObjectModel/MaterializedOnDemandBsonDocument.cs b/src/MongoDB.Bson/ObjectModel/MaterializedOnDemandBsonDocument.cs index 1c2bbcfc5ef..70ea8d12601 100644 --- a/src/MongoDB.Bson/ObjectModel/MaterializedOnDemandBsonDocument.cs +++ b/src/MongoDB.Bson/ObjectModel/MaterializedOnDemandBsonDocument.cs @@ -407,7 +407,7 @@ public override IEnumerator<BsonElement> GetEnumerator() /// Returns a hash code for this instance. /// </summary> /// <returns> - /// A hash code for this instance, suitable for use in hashing algorithms and data structures like a hash table. + /// A hash code for this instance, suitable for use in hashing algorithms and data structures like a hash table. 
/// </returns> public override int GetHashCode() { diff --git a/src/MongoDB.Bson/Serialization/BinaryVectorReader.cs b/src/MongoDB.Bson/Serialization/BinaryVectorReader.cs index ef83c201091..9c6a0dfe745 100644 --- a/src/MongoDB.Bson/Serialization/BinaryVectorReader.cs +++ b/src/MongoDB.Bson/Serialization/BinaryVectorReader.cs @@ -17,6 +17,7 @@ using System.Collections.Generic; using System.Linq; using System.Runtime.InteropServices; +using MongoDB.Bson.IO; namespace MongoDB.Bson.Serialization { @@ -41,21 +42,8 @@ public static (TItem[] Items, byte Padding, BinaryVectorDataType VectorDataType) switch (vectorDataType) { case BinaryVectorDataType.Float32: - - if ((vectorDataBytes.Span.Length & 3) != 0) - { - throw new FormatException("Data length of binary vector of type Float32 must be a multiple of 4 bytes."); - } - - if (BitConverter.IsLittleEndian) - { - var singles = MemoryMarshal.Cast<byte, float>(vectorDataBytes.Span); - items = (TItem[])(object)singles.ToArray(); - } - else - { - throw new NotSupportedException("Binary vector data is not supported on Big Endian architecture yet."); - } + var floatArray = ReadSinglesArrayLittleEndian(vectorDataBytes.Span); + items = (TItem[])(object)floatArray; break; case BinaryVectorDataType.Int8: var itemsSpan = MemoryMarshal.Cast<byte, TItem>(vectorDataBytes.Span); @@ -124,6 +112,30 @@ TExpectedItem[] AsTypedArrayOrThrow<TExpectedItem>() } } + private static float[] ReadSinglesArrayLittleEndian(ReadOnlySpan<byte> span) + { + if ((span.Length & 3) != 0) + { + throw new FormatException("Data length of binary vector of type Float32 must be a multiple of 4 bytes."); + } + + float[] result; + if (BitConverter.IsLittleEndian) + { + result = MemoryMarshal.Cast<byte, float>(span).ToArray(); + } + else + { + var count = span.Length / 4; + result = new float[count]; + for (int i = 0; i < count; i++) + { + result[i] = BinaryPrimitivesCompat.ReadSingleLittleEndian(span.Slice(i * 4, 4)); + } + } + return result; + } + public static void 
ValidateItemType<TItem>(BinaryVectorDataType binaryVectorDataType) { IEnumerable<Type> expectedItemTypes = binaryVectorDataType switch diff --git a/src/MongoDB.Bson/Serialization/BinaryVectorWriter.cs b/src/MongoDB.Bson/Serialization/BinaryVectorWriter.cs index 0e9d5e74f6d..03cd5be9fdd 100644 --- a/src/MongoDB.Bson/Serialization/BinaryVectorWriter.cs +++ b/src/MongoDB.Bson/Serialization/BinaryVectorWriter.cs @@ -15,6 +15,7 @@ using System; using System.Runtime.InteropServices; +using MongoDB.Bson.IO; namespace MongoDB.Bson.Serialization { @@ -35,15 +36,39 @@ public static byte[] WriteToBytes<TItem>(BinaryVector<TItem> binaryVector) public static byte[] WriteToBytes<TItem>(ReadOnlySpan<TItem> vectorData, BinaryVectorDataType binaryVectorDataType, byte padding) where TItem : struct { - if (!BitConverter.IsLittleEndian) + switch (binaryVectorDataType) { - throw new NotSupportedException("Binary vector data is not supported on Big Endian architecture yet."); - } + case BinaryVectorDataType.Float32: + var length = vectorData.Length * 4; + var result = new byte[2 + length]; + result[0] = (byte)binaryVectorDataType; + result[1] = padding; + + var floatSpan = MemoryMarshal.Cast<TItem, float>(vectorData); + var floatOutput = result.AsSpan(2); + + if (BitConverter.IsLittleEndian) + { + MemoryMarshal.Cast<float, byte>(floatSpan).CopyTo(floatOutput); + } + else + { + for (int i = 0; i < floatSpan.Length; i++) + { + BinaryPrimitivesCompat.WriteSingleLittleEndian(floatOutput.Slice(i * 4, 4), floatSpan[i]); + } + } - var vectorDataBytes = MemoryMarshal.Cast<TItem, byte>(vectorData); - byte[] result = [(byte)binaryVectorDataType, padding, .. vectorDataBytes]; + return result; - return result; + case BinaryVectorDataType.Int8: + case BinaryVectorDataType.PackedBit: + var vectorDataBytes = MemoryMarshal.Cast<TItem, byte>(vectorData); + return [(byte)binaryVectorDataType, padding, .. 
vectorDataBytes]; + + default: + throw new NotSupportedException($"Binary vector serialization is not supported for {binaryVectorDataType}."); + } } } } diff --git a/src/MongoDB.Bson/Serialization/BsonClassMap.cs b/src/MongoDB.Bson/Serialization/BsonClassMap.cs index 87239a33bf5..e81624f0026 100644 --- a/src/MongoDB.Bson/Serialization/BsonClassMap.cs +++ b/src/MongoDB.Bson/Serialization/BsonClassMap.cs @@ -1057,7 +1057,7 @@ public void Reset() _creatorMaps.Clear(); _creator = null; _declaredMemberMaps = new List<BsonMemberMap>(); - _discriminator = _classType.Name; + _discriminator = BsonUtils.GetFriendlyTypeName(_classType); _discriminatorIsRequired = false; _extraElementsMemberMap = null; _idMemberMap = null; @@ -1323,12 +1323,32 @@ internal IDiscriminatorConvention GetDiscriminatorConvention() var discriminatorConvention = _discriminatorConvention; if (discriminatorConvention == null) { - // it's possible but harmless for multiple threads to do the discriminator convention lookukp at the same time + // it's possible but harmless for multiple threads to do the discriminator convention lookup at the same time discriminatorConvention = LookupDiscriminatorConvention(); _discriminatorConvention = discriminatorConvention; + + if (discriminatorConvention != null) + { + EnsureNoMemberMapConflicts(discriminatorConvention.ElementName); + } } + return discriminatorConvention; + void EnsureNoMemberMapConflicts(string elementName) + { + var conflictingMemberMap = _allMemberMaps.FirstOrDefault(memberMap => memberMap.ElementName == elementName); + + if (conflictingMemberMap != null) + { + var fieldOrProperty = conflictingMemberMap.MemberInfo is FieldInfo ? 
"field" : "property"; + + throw new BsonSerializationException( + $"The discriminator element name cannot be {discriminatorConvention.ElementName} " + + $"because it is already being used by the {fieldOrProperty} {conflictingMemberMap.MemberName} of type {_classType.FullName}"); + } + } + IDiscriminatorConvention LookupDiscriminatorConvention() { var classMap = this; diff --git a/src/MongoDB.Bson/Serialization/BsonSerializationContext.cs b/src/MongoDB.Bson/Serialization/BsonSerializationContext.cs index 5ab93777ceb..a14a62322a8 100644 --- a/src/MongoDB.Bson/Serialization/BsonSerializationContext.cs +++ b/src/MongoDB.Bson/Serialization/BsonSerializationContext.cs @@ -38,7 +38,7 @@ private BsonSerializationContext( // public properties /// <summary> - /// Gets a function that, when executed, will indicate whether the type + /// Gets a function that, when executed, will indicate whether the type /// is a dynamic type. /// </summary> public Func<Type, bool> IsDynamicType diff --git a/src/MongoDB.Bson/Serialization/BsonSerializer.cs b/src/MongoDB.Bson/Serialization/BsonSerializer.cs index 6602e74dec5..df21c6f6bb5 100644 --- a/src/MongoDB.Bson/Serialization/BsonSerializer.cs +++ b/src/MongoDB.Bson/Serialization/BsonSerializer.cs @@ -39,7 +39,7 @@ public static class BsonSerializer private static Dictionary<Type, IIdGenerator> __idGenerators = new Dictionary<Type, IIdGenerator>(); private static Dictionary<Type, IDiscriminatorConvention> __discriminatorConventions = new Dictionary<Type, IDiscriminatorConvention>(); private static Dictionary<BsonValue, HashSet<Type>> __discriminators = new Dictionary<BsonValue, HashSet<Type>>(); - private static HashSet<Type> __discriminatedTypes = new HashSet<Type>(); + private static ConcurrentDictionary<Type, bool> __discriminatedTypes = new (); private static BsonSerializerRegistry __serializerRegistry; private static TypeMappingSerializationProvider __typeMappingSerializationProvider; // ConcurrentDictionary<Type, object> is being 
used as a concurrent set of Type. The values will always be null. @@ -321,8 +321,7 @@ internal static bool IsDiscriminatorConventionRegisteredAtThisLevel(Type type) /// <returns>True if the type is discriminated.</returns> public static bool IsTypeDiscriminated(Type type) { - var typeInfo = type.GetTypeInfo(); - return typeInfo.IsInterface || __discriminatedTypes.Contains(type); + return type.IsInterface || __discriminatedTypes.ContainsKey(type); } /// <summary> @@ -587,7 +586,8 @@ public static void RegisterDiscriminator(Type type, BsonValue discriminator) // mark all base types as discriminated (so we know that it's worth reading a discriminator) for (var baseType = typeInfo.BaseType; baseType != null; baseType = baseType.GetTypeInfo().BaseType) { - __discriminatedTypes.Add(baseType); + // We expect that TryAdd will always return true, so no need to check the return value. + __discriminatedTypes.TryAdd(baseType, true); } } } diff --git a/src/MongoDB.Bson/Serialization/Conventions/ConventionRegistry.cs b/src/MongoDB.Bson/Serialization/Conventions/ConventionRegistry.cs index f54934d6ed3..217a7b69d86 100644 --- a/src/MongoDB.Bson/Serialization/Conventions/ConventionRegistry.cs +++ b/src/MongoDB.Bson/Serialization/Conventions/ConventionRegistry.cs @@ -116,8 +116,8 @@ public static void Register(string name, IConventionPack conventions, Func<Type, /// Removes the conventions specified by the given name. 
/// </summary> /// <param name="name">The name.</param> - /// <remarks>Removing a convention allows the removal of the special __defaults__ conventions - /// and the __attributes__ conventions for those who want to completely customize the + /// <remarks>Removing a convention allows the removal of the special __defaults__ conventions + /// and the __attributes__ conventions for those who want to completely customize the /// experience.</remarks> public static void Remove(string name) { diff --git a/src/MongoDB.Bson/Serialization/Conventions/ImmutableTypeClassMapConvention.cs b/src/MongoDB.Bson/Serialization/Conventions/ImmutableTypeClassMapConvention.cs index bb50574b190..717cd61b3fd 100644 --- a/src/MongoDB.Bson/Serialization/Conventions/ImmutableTypeClassMapConvention.cs +++ b/src/MongoDB.Bson/Serialization/Conventions/ImmutableTypeClassMapConvention.cs @@ -130,7 +130,7 @@ private bool PropertyMatchesSomeCreatorParameter(BsonClassMap classMap, Property var constructors = GetUsableConstructors(classTypeInfo); foreach (var constructorInfo in constructors) { - if (classTypeInfo.IsAbstract || + if (classTypeInfo.IsAbstract || constructorInfo.IsFamily || // protected constructorInfo.IsFamilyOrAssembly) // protected internal { diff --git a/src/MongoDB.Bson/Serialization/Conventions/StandardDiscriminatorConvention.cs b/src/MongoDB.Bson/Serialization/Conventions/StandardDiscriminatorConvention.cs index ca4d2cbc117..d2b042d1fc0 100644 --- a/src/MongoDB.Bson/Serialization/Conventions/StandardDiscriminatorConvention.cs +++ b/src/MongoDB.Bson/Serialization/Conventions/StandardDiscriminatorConvention.cs @@ -39,14 +39,15 @@ public abstract class StandardDiscriminatorConvention : IDiscriminatorConvention /// <param name="elementName">The element name.</param> protected StandardDiscriminatorConvention(string elementName) { - if (elementName == null) + if (string.IsNullOrEmpty(elementName)) { - throw new ArgumentNullException("elementName"); + throw new 
ArgumentException("Discriminator element name cannot be null or empty.", nameof(elementName)); } if (elementName.IndexOf('\0') != -1) { - throw new ArgumentException("Element names cannot contain nulls.", "elementName"); + throw new ArgumentException("Discriminator element name cannot contain nulls.", nameof(elementName)); } + _elementName = elementName; } diff --git a/src/MongoDB.Bson/Serialization/IdGenerators/AscendingGuidGenerator.cs b/src/MongoDB.Bson/Serialization/IdGenerators/AscendingGuidGenerator.cs index cbf696ea98e..f9cf72d396d 100644 --- a/src/MongoDB.Bson/Serialization/IdGenerators/AscendingGuidGenerator.cs +++ b/src/MongoDB.Bson/Serialization/IdGenerators/AscendingGuidGenerator.cs @@ -20,7 +20,7 @@ namespace MongoDB.Bson.Serialization.IdGenerators { /// <summary> - /// A GUID generator that generates GUIDs in ascending order. To enable + /// A GUID generator that generates GUIDs in ascending order. To enable /// an index to make use of the ascending nature make sure to use /// <see cref="GuidRepresentation.Standard">GuidRepresentation.Standard</see> /// as the storage representation. @@ -62,7 +62,7 @@ public static AscendingGuidGenerator Instance /// Generates an ascending Guid for a document. Consecutive invocations /// should generate Guids that are ascending from a MongoDB perspective /// </summary> - /// <param name="container">The container of the document (will be a + /// <param name="container">The container of the document (will be a 
</param> /// <param name="document">The document it was generated for.</param> /// <returns>A Guid.</returns> diff --git a/src/MongoDB.Bson/Serialization/Serializers/ExpandoObjectSerializer.cs b/src/MongoDB.Bson/Serialization/Serializers/ExpandoObjectSerializer.cs index 7160bc11288..4ac504304e5 100644 --- a/src/MongoDB.Bson/Serialization/Serializers/ExpandoObjectSerializer.cs +++ b/src/MongoDB.Bson/Serialization/Serializers/ExpandoObjectSerializer.cs @@ -22,7 +22,7 @@ namespace MongoDB.Bson.Serialization.Serializers /// Serializer for <see cref="ExpandoObject"/>. /// </summary> /// <remarks> - /// The use of <see cref="ExpandoObject"/> will serialize any <see cref="List{Object}"/> without type information. + /// The use of <see cref="ExpandoObject"/> will serialize any <see cref="List{Object}"/> without type information. /// To get the best experience out of using an <see cref="ExpandoObject"/>, any member wanting to be used /// as an array should use <see cref="List{Object}"/>. /// </remarks> diff --git a/src/MongoDB.Bson/Serialization/Serializers/IEnumerableDeserializingAsCollectionSerializer.cs b/src/MongoDB.Bson/Serialization/Serializers/IEnumerableDeserializingAsCollectionSerializer.cs index a5ce3afa28d..ca9ec6a771b 100644 --- a/src/MongoDB.Bson/Serialization/Serializers/IEnumerableDeserializingAsCollectionSerializer.cs +++ b/src/MongoDB.Bson/Serialization/Serializers/IEnumerableDeserializingAsCollectionSerializer.cs @@ -179,7 +179,7 @@ public IEnumerableDeserializingAsCollectionSerializer<TIEnumerable, TItem, TColl } // explicit interface implementations - IBsonSerializer IChildSerializerConfigurable.ChildSerializer => ItemSerializer; + IBsonSerializer IChildSerializerConfigurable.ChildSerializer => ItemSerializer; IBsonSerializer IChildSerializerConfigurable.WithChildSerializer(IBsonSerializer childSerializer) => WithItemSerializer((IBsonSerializer<TItem>)childSerializer); diff --git a/src/MongoDB.Bson/Serialization/Serializers/PrimitivesArrayReader.cs 
b/src/MongoDB.Bson/Serialization/Serializers/PrimitivesArrayReader.cs index e811edef230..c8b9bd27cd7 100644 --- a/src/MongoDB.Bson/Serialization/Serializers/PrimitivesArrayReader.cs +++ b/src/MongoDB.Bson/Serialization/Serializers/PrimitivesArrayReader.cs @@ -1,4 +1,5 @@ using System; +using System.Buffers.Binary; using System.Collections.Generic; using System.Runtime.CompilerServices; using MongoDB.Bson.IO; @@ -73,7 +74,8 @@ private static T[] ReadBsonArray<T>( using var buffer = ThreadStaticBuffer.RentBuffer(array.Length); var bytes = buffer.Bytes; - array.GetBytes(0, bytes, 0, array.Length); + array.GetBytes(0, bytes, 0, array.Length); + var span = bytes.AsSpan(); var result = new List<T>(); @@ -82,10 +84,10 @@ private static T[] ReadBsonArray<T>( while (index < maxIndex) { - ValidateBsonType(bsonDataType); + ValidateBsonType(bsonDataType, span); // Skip name - while (bytes[index] != 0) { index++; }; + while (span[index] != 0) { index++; } index++; // Skip string terminating 0 T value = default; @@ -95,21 +97,22 @@ private static T[] ReadBsonArray<T>( { case ConversionType.DoubleToSingle: { - var v = (float)BitConverter.ToDouble(bytes, index); + var v = (float)BinaryPrimitivesCompat.ReadDoubleLittleEndian(span.Slice(index)); value = Unsafe.As<float, T>(ref v); break; } case ConversionType.DoubleToDouble: { - var v = BitConverter.ToDouble(bytes, index); + var v = BinaryPrimitivesCompat.ReadDoubleLittleEndian(span.Slice(index)); + value = Unsafe.As<double, T>(ref v); break; } case ConversionType.Decimal128ToDecimal128: { - var lowBits = (ulong)BitConverter.ToInt64(bytes, index); - var highBits = (ulong)BitConverter.ToInt64(bytes, index + 8); + var lowBits = BinaryPrimitives.ReadUInt64LittleEndian(span.Slice(index)); + var highBits = BinaryPrimitives.ReadUInt64LittleEndian(span.Slice(index + 8)); var v = Decimal128.ToDecimal(Decimal128.FromIEEEBits(highBits, lowBits)); value = Unsafe.As<decimal, T>(ref v); @@ -117,63 +120,63 @@ private static T[] ReadBsonArray<T>( 
} case ConversionType.BoolToBool: { - var v = bytes[index] != 0; + var v = span[index] != 0; value = Unsafe.As<bool, T>(ref v); break; } case ConversionType.Int32ToInt8: { - var v = (sbyte)BitConverter.ToInt32(bytes, index); + var v = (sbyte)BinaryPrimitives.ReadInt32LittleEndian(span.Slice(index)); value = Unsafe.As<sbyte, T>(ref v); break; } case ConversionType.Int32ToUInt8: { - var v = (byte)BitConverter.ToInt32(bytes, index); + var v = (byte)BinaryPrimitives.ReadInt32LittleEndian(span.Slice(index)); value = Unsafe.As<byte, T>(ref v); break; } case ConversionType.Int32ToInt16: { - var v = (short)BitConverter.ToInt32(bytes, index); + var v = (short)BinaryPrimitives.ReadInt32LittleEndian(span.Slice(index)); value = Unsafe.As<short, T>(ref v); break; } case ConversionType.Int32ToUInt16: { - var v = (ushort)BitConverter.ToInt32(bytes, index); + var v = (ushort)BinaryPrimitives.ReadInt32LittleEndian(span.Slice(index)); value = Unsafe.As<ushort, T>(ref v); break; } case ConversionType.Int32ToChar: { - var v = BitConverter.ToChar(bytes, index); + var v = (char)(ushort)BinaryPrimitives.ReadInt32LittleEndian(span.Slice(index)); value = Unsafe.As<char, T>(ref v); break; } case ConversionType.Int32ToInt32: { - var v = BitConverter.ToInt32(bytes, index); + var v = BinaryPrimitives.ReadInt32LittleEndian(span.Slice(index)); value = Unsafe.As<int, T>(ref v); break; } case ConversionType.Int32ToUInt32: { - var v = BitConverter.ToUInt32(bytes, index); + var v = BinaryPrimitives.ReadUInt32LittleEndian(span.Slice(index)); value = Unsafe.As<uint, T>(ref v); break; } case ConversionType.Int64ToInt64: { - var v = BitConverter.ToInt64(bytes, index); + var v = BinaryPrimitives.ReadInt64LittleEndian(span.Slice(index)); value = Unsafe.As<long, T>(ref v); break; } case ConversionType.Int64ToUInt64: { - var v = BitConverter.ToUInt64(bytes, index); + var v = BinaryPrimitives.ReadUInt64LittleEndian(span.Slice(index)); value = Unsafe.As<ulong, T>(ref v); break; } @@ -186,13 +189,13 @@ private 
static T[] ReadBsonArray<T>( index += bsonDataSize; } - ValidateBsonType(BsonType.EndOfDocument); + ValidateBsonType(BsonType.EndOfDocument, span); return result.ToArray(); - void ValidateBsonType(BsonType bsonType) + void ValidateBsonType(BsonType bsonType, Span<byte> span) { - if ((BsonType)bytes[index] != bsonType) + if ((BsonType)span[index] != bsonType) { throw new InvalidOperationException(); } diff --git a/src/MongoDB.Driver.Encryption/AssemblyInfo.cs b/src/MongoDB.Driver.Encryption/AssemblyInfo.cs index 72755506a01..e6175038a2f 100644 --- a/src/MongoDB.Driver.Encryption/AssemblyInfo.cs +++ b/src/MongoDB.Driver.Encryption/AssemblyInfo.cs @@ -2,3 +2,4 @@ [assembly: InternalsVisibleTo("MongoDB.Driver.Encryption.Tests, PublicKey=002400000480000094000000060200000024000052534131000400000100010035287f0d3883c0a075c88e0cda3ce93b621003ecbd5e920d4a8c7238564f4d2f4f68116aca28c9b21341dc3a877679c14556192b2b2f5fe2c11d624e0894d308ff7b94bf6fd72aef1b41017ffe2572e99019d1c61963e68cd0ed67734a42cb333b808e3867cbe631937214e32e409fb1fa62fdb69d494c2530e64a40e417d6ee")] [assembly: InternalsVisibleTo("MongoDB.Driver.Tests, PublicKey=002400000480000094000000060200000024000052534131000400000100010035287f0d3883c0a075c88e0cda3ce93b621003ecbd5e920d4a8c7238564f4d2f4f68116aca28c9b21341dc3a877679c14556192b2b2f5fe2c11d624e0894d308ff7b94bf6fd72aef1b41017ffe2572e99019d1c61963e68cd0ed67734a42cb333b808e3867cbe631937214e32e409fb1fa62fdb69d494c2530e64a40e417d6ee")] +[assembly: InternalsVisibleTo("MongoDB.Driver.TestHelpers, PublicKey=002400000480000094000000060200000024000052534131000400000100010035287f0d3883c0a075c88e0cda3ce93b621003ecbd5e920d4a8c7238564f4d2f4f68116aca28c9b21341dc3a877679c14556192b2b2f5fe2c11d624e0894d308ff7b94bf6fd72aef1b41017ffe2572e99019d1c61963e68cd0ed67734a42cb333b808e3867cbe631937214e32e409fb1fa62fdb69d494c2530e64a40e417d6ee")] diff --git a/src/MongoDB.Driver.Encryption/CryptClient.cs b/src/MongoDB.Driver.Encryption/CryptClient.cs index c2f03c4500f..25ddf39d33e 100644 --- 
a/src/MongoDB.Driver.Encryption/CryptClient.cs +++ b/src/MongoDB.Driver.Encryption/CryptClient.cs @@ -105,7 +105,7 @@ public CryptContext StartEncryptionContext(string db, byte[] command) /// <summary> /// Starts an explicit encryption context. /// </summary> - public CryptContext StartExplicitEncryptionContext(byte[] keyId, byte[] keyAltName, string queryType, long? contentionFactor, string encryptionAlgorithm, byte[] message, byte[] rangeOptions, bool isExpressionMode = false) + public CryptContext StartExplicitEncryptionContext(byte[] keyId, byte[] keyAltName, string queryType, long? contentionFactor, string encryptionAlgorithm, byte[] message, byte[] rangeOptions, byte[] textOptions, bool isExpressionMode = false) { var handle = Library.mongocrypt_ctx_new(_handle); @@ -123,6 +123,11 @@ public CryptContext StartExplicitEncryptionContext(byte[] keyId, byte[] keyAltNa PinnedBinary.RunAsPinnedBinary(handle, rangeOptions, _status, (h, pb) => Library.mongocrypt_ctx_setopt_algorithm_range(h, pb)); } + if (textOptions != null) + { + PinnedBinary.RunAsPinnedBinary(handle, textOptions, _status, (h, pb) => Library.mongocrypt_ctx_setopt_algorithm_text(h, pb)); + } + handle.Check(_status, Library.mongocrypt_ctx_setopt_algorithm(handle, encryptionAlgorithm, -1)); if (queryType != null) diff --git a/src/MongoDB.Driver.Encryption/CsfleSchemaBuilder.cs b/src/MongoDB.Driver.Encryption/CsfleSchemaBuilder.cs new file mode 100644 index 00000000000..33ca8d1e691 --- /dev/null +++ b/src/MongoDB.Driver.Encryption/CsfleSchemaBuilder.cs @@ -0,0 +1,344 @@ +/* Copyright 2010-present MongoDB Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +using System; +using System.Collections.Generic; +using System.Linq; +using System.Linq.Expressions; +using MongoDB.Bson; +using MongoDB.Bson.Serialization; + +namespace MongoDB.Driver.Encryption +{ + /// <summary> + /// A builder class for creating Client-Side Field Level Encryption (CSFLE) schemas. + /// </summary> + public class CsfleSchemaBuilder + { + private readonly Dictionary<string, BsonDocument> _schemas = new(); + + private CsfleSchemaBuilder() + { + } + + /// <summary> + /// Creates a new instance of the <see cref="CsfleSchemaBuilder"/> and configures it using the provided action. + /// </summary> + /// <param name="configure">An action to configure the schema builder.</param> + public static CsfleSchemaBuilder Create(Action<CsfleSchemaBuilder> configure) + { + var builder = new CsfleSchemaBuilder(); + configure(builder); + return builder; + } + + /// <summary> + /// Adds an encrypted collection schema for a specific collection namespace. 
+ /// </summary> + /// <typeparam name="T">The type of the document in the collection.</typeparam> + /// <param name="collectionNamespace">The namespace of the collection.</param> + /// <param name="configure">An action to configure the encrypted collection builder.</param> + /// <returns>The current <see cref="CsfleSchemaBuilder"/> instance.</returns> + public CsfleSchemaBuilder Encrypt<T>(CollectionNamespace collectionNamespace, Action<EncryptedCollectionBuilder<T>> configure) + { + var builder = new EncryptedCollectionBuilder<T>(); + configure(builder); + _schemas.Add(collectionNamespace.FullName, builder.Build()); + return this; + } + + /// <summary> + /// Builds and returns the resulting CSFLE schema. + /// </summary> + public Dictionary<string, BsonDocument> Build() + { + if (!_schemas.Any()) + { + throw new InvalidOperationException("No schemas were added. Use Encrypt<T> to add a schema."); + } + + return _schemas; + } + } + + /// <summary> + /// A builder class for creating encrypted collection schemas. + /// </summary> + /// <typeparam name="TDocument">The type of the document in the collection.</typeparam> + public class EncryptedCollectionBuilder<TDocument> + { + private readonly BsonDocument _schema = new("bsonType", "object"); + private readonly RenderArgs<TDocument> _args = new(BsonSerializer.LookupSerializer<TDocument>(), BsonSerializer.SerializerRegistry); + + internal EncryptedCollectionBuilder() + { + } + + /// <summary> + /// Configures encryption metadata for the collection. + /// </summary> + /// <param name="keyId">The key ID to use for encryption.</param> + /// <param name="algorithm">The encryption algorithm to use.</param> + /// <returns>The current <see cref="EncryptedCollectionBuilder{TDocument}"/> instance.</returns> + public EncryptedCollectionBuilder<TDocument> EncryptMetadata(Guid? keyId = null, EncryptionAlgorithm? 
algorithm = null) + { + if (keyId is null && algorithm is null) + { + throw new ArgumentException("At least one of keyId or algorithm must be specified."); + } + + _schema["encryptMetadata"] = new BsonDocument + { + { "keyId", () => new BsonArray { new BsonBinaryData(keyId!.Value, GuidRepresentation.Standard) }, keyId is not null }, + { "algorithm", () => MapCsfleEncryptionAlgorithmToString(algorithm!.Value), algorithm is not null } + }; + return this; + } + + /// <summary> + /// Adds a pattern property to the schema with encryption settings. + /// </summary> + /// <param name="pattern">The regex pattern for the property.</param> + /// <param name="bsonType">The BSON type of the property.</param> + /// <param name="algorithm">The encryption algorithm to use.</param> + /// <param name="keyId">The key ID to use for encryption.</param> + /// <returns>The current <see cref="EncryptedCollectionBuilder{TDocument}"/> instance.</returns> + public EncryptedCollectionBuilder<TDocument> PatternProperty( + string pattern, + BsonType bsonType, + EncryptionAlgorithm? algorithm = null, + Guid? keyId = null) + => PatternProperty(pattern, [bsonType], algorithm, keyId); + + /// <summary> + /// Adds a pattern property to the schema with encryption settings. + /// </summary> + /// <param name="pattern">The regex pattern for the property.</param> + /// <param name="bsonTypes">The BSON types of the property.</param> + /// <param name="algorithm">The encryption algorithm to use.</param> + /// <param name="keyId">The key ID to use for encryption.</param> + /// <returns>The current <see cref="EncryptedCollectionBuilder{TDocument}"/> instance.</returns> + public EncryptedCollectionBuilder<TDocument> PatternProperty( + string pattern, + IEnumerable<BsonType> bsonTypes = null, + EncryptionAlgorithm? algorithm = null, + Guid? 
keyId = null) + { + AddToPatternProperties(pattern, CreateEncryptDocument(bsonTypes, algorithm, keyId)); + return this; + } + + /// <summary> + /// Adds a nested pattern property to the schema. + /// </summary> + /// <typeparam name="TField">The type of the nested field.</typeparam> + /// <param name="path">The field.</param> + /// <param name="configure">An action to configure the nested builder.</param> + /// <returns>The current <see cref="EncryptedCollectionBuilder{TDocument}"/> instance.</returns> + public EncryptedCollectionBuilder<TDocument> PatternProperty<TField>( + Expression<Func<TDocument, TField>> path, + Action<EncryptedCollectionBuilder<TField>> configure) + => PatternProperty(new ExpressionFieldDefinition<TDocument, TField>(path), configure); + + /// <summary> + /// Adds a nested pattern property to the schema. + /// </summary> + /// <typeparam name="TField">The type of the nested field.</typeparam> + /// <param name="path">The field.</param> + /// <param name="configure">An action to configure the nested builder.</param> + /// <returns>The current <see cref="EncryptedCollectionBuilder{TDocument}"/> instance.</returns> + public EncryptedCollectionBuilder<TDocument> PatternProperty<TField>( + FieldDefinition<TDocument> path, + Action<EncryptedCollectionBuilder<TField>> configure) + { + var nestedBuilder = new EncryptedCollectionBuilder<TField>(); + configure(nestedBuilder); + + var fieldName = path.Render(_args).FieldName; + + AddToPatternProperties(fieldName, nestedBuilder.Build()); + return this; + } + + /// <summary> + /// Adds a property to the schema with encryption settings. 
+ /// </summary> + /// <typeparam name="TField">The type of the field.</typeparam> + /// <param name="path">The field.</param> + /// <param name="bsonType">The BSON type of the property.</param> + /// <param name="algorithm">The encryption algorithm to use.</param> + /// <param name="keyId">The key ID to use for encryption.</param> + /// <returns>The current <see cref="EncryptedCollectionBuilder{TDocument}"/> instance.</returns> + public EncryptedCollectionBuilder<TDocument> Property<TField>( + Expression<Func<TDocument, TField>> path, + BsonType bsonType, + EncryptionAlgorithm? algorithm = null, + Guid? keyId = null) + => Property(path, [bsonType], algorithm, keyId); + + /// <summary> + /// Adds a property to the schema with encryption settings. + /// </summary> + /// <typeparam name="TField">The type of the field.</typeparam> + /// <param name="path">The field.</param> + /// <param name="bsonTypes">The BSON types of the property.</param> + /// <param name="algorithm">The encryption algorithm to use.</param> + /// <param name="keyId">The key ID to use for encryption.</param> + /// <returns>The current <see cref="EncryptedCollectionBuilder{TDocument}"/> instance.</returns> + public EncryptedCollectionBuilder<TDocument> Property<TField>( + Expression<Func<TDocument, TField>> path, + IEnumerable<BsonType> bsonTypes = null, + EncryptionAlgorithm? algorithm = null, + Guid? keyId = null) + => Property(new ExpressionFieldDefinition<TDocument, TField>(path), bsonTypes, algorithm, keyId); + + /// <summary> + /// Adds a property to the schema with encryption settings. 
+ /// </summary> + /// <param name="path">The field.</param> + /// <param name="bsonType">The BSON type of the property.</param> + /// <param name="algorithm">The encryption algorithm to use.</param> + /// <param name="keyId">The key ID to use for encryption.</param> + /// <returns>The current <see cref="EncryptedCollectionBuilder{TDocument}"/> instance.</returns> + public EncryptedCollectionBuilder<TDocument> Property( + FieldDefinition<TDocument> path, + BsonType bsonType, + EncryptionAlgorithm? algorithm = null, + Guid? keyId = null) + => Property(path, [bsonType], algorithm, keyId); + + /// <summary> + /// Adds a property to the schema with encryption settings. + /// </summary> + /// <param name="path">The field.</param> + /// <param name="bsonTypes">The BSON types of the property.</param> + /// <param name="algorithm">The encryption algorithm to use.</param> + /// <param name="keyId">The key ID to use for encryption.</param> + /// <returns>The current <see cref="EncryptedCollectionBuilder{TDocument}"/> instance.</returns> + public EncryptedCollectionBuilder<TDocument> Property( + FieldDefinition<TDocument> path, + IEnumerable<BsonType> bsonTypes = null, + EncryptionAlgorithm? algorithm = null, + Guid? keyId = null) + { + var fieldName = path.Render(_args).FieldName; + AddToProperties(fieldName, CreateEncryptDocument(bsonTypes, algorithm, keyId)); + return this; + } + + /// <summary> + /// Adds a nested property to the schema. 
+ /// </summary> + /// <typeparam name="TField">The type of the nested field.</typeparam> + /// <param name="path">The field.</param> + /// <param name="configure">An action to configure the nested builder.</param> + /// <returns>The current <see cref="EncryptedCollectionBuilder{TDocument}"/> instance.</returns> + public EncryptedCollectionBuilder<TDocument> Property<TField>( + Expression<Func<TDocument, TField>> path, + Action<EncryptedCollectionBuilder<TField>> configure) + => Property(new ExpressionFieldDefinition<TDocument, TField>(path), configure); + + + /// <summary> + /// Adds a nested property to the schema. + /// </summary> + /// <typeparam name="TField">The type of the nested field.</typeparam> + /// <param name="path">The field.</param> + /// <param name="configure">An action to configure the nested builder.</param> + /// <returns>The current <see cref="EncryptedCollectionBuilder{TDocument}"/> instance.</returns> + public EncryptedCollectionBuilder<TDocument> Property<TField>( + FieldDefinition<TDocument> path, + Action<EncryptedCollectionBuilder<TField>> configure) + { + var nestedBuilder = new EncryptedCollectionBuilder<TField>(); + configure(nestedBuilder); + + var fieldName = path.Render(_args).FieldName; + AddToProperties(fieldName, nestedBuilder.Build()); + return this; + } + + internal BsonDocument Build() => _schema; + + private static BsonDocument CreateEncryptDocument( + IEnumerable<BsonType> bsonTypes = null, + EncryptionAlgorithm? algorithm = null, + Guid? keyId = null) + { + BsonValue bsonTypeVal = null; + + if (bsonTypes != null) + { + var convertedBsonTypes = bsonTypes.Select(type => type.ToServerString()).ToList(); + + if (convertedBsonTypes.Count == 0) + { + throw new ArgumentException("At least one BSON type must be specified.", nameof(bsonTypes)); + } + + bsonTypeVal = convertedBsonTypes.Count == 1 + ? 
convertedBsonTypes[0] + : new BsonArray(convertedBsonTypes); + } + + return new BsonDocument + { + { "encrypt", new BsonDocument + { + { "bsonType", () => bsonTypeVal, bsonTypeVal is not null }, + { "algorithm", () => MapCsfleEncryptionAlgorithmToString(algorithm!.Value), algorithm is not null }, + { + "keyId", + () => new BsonArray(new[] { new BsonBinaryData(keyId!.Value, GuidRepresentation.Standard) }), + keyId is not null + }, + } + } + }; + } + + private void AddToPatternProperties(string field, BsonDocument document) + { + if (!_schema.TryGetValue("patternProperties", out var value)) + { + value = new BsonDocument(); + _schema["patternProperties"] = value; + } + var patternProperties = value.AsBsonDocument; + patternProperties[field] = document; + } + + private void AddToProperties(string field, BsonDocument document) + { + if (!_schema.TryGetValue("properties", out var value)) + { + value = new BsonDocument(); + _schema["properties"] = value; + } + var properties = value.AsBsonDocument; + properties[field] = document; + } + + private static string MapCsfleEncryptionAlgorithmToString(EncryptionAlgorithm algorithm) + { + return algorithm switch + { + EncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random => "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + EncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic => "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + _ => throw new ArgumentException($"Unexpected algorithm type: {algorithm}.", nameof(algorithm)) + }; + } + } +} \ No newline at end of file diff --git a/src/MongoDB.Driver.Encryption/EncryptOptions.cs b/src/MongoDB.Driver.Encryption/EncryptOptions.cs index 56c2be13e84..d8306ec6a36 100644 --- a/src/MongoDB.Driver.Encryption/EncryptOptions.cs +++ b/src/MongoDB.Driver.Encryption/EncryptOptions.cs @@ -14,6 +14,7 @@ */ using System; +using System.Linq; using MongoDB.Bson; using MongoDB.Driver.Core.Misc; @@ -91,6 +92,220 @@ public RangeOptions( public long? 
Sparsity => _sparsity; } + /// <summary> + /// Prefix options. + /// </summary> + /// <remarks> + /// PrefixOptions is used with TextOptions and provides further options to support "prefixPreview" queries. + /// PrefixOptions is currently unstable API and subject to backwards breaking changes. + /// </remarks> + public sealed class PrefixOptions + { + private readonly int _strMaxQueryLength; + private readonly int _strMinQueryLength; + + /// <summary> + /// Initializes a new instance of the <see cref="PrefixOptions"/> class. + /// </summary> + /// <param name="strMaxQueryLength">The maximum allowed query length.</param> + /// <param name="strMinQueryLength">The minimum allowed query length.</param> + public PrefixOptions(int strMaxQueryLength, int strMinQueryLength) + { + Ensure.IsGreaterThanZero(strMaxQueryLength, nameof(strMaxQueryLength)); + Ensure.IsGreaterThanZero(strMinQueryLength, nameof(strMinQueryLength)); + Ensure.That(strMaxQueryLength >= strMinQueryLength, + "strMaxQueryLength must be greater than or equal to strMinQueryLength"); + + _strMaxQueryLength = strMaxQueryLength; + _strMinQueryLength = strMinQueryLength; + } + + /// <summary> + /// Gets the maximum allowed query length. + /// </summary> + /// <remarks> + /// Querying with a longer string will error. + /// </remarks> + public int StrMaxQueryLength => _strMaxQueryLength; + + /// <summary> + /// Gets the minimum allowed query length. + /// </summary> + /// <remarks> + /// Querying with a shorter string will error. + /// </remarks> + public int StrMinQueryLength => _strMinQueryLength; + } + + /// <summary> + /// Substring options. + /// </summary> + /// <remarks> + /// SubstringOptions is used with TextOptions and provides further options to support "substringPreview" queries. + /// SubstringOptions is currently unstable API and subject to backwards breaking changes. 
+ /// </remarks> + public sealed class SubstringOptions + { + private readonly int _strMaxLength; + private readonly int _strMaxQueryLength; + private readonly int _strMinQueryLength; + + /// <summary> + /// Initializes a new instance of the <see cref="SubstringOptions"/> class. + /// </summary> + /// <param name="strMaxLength">The maximum allowed length to insert.</param> + /// <param name="strMaxQueryLength">The maximum allowed query length.</param> + /// <param name="strMinQueryLength">The minimum allowed query length.</param> + public SubstringOptions(int strMaxLength, int strMaxQueryLength, int strMinQueryLength) + { + Ensure.IsGreaterThanZero(strMaxLength, nameof(strMaxLength)); + Ensure.IsGreaterThanZero(strMaxQueryLength, nameof(strMaxQueryLength)); + Ensure.IsGreaterThanZero(strMinQueryLength, nameof(strMinQueryLength)); + + Ensure.That(strMaxLength >= strMaxQueryLength, + "strMaxLength must be greater than or equal to strMaxQueryLength"); + Ensure.That(strMaxQueryLength >= strMinQueryLength, + "strMaxQueryLength must be greater than or equal to strMinQueryLength"); + + _strMaxLength = strMaxLength; + _strMaxQueryLength = strMaxQueryLength; + _strMinQueryLength = strMinQueryLength; + } + + /// <summary> + /// Gets the maximum allowed length to insert. + /// </summary> + /// <remarks> + /// Inserting longer strings will error. + /// </remarks> + public int StrMaxLength => _strMaxLength; + + /// <summary> + /// Gets the maximum allowed query length. + /// </summary> + /// <remarks> + /// Querying with a longer string will error. + /// </remarks> + public int StrMaxQueryLength => _strMaxQueryLength; + + /// <summary> + /// Gets the minimum allowed query length. + /// </summary> + /// <remarks> + /// Querying with a shorter string will error. + /// </remarks> + public int StrMinQueryLength => _strMinQueryLength; + } + + /// <summary> + /// Suffix options. 
+ /// </summary> + /// <remarks> + /// SuffixOptions is used with TextOptions and provides further options to support "suffixPreview" queries. + /// SuffixOptions is currently unstable API and subject to backwards breaking changes. + /// </remarks> + public sealed class SuffixOptions + { + private readonly int _strMaxQueryLength; + private readonly int _strMinQueryLength; + + /// <summary> + /// Initializes a new instance of the <see cref="SuffixOptions"/> class. + /// </summary> + /// <param name="strMaxQueryLength">The maximum allowed query length.</param> + /// <param name="strMinQueryLength">The minimum allowed query length.</param> + public SuffixOptions(int strMaxQueryLength, int strMinQueryLength) + { + Ensure.IsGreaterThanZero(strMaxQueryLength, nameof(strMaxQueryLength)); + Ensure.IsGreaterThanZero(strMinQueryLength, nameof(strMinQueryLength)); + Ensure.That(strMaxQueryLength >= strMinQueryLength, + "strMaxQueryLength must be greater than or equal to strMinQueryLength"); + + _strMaxQueryLength = strMaxQueryLength; + _strMinQueryLength = strMinQueryLength; + } + + /// <summary> + /// Gets the maximum allowed query length. + /// </summary> + /// <remarks> + /// Querying with a longer string will error. + /// </remarks> + public int StrMaxQueryLength => _strMaxQueryLength; + + /// <summary> + /// Gets the minimum allowed query length. + /// </summary> + /// <remarks> + /// Querying with a shorter string will error. + /// </remarks> + public int StrMinQueryLength => _strMinQueryLength; + } + + /// <summary> + /// Text options. + /// </summary> + /// <remarks> + /// TextOptions specifies options for a Queryable Encryption field supporting "textPreview" queries. + /// TextOptions is currently unstable API and subject to backwards breaking changes. + /// TextOptions only applies when the encryption algorithm is "textPreview". 
+ /// </remarks> + public sealed class TextOptions + { + private readonly bool _caseSensitive; + private readonly bool _diacriticSensitive; + private readonly PrefixOptions _prefixOptions; + private readonly SubstringOptions _substringOptions; + private readonly SuffixOptions _suffixOptions; + + /// <summary> + /// Initializes a new instance of the <see cref="TextOptions"/> class. + /// </summary> + /// <param name="caseSensitive">The indicator of whether text indexes for this field are case-sensitive.</param> + /// <param name="diacriticSensitive">The indicator of whether text indexes for this field are diacritic sensitive.</param> + /// <param name="prefixOptions">The prefix options.</param> + /// <param name="substringOptions">The substring options.</param> + /// <param name="suffixOptions">The suffix options.</param> + public TextOptions( + bool caseSensitive, + bool diacriticSensitive, + Optional<PrefixOptions> prefixOptions = default, + Optional<SubstringOptions> substringOptions = default, + Optional<SuffixOptions> suffixOptions = default) + { + _caseSensitive = caseSensitive; + _diacriticSensitive = diacriticSensitive; + _prefixOptions = prefixOptions.WithDefault(null); + _substringOptions = substringOptions.WithDefault(null); + _suffixOptions = suffixOptions.WithDefault(null); + } + + /// <summary> + /// Gets whether text indexes for this field are case-sensitive. + /// </summary> + public bool CaseSensitive => _caseSensitive; + + /// <summary> + /// Gets whether text indexes for this field are diacritic sensitive. + /// </summary> + public bool DiacriticSensitive => _diacriticSensitive; + + /// <summary> + /// Gets the prefix options. + /// </summary> + public PrefixOptions PrefixOptions => _prefixOptions; + + /// <summary> + /// Gets the substring options. + /// </summary> + public SubstringOptions SubstringOptions => _substringOptions; + + /// <summary> + /// Gets the suffix options. 
+ /// </summary> + public SuffixOptions SuffixOptions => _suffixOptions; + } + /// <summary> /// Encryption options for explicit encryption. /// </summary> @@ -104,6 +319,8 @@ private static string ConvertEnumAlgorithmToString(EncryptionAlgorithm encryptio EncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random => "AEAD_AES_256_CBC_HMAC_SHA_512-Random", _ => encryptionAlgorithm.ToString(), }; + + private static readonly string[] ValidTextQueryTypes = ["prefixPreview", "substringPreview", "suffixPreview"]; #endregion // private fields @@ -112,6 +329,7 @@ private static string ConvertEnumAlgorithmToString(EncryptionAlgorithm encryptio private readonly long? _contentionFactor; private readonly Guid? _keyId; private readonly RangeOptions _rangeOptions; + private readonly TextOptions _textOptions; private readonly string _queryType; // constructors @@ -150,6 +368,42 @@ public EncryptOptions( EnsureThatOptionsAreValid(); } + /// <summary> + /// Initializes a new instance of the <see cref="EncryptOptions"/> class. 
+ /// </summary> + /// <param name="algorithm">The encryption algorithm.</param> + /// <param name="textOptions">The text options.</param> + /// <param name="alternateKeyName">The alternate key name.</param> + /// <param name="keyId">The key Id.</param> + /// <param name="contentionFactor">The contention factor.</param> + /// <param name="queryType">The query type.</param> + public EncryptOptions( + string algorithm, + TextOptions textOptions, + Optional<string> alternateKeyName = default, + Optional<long?> contentionFactor = default, + Optional<Guid?> keyId = default, + Optional<string> queryType = default) + { + Ensure.IsNotNull(algorithm, nameof(algorithm)); + Ensure.IsNotNull(textOptions, nameof(textOptions)); + if (Enum.TryParse<EncryptionAlgorithm>(algorithm, out var @enum)) + { + _algorithm = ConvertEnumAlgorithmToString(@enum); + } + else + { + _algorithm = algorithm; + } + + _alternateKeyName = alternateKeyName.WithDefault(null); + _contentionFactor = contentionFactor.WithDefault(null); + _keyId = keyId.WithDefault(null); + _textOptions = textOptions; + _queryType = queryType.WithDefault(null); + EnsureThatOptionsAreValid(); + } + /// <summary> /// Initializes a new instance of the <see cref="EncryptOptions"/> class. /// </summary> @@ -176,6 +430,32 @@ public EncryptOptions( { } + /// <summary> + /// Initializes a new instance of the <see cref="EncryptOptions"/> class. 
+ /// </summary> + /// <param name="algorithm">The encryption algorithm.</param> + /// <param name="textOptions">The text options.</param> + /// <param name="alternateKeyName">The alternate key name.</param> + /// <param name="keyId">The key Id.</param> + /// <param name="contentionFactor">The contention factor.</param> + /// <param name="queryType">The query type.</param> + public EncryptOptions( + EncryptionAlgorithm algorithm, + TextOptions textOptions, + Optional<string> alternateKeyName = default, + Optional<Guid?> keyId = default, + Optional<long?> contentionFactor = default, + Optional<string> queryType = default) + : this( + algorithm: ConvertEnumAlgorithmToString(algorithm), + textOptions, + alternateKeyName, + contentionFactor, + keyId, + queryType) + { + } + // public properties /// <summary> /// Gets the algorithm. @@ -215,6 +495,9 @@ public EncryptOptions( /// <value> /// The query type. /// </value> + /// <remarks> + /// Currently, we only support "equality", "range", "prefixPreview", "suffixPreview" or "substringPreview" queryTypes. + /// </remarks> public string QueryType => _queryType; /// <summary> @@ -230,6 +513,16 @@ public EncryptOptions( /// </remarks> public RangeOptions RangeOptions => _rangeOptions; + /// <summary> + /// Gets the text options. + /// </summary> + /// <remarks> + /// TextOptions specifies options for a Queryable Encryption field supporting "textPreview" queries. + /// TextOptions is currently unstable API and subject to backwards breaking changes. + /// TextOptions only applies when the encryption algorithm is "textPreview". + /// </remarks> + public TextOptions TextOptions => _textOptions; + /// <summary> /// Returns a new EncryptOptions instance with some settings changed. /// </summary> @@ -257,14 +550,62 @@ public EncryptOptions With( rangeOptions: rangeOptions.WithDefault(_rangeOptions)); } + /// <summary> + /// Returns a new EncryptOptions instance with some settings changed. 
+ /// </summary> + /// <param name="textOptions">The text options.</param> + /// <param name="algorithm">The encryption algorithm.</param> + /// <param name="alternateKeyName">The alternate key name.</param> + /// <param name="keyId">The keyId.</param> + /// <param name="contentionFactor">The contention factor.</param> + /// <param name="queryType">The query type.</param> + /// <returns>A new EncryptOptions instance.</returns> + public EncryptOptions With( + TextOptions textOptions, + Optional<string> algorithm = default, + Optional<string> alternateKeyName = default, + Optional<Guid?> keyId = default, + Optional<long?> contentionFactor = default, + Optional<string> queryType = default) + { + return new EncryptOptions( + algorithm: algorithm.WithDefault(_algorithm), + alternateKeyName: alternateKeyName.WithDefault(_alternateKeyName), + contentionFactor: contentionFactor.WithDefault(_contentionFactor), + keyId: keyId.WithDefault(_keyId), + queryType: queryType.WithDefault(_queryType), + textOptions: textOptions); + } + // private methods private void EnsureThatOptionsAreValid() { Ensure.That(!(!_keyId.HasValue && _alternateKeyName == null), "Key Id and AlternateKeyName may not both be null."); Ensure.That(!(_keyId.HasValue && _alternateKeyName != null), "Key Id and AlternateKeyName may not both be set."); - Ensure.That(!(_contentionFactor.HasValue && (_algorithm != EncryptionAlgorithm.Indexed.ToString() && _algorithm != EncryptionAlgorithm.Range.ToString())), "ContentionFactor only applies for Indexed or Range algorithm."); - Ensure.That(!(_queryType != null && (_algorithm != EncryptionAlgorithm.Indexed.ToString() && _algorithm != EncryptionAlgorithm.Range.ToString())), "QueryType only applies for Indexed or Range algorithm."); + Ensure.That(!(_contentionFactor.HasValue && (_algorithm != EncryptionAlgorithm.Indexed.ToString() && _algorithm != EncryptionAlgorithm.Range.ToString() && _algorithm != EncryptionAlgorithm.TextPreview.ToString())), "ContentionFactor only 
applies for Indexed, Range, or TextPreview algorithm."); + Ensure.That(!(_queryType != null && (_algorithm != EncryptionAlgorithm.Indexed.ToString() && _algorithm != EncryptionAlgorithm.Range.ToString() && _algorithm != EncryptionAlgorithm.TextPreview.ToString())), "QueryType only applies for Indexed, Range, or TextPreview algorithm."); Ensure.That(!(_rangeOptions != null && _algorithm != EncryptionAlgorithm.Range.ToString()), "RangeOptions only applies for Range algorithm."); + Ensure.That(!(_textOptions != null && _algorithm != EncryptionAlgorithm.TextPreview.ToString()), "TextOptions only applies for TextPreview algorithm."); + + if (_algorithm == EncryptionAlgorithm.TextPreview.ToString() && _queryType != null) + { + Ensure.That( + ValidTextQueryTypes.Contains(_queryType), + $"QueryType '{_queryType}' is not valid for TextPreview algorithm. Use: {string.Join(", ", ValidTextQueryTypes)}."); + } + + if (_textOptions != null && _queryType != null) + { + Ensure.That( + !(_queryType == "prefixPreview" && _textOptions.PrefixOptions == null), + "PrefixOptions must be set when queryType is 'prefixPreview'"); + Ensure.That( + !(_queryType == "substringPreview" && _textOptions.SubstringOptions == null), + "SubstringOptions must be set when queryType is 'substringPreview'"); + Ensure.That( + !(_queryType == "suffixPreview" && _textOptions.SuffixOptions == null), + "SuffixOptions must be set when queryType is 'suffixPreview'"); + } } } } diff --git a/src/MongoDB.Driver.Encryption/EncryptionAlgorithm.cs b/src/MongoDB.Driver.Encryption/EncryptionAlgorithm.cs index 850a9d02ba2..25a9a50aa03 100644 --- a/src/MongoDB.Driver.Encryption/EncryptionAlgorithm.cs +++ b/src/MongoDB.Driver.Encryption/EncryptionAlgorithm.cs @@ -49,6 +49,20 @@ public enum EncryptionAlgorithm /// <summary> /// Range algorithm. /// </summary> - Range + /// <remarks> + /// To insert or query with a "Range" encrypted payload, use a MongoClient configured with AutoEncryptionOptions. 
+ /// AutoEncryptionOptions.BypassQueryAnalysis may be true. AutoEncryptionOptions.BypassAutoEncryption must be false. + /// </remarks> + Range, + + /// <summary> + /// TextPreview algorithm. + /// </summary> + /// <remarks> + /// The TextPreview algorithm is in preview and should be used for experimental workloads only. + /// To insert or query with a "TextPreview" encrypted payload, use a MongoClient configured with AutoEncryptionOptions. + /// AutoEncryptionOptions.BypassQueryAnalysis may be true. AutoEncryptionOptions.BypassAutoEncryption must be false. + /// </remarks> + TextPreview } } diff --git a/src/MongoDB.Driver.Encryption/EncryptionOptionsExtensions.cs b/src/MongoDB.Driver.Encryption/EncryptionOptionsExtensions.cs new file mode 100644 index 00000000000..8e7d31866b9 --- /dev/null +++ b/src/MongoDB.Driver.Encryption/EncryptionOptionsExtensions.cs @@ -0,0 +1,63 @@ +/* Copyright 2010-present MongoDB Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +using MongoDB.Bson; + +namespace MongoDB.Driver.Encryption; + +internal static class EncryptionOptionsExtensions +{ + public static BsonDocument CreateDocument(this RangeOptions rangeOptions) => + new() + { + { "min", rangeOptions.Min, rangeOptions.Min != null }, + { "max", rangeOptions.Max, rangeOptions.Max != null }, + { "precision", rangeOptions.Precision, rangeOptions.Precision != null }, + { "sparsity", rangeOptions.Sparsity, rangeOptions.Sparsity != null }, + { "trimFactor", rangeOptions.TrimFactor, rangeOptions.TrimFactor != null } + }; + + public static BsonDocument CreateDocument(this TextOptions textOptions) => + new() + { + { "caseSensitive", textOptions.CaseSensitive }, + { "diacriticSensitive", textOptions.DiacriticSensitive }, + { + "prefix", () => new BsonDocument + { + { "strMaxQueryLength", textOptions.PrefixOptions.StrMaxQueryLength }, + { "strMinQueryLength", textOptions.PrefixOptions.StrMinQueryLength } + }, + textOptions.PrefixOptions != null + }, + { + "substring", () => new BsonDocument + { + { "strMaxLength", textOptions.SubstringOptions.StrMaxLength }, + { "strMaxQueryLength", textOptions.SubstringOptions.StrMaxQueryLength }, + { "strMinQueryLength", textOptions.SubstringOptions.StrMinQueryLength } + }, + textOptions.SubstringOptions != null + }, + { + "suffix", () => new BsonDocument + { + { "strMaxQueryLength", textOptions.SuffixOptions.StrMaxQueryLength }, + { "strMinQueryLength", textOptions.SuffixOptions.StrMinQueryLength } + }, + textOptions.SuffixOptions != null + } + }; +} \ No newline at end of file diff --git a/src/MongoDB.Driver.Encryption/ExplicitEncryptionLibMongoCryptController.cs b/src/MongoDB.Driver.Encryption/ExplicitEncryptionLibMongoCryptController.cs index 657a4cfa87b..cdb671f15e3 100644 --- a/src/MongoDB.Driver.Encryption/ExplicitEncryptionLibMongoCryptController.cs +++ b/src/MongoDB.Driver.Encryption/ExplicitEncryptionLibMongoCryptController.cs @@ -232,6 +232,7 @@ public BsonValue EncryptField( 
encryptOptions.Algorithm, wrappedValueBytes, ToBsonIfNotNull(encryptOptions.RangeOptions?.CreateDocument()), + ToBsonIfNotNull(encryptOptions.TextOptions?.CreateDocument()), isExpressionMode); using (context) @@ -267,6 +268,7 @@ public async Task<BsonValue> EncryptFieldAsync( encryptOptions.Algorithm, wrappedValueBytes, ToBsonIfNotNull(encryptOptions.RangeOptions?.CreateDocument()), + ToBsonIfNotNull(encryptOptions.TextOptions?.CreateDocument()), isExpressionMode); using (context) diff --git a/src/MongoDB.Driver.Encryption/LibmongocryptExtensionsMethods.cs b/src/MongoDB.Driver.Encryption/LibmongocryptExtensionsMethods.cs index f3681812cec..591e0f66574 100644 --- a/src/MongoDB.Driver.Encryption/LibmongocryptExtensionsMethods.cs +++ b/src/MongoDB.Driver.Encryption/LibmongocryptExtensionsMethods.cs @@ -15,25 +15,12 @@ using System.Linq; using System.Security.Cryptography.X509Certificates; -using MongoDB.Bson; using MongoDB.Driver.Core.Configuration; namespace MongoDB.Driver.Encryption { internal static class LibmongocryptExtensionsMethods { - public static BsonDocument CreateDocument(this RangeOptions rangeOptions) - { - return new BsonDocument - { - { "min", rangeOptions.Min, rangeOptions.Min != null }, - { "max", rangeOptions.Max, rangeOptions.Max != null }, - { "precision", rangeOptions.Precision, rangeOptions.Precision != null }, - { "sparsity", rangeOptions.Sparsity, rangeOptions.Sparsity != null }, - { "trimFactor", rangeOptions.TrimFactor, rangeOptions.TrimFactor != null } - }; - } - public static SslStreamSettings ToSslStreamSettings(this SslSettings sslSettings) { var clientCertificates = sslSettings.ClientCertificateCollection != null ? 
(sslSettings.ClientCertificateCollection).Cast<X509Certificate>() : Enumerable.Empty<X509Certificate>(); diff --git a/src/MongoDB.Driver.Encryption/Library.cs b/src/MongoDB.Driver.Encryption/Library.cs index 24f95781c92..a3fb1df2a67 100644 --- a/src/MongoDB.Driver.Encryption/Library.cs +++ b/src/MongoDB.Driver.Encryption/Library.cs @@ -147,6 +147,9 @@ static Library() _mongocrypt_ctx_setopt_algorithm_range = new Lazy<Delegates.mongocrypt_ctx_setopt_algorithm_range>( () => __loader.Value.GetFunction<Delegates.mongocrypt_ctx_setopt_algorithm_range>( ("mongocrypt_ctx_setopt_algorithm_range")), true); + _mongocrypt_ctx_setopt_algorithm_text = new Lazy<Delegates.mongocrypt_ctx_setopt_algorithm_text>( + () => __loader.Value.GetFunction<Delegates.mongocrypt_ctx_setopt_algorithm_text>( + ("mongocrypt_ctx_setopt_algorithm_text")), true); _mongocrypt_ctx_setopt_contention_factor = new Lazy<Delegates.mongocrypt_ctx_setopt_contention_factor>( () => __loader.Value.GetFunction<Delegates.mongocrypt_ctx_setopt_contention_factor>( ("mongocrypt_ctx_setopt_contention_factor")), true); @@ -236,8 +239,7 @@ public static string Version { get { - uint length; - IntPtr p = mongocrypt_version(out length); + IntPtr p = mongocrypt_version(out _); return Marshal.PtrToStringAnsi(p); } } @@ -301,6 +303,7 @@ public static string Version internal static Delegates.mongocrypt_ctx_setopt_key_alt_name mongocrypt_ctx_setopt_key_alt_name => _mongocrypt_ctx_setopt_key_alt_name.Value; internal static Delegates.mongocrypt_ctx_setopt_algorithm mongocrypt_ctx_setopt_algorithm => _mongocrypt_ctx_setopt_algorithm.Value; internal static Delegates.mongocrypt_ctx_setopt_algorithm_range mongocrypt_ctx_setopt_algorithm_range => _mongocrypt_ctx_setopt_algorithm_range.Value; + internal static Delegates.mongocrypt_ctx_setopt_algorithm_text mongocrypt_ctx_setopt_algorithm_text => _mongocrypt_ctx_setopt_algorithm_text.Value; internal static Delegates.mongocrypt_ctx_setopt_contention_factor 
mongocrypt_ctx_setopt_contention_factor => _mongocrypt_ctx_setopt_contention_factor.Value; internal static Delegates.mongocrypt_ctx_setopt_query_type mongocrypt_ctx_setopt_query_type => _mongocrypt_ctx_setopt_query_type.Value; internal static Delegates.mongocrypt_setopt_retry_kms mongocrypt_setopt_retry_kms => _mongocrypt_setopt_retry_kms.Value; @@ -394,6 +397,7 @@ public static string Version private static readonly Lazy<Delegates.mongocrypt_ctx_setopt_key_alt_name> _mongocrypt_ctx_setopt_key_alt_name; private static readonly Lazy<Delegates.mongocrypt_ctx_setopt_algorithm> _mongocrypt_ctx_setopt_algorithm; private static readonly Lazy<Delegates.mongocrypt_ctx_setopt_algorithm_range> _mongocrypt_ctx_setopt_algorithm_range; + private static readonly Lazy<Delegates.mongocrypt_ctx_setopt_algorithm_text> _mongocrypt_ctx_setopt_algorithm_text; private static readonly Lazy<Delegates.mongocrypt_ctx_setopt_contention_factor> _mongocrypt_ctx_setopt_contention_factor; private static readonly Lazy<Delegates.mongocrypt_ctx_setopt_query_type> _mongocrypt_ctx_setopt_query_type; @@ -655,16 +659,25 @@ public delegate bool /// </summary> [return: MarshalAs(UnmanagedType.I1)] public delegate bool mongocrypt_ctx_setopt_algorithm(ContextSafeHandle handle, [MarshalAs(UnmanagedType.LPStr)] string algorithm, int length); + /// <summary> /// bool mongocrypt_ctx_setopt_algorithm_range(mongocrypt_ctx_t* ctx, mongocrypt_binary_t* opts); /// </summary> [return: MarshalAs(UnmanagedType.I1)] public delegate bool mongocrypt_ctx_setopt_algorithm_range(ContextSafeHandle handle, BinarySafeHandle opts); + + /// <summary> + /// bool mongocrypt_ctx_setopt_algorithm_text(mongocrypt_ctx_t* ctx, mongocrypt_binary_t* opts); + /// </summary> + [return: MarshalAs(UnmanagedType.I1)] + public delegate bool mongocrypt_ctx_setopt_algorithm_text(ContextSafeHandle handle, BinarySafeHandle opts); + /// <summary> /// bool mongocrypt_ctx_setopt_contention_factor(mongocrypt_ctx_t* ctx, int64_t contention_factor); /// 
</summary> [return: MarshalAs(UnmanagedType.I1)] public delegate bool mongocrypt_ctx_setopt_contention_factor(ContextSafeHandle ctx, long contention_factor); + /// <summary> /// bool mongocrypt_ctx_setopt_query_type(mongocrypt_ctx_t* ctx, const char* query_type, int len) /// </summary> diff --git a/src/MongoDB.Driver.Encryption/MongoDB.Driver.Encryption.csproj b/src/MongoDB.Driver.Encryption/MongoDB.Driver.Encryption.csproj index 713187154e9..5c2a90e15bc 100644 --- a/src/MongoDB.Driver.Encryption/MongoDB.Driver.Encryption.csproj +++ b/src/MongoDB.Driver.Encryption/MongoDB.Driver.Encryption.csproj @@ -13,10 +13,15 @@ <NoWarn>CA1060;CA2101;CA1307;SYSLIB0022;SYSLIB0004</NoWarn> </PropertyGroup> + <PropertyGroup> + <LibMongoCryptVersion>r1.15</LibMongoCryptVersion> + <LibMongoCryptCommit>059d538126eb2a1cd90103147bc9b1be0d46de92</LibMongoCryptCommit> + </PropertyGroup> + <Target Name="DownloadNativeBinaries_MacOS" BeforeTargets="BeforeBuild" Condition="!Exists('$(MSBuildProjectDirectory)/runtimes/osx/native/libmongocrypt.dylib')"> <PropertyGroup> - <LibMongoCryptSourceUrl>https://mciuploads.s3.amazonaws.com/libmongocrypt-release/macos/r1.13/1cf03f1fdd8fa439d43b8548b546c00ce71d1bc1/libmongocrypt.tar.gz</LibMongoCryptSourceUrl> + <LibMongoCryptSourceUrl>https://mciuploads.s3.amazonaws.com/libmongocrypt-release/macos/$(LibMongoCryptVersion)/$(LibMongoCryptCommit)/libmongocrypt.tar.gz</LibMongoCryptSourceUrl> <LibMongoCryptSourcePath>lib/libmongocrypt.dylib</LibMongoCryptSourcePath> <LibMongoCryptPackagePath>runtimes/osx/native</LibMongoCryptPackagePath> </PropertyGroup> @@ -28,7 +33,7 @@ <Target Name="DownloadNativeBinaries_UbuntuX64" BeforeTargets="BeforeBuild" Condition="!Exists('$(MSBuildProjectDirectory)/runtimes/linux/native/x64/libmongocrypt.so')"> <PropertyGroup> - 
<LibMongoCryptSourceUrl>https://mciuploads.s3.amazonaws.com/libmongocrypt-release/ubuntu1804-64/r1.13/1cf03f1fdd8fa439d43b8548b546c00ce71d1bc1/libmongocrypt.tar.gz</LibMongoCryptSourceUrl> + <LibMongoCryptSourceUrl>https://mciuploads.s3.amazonaws.com/libmongocrypt-release/ubuntu1804-64/$(LibMongoCryptVersion)/$(LibMongoCryptCommit)/libmongocrypt.tar.gz</LibMongoCryptSourceUrl> <LibMongoCryptSourcePath>nocrypto/lib/libmongocrypt.so</LibMongoCryptSourcePath> <LibMongoCryptPackagePath>runtimes/linux/native/x64</LibMongoCryptPackagePath> </PropertyGroup> @@ -40,7 +45,7 @@ <Target Name="DownloadNativeBinaries_UbuntuARM64" BeforeTargets="BeforeBuild" Condition="!Exists('$(MSBuildProjectDirectory)/runtimes/linux/native/arm64/libmongocrypt.so')"> <PropertyGroup> - <LibMongoCryptSourceUrl>https://mciuploads.s3.amazonaws.com/libmongocrypt-release/ubuntu1804-arm64/r1.13/1cf03f1fdd8fa439d43b8548b546c00ce71d1bc1/libmongocrypt.tar.gz</LibMongoCryptSourceUrl> + <LibMongoCryptSourceUrl>https://mciuploads.s3.amazonaws.com/libmongocrypt-release/ubuntu1804-arm64/$(LibMongoCryptVersion)/$(LibMongoCryptCommit)/libmongocrypt.tar.gz</LibMongoCryptSourceUrl> <LibMongoCryptSourcePath>nocrypto/lib/libmongocrypt.so</LibMongoCryptSourcePath> <LibMongoCryptPackagePath>runtimes/linux/native/arm64</LibMongoCryptPackagePath> </PropertyGroup> @@ -52,7 +57,7 @@ <Target Name="DownloadNativeBinaries_Alpine" BeforeTargets="BeforeBuild" Condition="!Exists('$(MSBuildProjectDirectory)/runtimes/linux/native/alpine/libmongocrypt.so')"> <PropertyGroup> - <LibMongoCryptSourceUrl>https://mciuploads.s3.amazonaws.com/libmongocrypt-release/alpine-arm64-earthly/r1.13/1cf03f1fdd8fa439d43b8548b546c00ce71d1bc1/libmongocrypt.tar.gz</LibMongoCryptSourceUrl> + 
<LibMongoCryptSourceUrl>https://mciuploads.s3.amazonaws.com/libmongocrypt-release/alpine-arm64-earthly/$(LibMongoCryptVersion)/$(LibMongoCryptCommit)/libmongocrypt.tar.gz</LibMongoCryptSourceUrl> <LibMongoCryptSourcePath>nocrypto/lib/libmongocrypt.so</LibMongoCryptSourcePath> <LibMongoCryptPackagePath>runtimes/linux/native/alpine</LibMongoCryptPackagePath> </PropertyGroup> @@ -64,7 +69,7 @@ <Target Name="DownloadNativeBinaries_Windows" BeforeTargets="BeforeBuild" Condition="!Exists('$(MSBuildProjectDirectory)/runtimes/win/native/mongocrypt.dll')"> <PropertyGroup> - <LibMongoCryptSourceUrl>https://mciuploads.s3.amazonaws.com/libmongocrypt-release/windows-test/r1.13/1cf03f1fdd8fa439d43b8548b546c00ce71d1bc1/libmongocrypt.tar.gz</LibMongoCryptSourceUrl> + <LibMongoCryptSourceUrl>https://mciuploads.s3.amazonaws.com/libmongocrypt-release/windows-test/$(LibMongoCryptVersion)/$(LibMongoCryptCommit)/libmongocrypt.tar.gz</LibMongoCryptSourceUrl> <LibMongoCryptSourcePath>bin/mongocrypt.dll</LibMongoCryptSourcePath> <LibMongoCryptPackagePath>runtimes/win/native</LibMongoCryptPackagePath> </PropertyGroup> diff --git a/src/MongoDB.Driver/AbortTransactionOptions.cs b/src/MongoDB.Driver/AbortTransactionOptions.cs new file mode 100644 index 00000000000..649e7be1413 --- /dev/null +++ b/src/MongoDB.Driver/AbortTransactionOptions.cs @@ -0,0 +1,31 @@ +/* Copyright 2010-present MongoDB Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +using System; +using MongoDB.Driver.Core.Misc; + +namespace MongoDB.Driver +{ + // TODO: CSOT: Make it public when CSOT will be ready for GA + internal sealed class AbortTransactionOptions + { + public AbortTransactionOptions(TimeSpan? timeout) + { + Timeout = Ensure.IsNullOrValidTimeout(timeout, nameof(timeout)); + } + + public TimeSpan? Timeout { get; } + } +} diff --git a/src/MongoDB.Driver/AggregateHelper.cs b/src/MongoDB.Driver/AggregateHelper.cs new file mode 100644 index 00000000000..c61628b5330 --- /dev/null +++ b/src/MongoDB.Driver/AggregateHelper.cs @@ -0,0 +1,96 @@ +/* Copyright 2010-present MongoDB Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +using System; +using System.Linq; +using MongoDB.Bson; + +namespace MongoDB.Driver +{ + internal static class AggregateHelper + { + public static RenderedPipelineDefinition<TResult> RenderAggregatePipeline<TDocument, TResult>(PipelineDefinition<TDocument, TResult> pipeline, RenderArgs<TDocument> renderArgs, out bool isAggregateToCollection) + { + var renderedPipeline = pipeline.Render(renderArgs); + + var lastStage = renderedPipeline.Documents.LastOrDefault(); + var lastStageName = lastStage?.GetElement(0).Name; + isAggregateToCollection = lastStageName == "$out" || lastStageName == "$merge"; + + return renderedPipeline; + } + + public static CollectionNamespace GetOutCollection(BsonDocument outStage, DatabaseNamespace defaultDatabaseNamespace) + { + var stageName = outStage.GetElement(0).Name; + switch (stageName) + { + case "$out": + { + var outValue = outStage[0]; + DatabaseNamespace outputDatabaseNamespace; + string outputCollectionName; + if (outValue.IsString) + { + outputDatabaseNamespace = defaultDatabaseNamespace; + outputCollectionName = outValue.AsString; + } + else + { + outputDatabaseNamespace = new DatabaseNamespace(outValue["db"].AsString); + outputCollectionName = outValue["coll"].AsString; + } + return new CollectionNamespace(outputDatabaseNamespace, outputCollectionName); + } + case "$merge": + { + var mergeArguments = outStage[0]; + DatabaseNamespace outputDatabaseNamespace; + string outputCollectionName; + if (mergeArguments.IsString) + { + outputDatabaseNamespace = defaultDatabaseNamespace; + outputCollectionName = mergeArguments.AsString; + } + else + { + var into = mergeArguments.AsBsonDocument["into"]; + if (into.IsString) + { + outputDatabaseNamespace = defaultDatabaseNamespace; + outputCollectionName = into.AsString; + } + else + { + if (into.AsBsonDocument.Contains("db")) + { + outputDatabaseNamespace = new DatabaseNamespace(into["db"].AsString); + } + else + { + outputDatabaseNamespace = defaultDatabaseNamespace; + } + 
outputCollectionName = into["coll"].AsString; + } + } + return new CollectionNamespace(outputDatabaseNamespace, outputCollectionName); + } + default: + throw new ArgumentException($"Unexpected stage name: {stageName}."); + } + } + } +} + diff --git a/src/MongoDB.Driver/AggregateOptions.cs b/src/MongoDB.Driver/AggregateOptions.cs index cafe47e785b..55134aa07d4 100644 --- a/src/MongoDB.Driver/AggregateOptions.cs +++ b/src/MongoDB.Driver/AggregateOptions.cs @@ -1,4 +1,4 @@ -/* Copyright 2015-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -34,6 +34,7 @@ public class AggregateOptions private BsonDocument _let; private TimeSpan? _maxAwaitTime; private TimeSpan? _maxTime; + private TimeSpan? _timeout; private ExpressionTranslationOptions _translationOptions; private bool? _useCursor; @@ -127,6 +128,16 @@ public TimeSpan? MaxTime set { _maxTime = Ensure.IsNullOrInfiniteOrGreaterThanOrEqualToZero(value, nameof(value)); } } + /// <summary> + /// Gets or sets the operation timeout. + /// </summary> + // TODO: CSOT: Make it public when CSOT will be ready for GA + internal TimeSpan? Timeout + { + get => _timeout; + set => _timeout = Ensure.IsNullOrValidTimeout(value, nameof(Timeout)); + } + /// <summary> /// Gets or sets the translation options. 
/// </summary> diff --git a/src/MongoDB.Driver/Authentication/AuthenticationHelper.cs b/src/MongoDB.Driver/Authentication/AuthenticationHelper.cs index 38bda6e6432..679897eb45e 100644 --- a/src/MongoDB.Driver/Authentication/AuthenticationHelper.cs +++ b/src/MongoDB.Driver/Authentication/AuthenticationHelper.cs @@ -17,7 +17,6 @@ using System.Runtime.InteropServices; using System.Security; using System.Security.Cryptography; -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Bson.IO; @@ -28,8 +27,9 @@ namespace MongoDB.Driver.Authentication { internal static class AuthenticationHelper { - public static void Authenticate(IConnection connection, ConnectionDescription description, IAuthenticator authenticator, CancellationToken cancellationToken) + public static void Authenticate(OperationContext operationContext, IConnection connection, ConnectionDescription description, IAuthenticator authenticator) { + Ensure.IsNotNull(operationContext, nameof(operationContext)); Ensure.IsNotNull(connection, nameof(connection)); Ensure.IsNotNull(description, nameof(description)); @@ -41,12 +41,13 @@ public static void Authenticate(IConnection connection, ConnectionDescription de // authentication is currently broken on arbiters if (!description.HelloResult.IsArbiter) { - authenticator.Authenticate(connection, description, cancellationToken); + authenticator.Authenticate(operationContext, connection, description); } } - public static async Task AuthenticateAsync(IConnection connection, ConnectionDescription description, IAuthenticator authenticator, CancellationToken cancellationToken) + public static async Task AuthenticateAsync(OperationContext operationContext, IConnection connection, ConnectionDescription description, IAuthenticator authenticator) { + Ensure.IsNotNull(operationContext, nameof(operationContext)); Ensure.IsNotNull(connection, nameof(connection)); Ensure.IsNotNull(description, nameof(description)); @@ -58,7 +59,7 @@ public static 
async Task AuthenticateAsync(IConnection connection, ConnectionDes // authentication is currently broken on arbiters if (!description.HelloResult.IsArbiter) { - await authenticator.AuthenticateAsync(connection, description, cancellationToken).ConfigureAwait(false); + await authenticator.AuthenticateAsync(operationContext, connection, description).ConfigureAwait(false); } } @@ -68,30 +69,28 @@ public static string MongoPasswordDigest(string username, SecureString password) { return MongoPasswordDigest(username, new byte[0]); } - else + + var passwordIntPtr = Marshal.SecureStringToGlobalAllocUnicode(password); + try { - var passwordIntPtr = Marshal.SecureStringToGlobalAllocUnicode(password); + var passwordChars = new char[password.Length]; + var passwordCharsHandle = GCHandle.Alloc(passwordChars, GCHandleType.Pinned); try { - var passwordChars = new char[password.Length]; - var passwordCharsHandle = GCHandle.Alloc(passwordChars, GCHandleType.Pinned); - try - { - Marshal.Copy(passwordIntPtr, passwordChars, 0, password.Length); + Marshal.Copy(passwordIntPtr, passwordChars, 0, password.Length); - return MongoPasswordDigest(username, passwordChars); - } - finally - { - Array.Clear(passwordChars, 0, passwordChars.Length); - passwordCharsHandle.Free(); - } + return MongoPasswordDigest(username, passwordChars); } finally { - Marshal.ZeroFreeGlobalAllocUnicode(passwordIntPtr); + Array.Clear(passwordChars, 0, passwordChars.Length); + passwordCharsHandle.Free(); } } + finally + { + Marshal.ZeroFreeGlobalAllocUnicode(passwordIntPtr); + } } private static string MongoPasswordDigest(string username, char[] passwordChars) diff --git a/src/MongoDB.Driver/Authentication/DefaultAuthenticator.cs b/src/MongoDB.Driver/Authentication/DefaultAuthenticator.cs index c16032e956e..718474efbc8 100644 --- a/src/MongoDB.Driver/Authentication/DefaultAuthenticator.cs +++ b/src/MongoDB.Driver/Authentication/DefaultAuthenticator.cs @@ -17,7 +17,6 @@ using System.Collections.Generic; using 
System.Linq; using System.Net; -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Driver.Authentication.ScramSha; @@ -54,7 +53,7 @@ internal DefaultAuthenticator( public string Name => "DEFAULT"; - public void Authenticate(IConnection connection, ConnectionDescription description, CancellationToken cancellationToken) + public void Authenticate(OperationContext operationContext, IConnection connection, ConnectionDescription description) { Ensure.IsNotNull(connection, nameof(connection)); Ensure.IsNotNull(description, nameof(description)); @@ -65,9 +64,9 @@ public void Authenticate(IConnection connection, ConnectionDescription descripti if (!description.HelloResult.HasSaslSupportedMechs && Feature.ScramSha256Authentication.IsSupported(description.MaxWireVersion)) { - var command = CustomizeInitialHelloCommand(HelloHelper.CreateCommand(_serverApi, loadBalanced: connection.Settings.LoadBalanced), cancellationToken); + var command = CustomizeInitialHelloCommand(operationContext, HelloHelper.CreateCommand(_serverApi, loadBalanced: connection.Settings.LoadBalanced)); var helloProtocol = HelloHelper.CreateProtocol(command, _serverApi); - var helloResult = HelloHelper.GetResult(connection, helloProtocol, cancellationToken); + var helloResult = HelloHelper.GetResult(operationContext, connection, helloProtocol); var mergedHelloResult = new HelloResult(description.HelloResult.Wrapped.Merge(helloResult.Wrapped)); description = new ConnectionDescription( description.ConnectionId, @@ -75,10 +74,10 @@ public void Authenticate(IConnection connection, ConnectionDescription descripti } var authenticator = GetOrCreateAuthenticator(connection, description); - authenticator.Authenticate(connection, description, cancellationToken); + authenticator.Authenticate(operationContext, connection, description); } - public async Task AuthenticateAsync(IConnection connection, ConnectionDescription description, CancellationToken cancellationToken) + public 
async Task AuthenticateAsync(OperationContext operationContext, IConnection connection, ConnectionDescription description) { Ensure.IsNotNull(connection, nameof(connection)); Ensure.IsNotNull(description, nameof(description)); @@ -89,9 +88,9 @@ public async Task AuthenticateAsync(IConnection connection, ConnectionDescriptio if (!description.HelloResult.HasSaslSupportedMechs && Feature.ScramSha256Authentication.IsSupported(description.MaxWireVersion)) { - var command = CustomizeInitialHelloCommand(HelloHelper.CreateCommand(_serverApi, loadBalanced: connection.Settings.LoadBalanced), cancellationToken); + var command = CustomizeInitialHelloCommand(operationContext, HelloHelper.CreateCommand(_serverApi, loadBalanced: connection.Settings.LoadBalanced)); var helloProtocol = HelloHelper.CreateProtocol(command, _serverApi); - var helloResult = await HelloHelper.GetResultAsync(connection, helloProtocol, cancellationToken).ConfigureAwait(false); + var helloResult = await HelloHelper.GetResultAsync(operationContext, connection, helloProtocol).ConfigureAwait(false); var mergedHelloResult = new HelloResult(description.HelloResult.Wrapped.Merge(helloResult.Wrapped)); description = new ConnectionDescription( description.ConnectionId, @@ -99,10 +98,10 @@ public async Task AuthenticateAsync(IConnection connection, ConnectionDescriptio } var authenticator = GetOrCreateAuthenticator(connection, description); - await authenticator.AuthenticateAsync(connection, description, cancellationToken).ConfigureAwait(false); + await authenticator.AuthenticateAsync(operationContext, connection, description).ConfigureAwait(false); } - public BsonDocument CustomizeInitialHelloCommand(BsonDocument helloCommand, CancellationToken cancellationToken) + public BsonDocument CustomizeInitialHelloCommand(OperationContext operationContext, BsonDocument helloCommand) { var saslSupportedMechs = CreateSaslSupportedMechsRequest(_identity); helloCommand = helloCommand.Merge(saslSupportedMechs); @@ -116,7 +115,7 
@@ public BsonDocument CustomizeInitialHelloCommand(BsonDocument helloCommand, Canc out var authenticator)) { _speculativeAuthenticator = authenticator; - return _speculativeAuthenticator.CustomizeInitialHelloCommand(helloCommand, cancellationToken); + return _speculativeAuthenticator.CustomizeInitialHelloCommand(operationContext, helloCommand); } return helloCommand; diff --git a/src/MongoDB.Driver/Authentication/Gssapi/Sspi/EncryptQualityOfProtection.cs b/src/MongoDB.Driver/Authentication/Gssapi/Sspi/EncryptQualityOfProtection.cs index 53a1f826180..135b039cc85 100644 --- a/src/MongoDB.Driver/Authentication/Gssapi/Sspi/EncryptQualityOfProtection.cs +++ b/src/MongoDB.Driver/Authentication/Gssapi/Sspi/EncryptQualityOfProtection.cs @@ -19,7 +19,7 @@ namespace MongoDB.Driver.Authentication.Gssapi.Sspi /// Flags for EncryptMessage. /// </summary> /// <remarks> - /// See the fQOP parameter at + /// See the fQOP parameter at /// http://msdn.microsoft.com/en-us/library/windows/desktop/aa375378(v=vs.85).aspx. /// </remarks> internal enum EncryptQualityOfProtection : uint diff --git a/src/MongoDB.Driver/Authentication/IAuthenticator.cs b/src/MongoDB.Driver/Authentication/IAuthenticator.cs index 3b1cabd9ffc..658bef92f12 100644 --- a/src/MongoDB.Driver/Authentication/IAuthenticator.cs +++ b/src/MongoDB.Driver/Authentication/IAuthenticator.cs @@ -13,7 +13,6 @@ * limitations under the License. 
*/ -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Driver.Core.Connections; @@ -24,10 +23,10 @@ internal interface IAuthenticator { string Name { get; } - void Authenticate(IConnection connection, ConnectionDescription description, CancellationToken cancellationToken); + void Authenticate(OperationContext operationContext, IConnection connection, ConnectionDescription description); - Task AuthenticateAsync(IConnection connection, ConnectionDescription description, CancellationToken cancellationToken); + Task AuthenticateAsync(OperationContext operationContext, IConnection connection, ConnectionDescription description); - BsonDocument CustomizeInitialHelloCommand(BsonDocument helloCommand, CancellationToken cancellationToken); + BsonDocument CustomizeInitialHelloCommand(OperationContext operationContext, BsonDocument helloCommand); } } diff --git a/src/MongoDB.Driver/Authentication/MongoDBX509Authenticator.cs b/src/MongoDB.Driver/Authentication/MongoDBX509Authenticator.cs index 07612031c97..68d3379bb4b 100644 --- a/src/MongoDB.Driver/Authentication/MongoDBX509Authenticator.cs +++ b/src/MongoDB.Driver/Authentication/MongoDBX509Authenticator.cs @@ -14,7 +14,6 @@ */ using System; -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Bson.Serialization.Serializers; @@ -45,7 +44,7 @@ public string Name get { return MechanismName; } } - public void Authenticate(IConnection connection, ConnectionDescription description, CancellationToken cancellationToken) + public void Authenticate(OperationContext operationContext, IConnection connection, ConnectionDescription description) { Ensure.IsNotNull(connection, nameof(connection)); Ensure.IsNotNull(description, nameof(description)); @@ -58,7 +57,7 @@ public void Authenticate(IConnection connection, ConnectionDescription descripti try { var protocol = CreateAuthenticateProtocol(); - protocol.Execute(connection, cancellationToken); + 
protocol.Execute(operationContext, connection); } catch (MongoCommandException ex) { @@ -66,7 +65,7 @@ public void Authenticate(IConnection connection, ConnectionDescription descripti } } - public async Task AuthenticateAsync(IConnection connection, ConnectionDescription description, CancellationToken cancellationToken) + public async Task AuthenticateAsync(OperationContext operationContext, IConnection connection, ConnectionDescription description) { Ensure.IsNotNull(connection, nameof(connection)); Ensure.IsNotNull(description, nameof(description)); @@ -79,7 +78,7 @@ public async Task AuthenticateAsync(IConnection connection, ConnectionDescriptio try { var protocol = CreateAuthenticateProtocol(); - await protocol.ExecuteAsync(connection, cancellationToken).ConfigureAwait(false); + await protocol.ExecuteAsync(operationContext, connection).ConfigureAwait(false); } catch (MongoCommandException ex) { @@ -87,7 +86,7 @@ public async Task AuthenticateAsync(IConnection connection, ConnectionDescriptio } } - public BsonDocument CustomizeInitialHelloCommand(BsonDocument helloCommand, CancellationToken cancellationToken) + public BsonDocument CustomizeInitialHelloCommand(OperationContext operationContext, BsonDocument helloCommand) { helloCommand.Add("speculativeAuthenticate", CreateAuthenticateCommand()); return helloCommand; diff --git a/src/MongoDB.Driver/Authentication/SaslAuthenticator.cs b/src/MongoDB.Driver/Authentication/SaslAuthenticator.cs index fddb5953b60..96209fd8d9b 100644 --- a/src/MongoDB.Driver/Authentication/SaslAuthenticator.cs +++ b/src/MongoDB.Driver/Authentication/SaslAuthenticator.cs @@ -16,7 +16,6 @@ using System; using System.Collections.Generic; using System.Net; -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Bson.Serialization.Serializers; @@ -72,7 +71,7 @@ internal SaslAuthenticator(ISaslMechanism mechanism, ServerApi serverApi) public string Name => Mechanism.Name; - public void Authenticate(IConnection 
connection, ConnectionDescription description, CancellationToken cancellationToken) + public void Authenticate(OperationContext operationContext, IConnection connection, ConnectionDescription description) { Ensure.IsNotNull(connection, nameof(connection)); Ensure.IsNotNull(description, nameof(description)); @@ -91,7 +90,10 @@ public void Authenticate(IConnection connection, ConnectionDescription descripti while (currentStep != null) { - var executionResult = currentStep.Execute(conversation, result?["payload"]?.AsByteArray, cancellationToken); + operationContext.ThrowIfTimedOutOrCanceled(); +#pragma warning disable CS0618 // Type or member is obsolete + var executionResult = currentStep.Execute(conversation, result?["payload"]?.AsByteArray, operationContext.CombinedCancellationToken); +#pragma warning restore CS0618 // Type or member is obsolete if (executionResult.BytesToSendToServer == null) { currentStep = executionResult.NextStep; @@ -109,7 +111,7 @@ public void Authenticate(IConnection connection, ConnectionDescription descripti try { var protocol = CreateCommandProtocol(command); - result = protocol.Execute(connection, cancellationToken); + result = protocol.Execute(operationContext, connection); conversationId ??= result?.GetValue("conversationId").AsInt32; } catch (MongoException ex) @@ -135,7 +137,7 @@ public void Authenticate(IConnection connection, ConnectionDescription descripti } } - public async Task AuthenticateAsync(IConnection connection, ConnectionDescription description, CancellationToken cancellationToken) + public async Task AuthenticateAsync(OperationContext operationContext, IConnection connection, ConnectionDescription description) { Ensure.IsNotNull(connection, nameof(connection)); Ensure.IsNotNull(description, nameof(description)); @@ -154,7 +156,10 @@ public async Task AuthenticateAsync(IConnection connection, ConnectionDescriptio while (currentStep != null) { - var executionResult = await currentStep.ExecuteAsync(conversation, 
result?["payload"]?.AsByteArray, cancellationToken).ConfigureAwait(false); + operationContext.ThrowIfTimedOutOrCanceled(); +#pragma warning disable CS0618 // Type or member is obsolete + var executionResult = await currentStep.ExecuteAsync(conversation, result?["payload"]?.AsByteArray, operationContext.CombinedCancellationToken).ConfigureAwait(false); +#pragma warning restore CS0618 // Type or member is obsolete if (executionResult.BytesToSendToServer == null) { currentStep = executionResult.NextStep; @@ -172,7 +177,7 @@ public async Task AuthenticateAsync(IConnection connection, ConnectionDescriptio try { var protocol = CreateCommandProtocol(command); - result = await protocol.ExecuteAsync(connection, cancellationToken).ConfigureAwait(false); + result = await protocol.ExecuteAsync(operationContext, connection).ConfigureAwait(false); conversationId ??= result?.GetValue("conversationId").AsInt32; } catch (MongoException ex) @@ -198,12 +203,14 @@ public async Task AuthenticateAsync(IConnection connection, ConnectionDescriptio } } - public BsonDocument CustomizeInitialHelloCommand(BsonDocument helloCommand, CancellationToken cancellationToken) + public BsonDocument CustomizeInitialHelloCommand(OperationContext operationContext, BsonDocument helloCommand) { var speculativeStep = Mechanism.CreateSpeculativeAuthenticationStep(); if (speculativeStep != null) { - (var bytesToSend, _speculativeContinueStep) = speculativeStep.Execute(null, null, cancellationToken); +#pragma warning disable CS0618 // Type or member is obsolete + (var bytesToSend, _speculativeContinueStep) = speculativeStep.Execute(null, null, operationContext.CombinedCancellationToken); +#pragma warning restore CS0618 // Type or member is obsolete var firstCommand = CreateStartCommand(bytesToSend); firstCommand.Add("db", Mechanism.DatabaseName); helloCommand.Add("speculativeAuthenticate", firstCommand); diff --git a/src/MongoDB.Driver/Authentication/SaslMapParser.cs 
b/src/MongoDB.Driver/Authentication/SaslMapParser.cs index f4050696fec..dcc5b5480d9 100644 --- a/src/MongoDB.Driver/Authentication/SaslMapParser.cs +++ b/src/MongoDB.Driver/Authentication/SaslMapParser.cs @@ -24,7 +24,7 @@ namespace MongoDB.Driver.Authentication /// "SCRAM is a SASL mechanism whose client response and server challenge /// messages are text-based messages containing one or more attribute- /// value pairs separated by commas. Each attribute has a one-letter - /// name." + /// name." /// </summary> internal static class SaslMapParser { diff --git a/src/MongoDB.Driver/Authentication/SaslPrepHelper.cs b/src/MongoDB.Driver/Authentication/SaslPrepHelper.cs index ae9793f6859..dc14a565a45 100644 --- a/src/MongoDB.Driver/Authentication/SaslPrepHelper.cs +++ b/src/MongoDB.Driver/Authentication/SaslPrepHelper.cs @@ -141,7 +141,7 @@ private static string SaslPrep(string str, bool allowUnassigned) /// <summary> /// Return true if the given <paramref name="ch"/> is an ASCII control character as defined by - /// <a href="https://tools.ietf.org/html/rfc3454#appendix-C.2.1">RFC 3454, Appendix C.2.1</a>. + /// <a href="https://tools.ietf.org/html/rfc3454#appendix-C.2.1">RFC 3454, Appendix C.2.1</a>. /// </summary> /// <param name="ch">The character.</param> /// <returns>Whether the given character is an ASCII control character.</returns> @@ -187,7 +187,7 @@ private static int CharCount(int codepoint) /// <summary> /// Return true if the given <paramref name="codepoint"/> is inappropriate for canonical representation - /// characters as defined by <a href="https://tools.ietf.org/html/rfc3454#appendix-C.7">RFC 3454, Appendix C.7</a>. + /// characters as defined by <a href="https://tools.ietf.org/html/rfc3454#appendix-C.7">RFC 3454, Appendix C.7</a>. 
/// </summary> /// <param name="codepoint">The Unicode character's codepoint.</param> /// <returns>True if the codepoint is inappropriate for canonical.</returns> @@ -647,7 +647,7 @@ private static bool IsSurrogateCodepoint(int codepoint) /// <summary> /// Return true if the given <paramref name="ch"/> is a "commonly mapped to nothing" character as defined by - /// <a href="https://tools.ietf.org/html/rfc3454#appendix-B.1">RFC 3454, Appendix B.1</a>. + /// <a href="https://tools.ietf.org/html/rfc3454#appendix-B.1">RFC 3454, Appendix B.1</a>. /// </summary> /// <param name="ch">The character.</param> /// <returns>Whether the given character is a "commonly mapped to nothing" character.</returns> @@ -669,7 +669,7 @@ private static bool MappedToNothing(char ch) /// <summary> /// Return true if the given <paramref name="codepoint"/> is a non-ASCII control character as defined by - /// <a href="https://tools.ietf.org/html/rfc3454#appendix-C.2.2">RFC 3454, Appendix C.2.2</a>. + /// <a href="https://tools.ietf.org/html/rfc3454#appendix-C.2.2">RFC 3454, Appendix C.2.2</a>. /// </summary> /// <param name="codepoint">The Unicode character's codepoint.</param> /// <returns>Whether the given character is a non-ASCII control character.</returns> @@ -695,7 +695,7 @@ private static bool NonAsciiControl(int codepoint) /// <summary> /// Return true if the given <paramref name="ch"/> is a non-ASCII space character as defined by - /// <a href="https://tools.ietf.org/html/rfc3454#appendix-C.1.2">RFC 3454, Appendix C.1.2</a>. + /// <a href="https://tools.ietf.org/html/rfc3454#appendix-C.1.2">RFC 3454, Appendix C.1.2</a>. 
/// </summary> /// <param name="ch">The character.</param> /// <returns>Whether the given character is a non-ASCII space character.</returns> @@ -752,7 +752,7 @@ private static bool PrivateUse(int codepoint) /// <summary> /// Return true if the given <paramref name="codepoint"/> is a prohibited character as defined by - ///<a href="https://tools.ietf.org/html/rfc4013#section-2.3">RFC 4013, Section 2.3</a>. + ///<a href="https://tools.ietf.org/html/rfc4013#section-2.3">RFC 4013, Section 2.3</a>. /// </summary> /// <param name="codepoint">The Unicode character's codepoint.</param> /// <returns>Whether the codepoint is a prohibited character.</returns> diff --git a/src/MongoDB.Driver/BulkWriteInsertOneResult.cs b/src/MongoDB.Driver/BulkWriteInsertOneResult.cs index ca684cf9521..290b65462ed 100644 --- a/src/MongoDB.Driver/BulkWriteInsertOneResult.cs +++ b/src/MongoDB.Driver/BulkWriteInsertOneResult.cs @@ -13,6 +13,7 @@ * limitations under the License. */ +using System; using MongoDB.Bson; namespace MongoDB.Driver @@ -25,6 +26,19 @@ public class BulkWriteInsertOneResult /// <summary> /// The id of the inserted document. /// </summary> - public BsonValue InsertedId { get; init; } + [Obsolete("InsertedId is deprecated and will be removed in future versions. Use DocumentId instead.")] + public BsonValue InsertedId + { + get => BsonValue.Create(DocumentId); + init + { + DocumentId = value; + } + } + + /// <summary> + /// The id of the inserted document. + /// </summary> + public object DocumentId { get; init; } } } diff --git a/src/MongoDB.Driver/BulkWriteOptions.cs b/src/MongoDB.Driver/BulkWriteOptions.cs index 12b75cbb018..7cf28f52fbb 100644 --- a/src/MongoDB.Driver/BulkWriteOptions.cs +++ b/src/MongoDB.Driver/BulkWriteOptions.cs @@ -13,7 +13,9 @@ * limitations under the License. 
*/ +using System; using MongoDB.Bson; +using MongoDB.Driver.Core.Misc; namespace MongoDB.Driver { @@ -27,6 +29,7 @@ public sealed class BulkWriteOptions private BsonValue _comment; private bool _isOrdered; private BsonDocument _let; + private TimeSpan? _timeout; // constructors /// <summary> @@ -73,5 +76,15 @@ public BsonDocument Let get { return _let; } set { _let = value; } } + + /// <summary> + /// Gets or sets the operation timeout. + /// </summary> + // TODO: CSOT: Make it public when CSOT will be ready for GA + internal TimeSpan? Timeout + { + get => _timeout; + set => _timeout = Ensure.IsNullOrValidTimeout(value, nameof(Timeout)); + } } } diff --git a/src/MongoDB.Driver/ChangeStreamOptions.cs b/src/MongoDB.Driver/ChangeStreamOptions.cs index c4da3830611..044ee09c3e7 100644 --- a/src/MongoDB.Driver/ChangeStreamOptions.cs +++ b/src/MongoDB.Driver/ChangeStreamOptions.cs @@ -1,4 +1,4 @@ -/* Copyright 2017-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -35,6 +35,7 @@ public class ChangeStreamOptions private bool? _showExpandedEvents; private BsonDocument _startAfter; private BsonTimestamp _startAtOperationTime; + private TimeSpan? _timeout; // public properties /// <summary> @@ -166,5 +167,15 @@ public BsonTimestamp StartAtOperationTime get { return _startAtOperationTime; } set { _startAtOperationTime = value; } } + + /// <summary> + /// Gets or sets the operation timeout. + /// </summary> + // TODO: CSOT: Make it public when CSOT will be ready for GA + internal TimeSpan? 
Timeout + { + get => _timeout; + set => _timeout = Ensure.IsNullOrValidTimeout(value, nameof(Timeout)); + } } } diff --git a/src/MongoDB.Driver/ClientBulkWriteOptions.cs b/src/MongoDB.Driver/ClientBulkWriteOptions.cs index 9ba9c366c6b..68faf9984cf 100644 --- a/src/MongoDB.Driver/ClientBulkWriteOptions.cs +++ b/src/MongoDB.Driver/ClientBulkWriteOptions.cs @@ -13,7 +13,9 @@ * limitations under the License. */ +using System; using MongoDB.Bson; +using MongoDB.Driver.Core.Misc; namespace MongoDB.Driver { @@ -22,6 +24,8 @@ namespace MongoDB.Driver /// </summary> public sealed class ClientBulkWriteOptions { + private TimeSpan? _timeout; + /// <summary> /// Initializes a new instance of the <see cref="BulkWriteOptions"/> class. /// </summary> @@ -75,6 +79,16 @@ public ClientBulkWriteOptions( /// </summary> public BsonDocument Let { get; set; } + /// <summary> + /// Gets or sets the operation timeout. + /// </summary> + // TODO: CSOT: Make it public when CSOT will be ready for GA + internal TimeSpan? Timeout + { + get => _timeout; + set => _timeout = Ensure.IsNullOrValidTimeout(value, nameof(Timeout)); + } + /// <summary> /// Whether detailed results for each successful operation should be included in the returned results. /// </summary> diff --git a/src/MongoDB.Driver/ClientSessionHandle.cs b/src/MongoDB.Driver/ClientSessionHandle.cs index 2d606fdeb1b..144e6a94991 100644 --- a/src/MongoDB.Driver/ClientSessionHandle.cs +++ b/src/MongoDB.Driver/ClientSessionHandle.cs @@ -1,4 +1,4 @@ -/* Copyright 2017-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -26,7 +26,7 @@ namespace MongoDB.Driver /// A client session handle. 
/// </summary> /// <seealso cref="MongoDB.Driver.IClientSessionHandle" /> - internal sealed class ClientSessionHandle : IClientSessionHandle + internal sealed class ClientSessionHandle : IClientSessionHandle, IClientSessionInternal { // private fields private readonly IMongoClient _client; @@ -94,16 +94,20 @@ public IServerSession ServerSession // public methods /// <inheritdoc /> - public void AbortTransaction(CancellationToken cancellationToken = default(CancellationToken)) - { - _coreSession.AbortTransaction(cancellationToken); - } + public void AbortTransaction(CancellationToken cancellationToken = default) + => _coreSession.AbortTransaction(cancellationToken); + + // TODO: CSOT: Make it public when CSOT will be ready for GA and add default value to cancellationToken parameter. + void IClientSessionInternal.AbortTransaction(AbortTransactionOptions options, CancellationToken cancellationToken) + => _coreSession.AbortTransaction(options, cancellationToken); /// <inheritdoc /> - public Task AbortTransactionAsync(CancellationToken cancellationToken = default(CancellationToken)) - { - return _coreSession.AbortTransactionAsync(cancellationToken); - } + public Task AbortTransactionAsync(CancellationToken cancellationToken = default) + => _coreSession.AbortTransactionAsync(cancellationToken); + + // TODO: CSOT: Make it public when CSOT will be ready for GA and add default value to cancellationToken parameter. 
+ Task IClientSessionInternal.AbortTransactionAsync(AbortTransactionOptions options, CancellationToken cancellationToken) + => _coreSession.AbortTransactionAsync(options, cancellationToken); /// <inheritdoc /> public void AdvanceClusterTime(BsonDocument newClusterTime) @@ -118,16 +122,20 @@ public void AdvanceOperationTime(BsonTimestamp newOperationTime) } /// <inheritdoc /> - public void CommitTransaction(CancellationToken cancellationToken = default(CancellationToken)) - { - _coreSession.CommitTransaction(cancellationToken); - } + public void CommitTransaction(CancellationToken cancellationToken = default) + => _coreSession.CommitTransaction(cancellationToken); + + // TODO: CSOT: Make it public when CSOT will be ready for GA and add default value to cancellationToken parameter. + void IClientSessionInternal.CommitTransaction(CommitTransactionOptions options, CancellationToken cancellationToken) + => _coreSession.CommitTransaction(options, cancellationToken); /// <inheritdoc /> - public Task CommitTransactionAsync(CancellationToken cancellationToken = default(CancellationToken)) - { - return _coreSession.CommitTransactionAsync(cancellationToken); - } + public Task CommitTransactionAsync(CancellationToken cancellationToken = default) + => _coreSession.CommitTransactionAsync(cancellationToken); + + // TODO: CSOT: Make it public when CSOT will be ready for GA and add default value to cancellationToken parameter. 
+ Task IClientSessionInternal.CommitTransactionAsync(CommitTransactionOptions options, CancellationToken cancellationToken) + => _coreSession.CommitTransactionAsync(options, cancellationToken); /// <inheritdoc /> public void Dispose() diff --git a/src/MongoDB.Driver/ClusterKey.cs b/src/MongoDB.Driver/ClusterKey.cs index d208a7b60e4..dec3b93bad6 100644 --- a/src/MongoDB.Driver/ClusterKey.cs +++ b/src/MongoDB.Driver/ClusterKey.cs @@ -17,6 +17,7 @@ using System.Collections.Generic; using System.Linq; using MongoDB.Driver.Core.Configuration; +using MongoDB.Driver.Core.Connections; using MongoDB.Driver.Core.Servers; using MongoDB.Shared; @@ -55,6 +56,7 @@ internal sealed class ClusterKey private readonly ServerMonitoringMode _serverMonitoringMode; private readonly TimeSpan _serverSelectionTimeout; private readonly TimeSpan _socketTimeout; + private readonly Socks5ProxySettings _socks5ProxySettings; private readonly int _srvMaxHosts; private readonly string _srvServiceName; private readonly SslSettings _sslSettings; @@ -93,6 +95,7 @@ public ClusterKey( ServerMonitoringMode serverMonitoringMode, TimeSpan serverSelectionTimeout, TimeSpan socketTimeout, + Socks5ProxySettings socks5ProxySettings, int srvMaxHosts, string srvServiceName, SslSettings sslSettings, @@ -129,6 +132,7 @@ public ClusterKey( _serverMonitoringMode = serverMonitoringMode; _serverSelectionTimeout = serverSelectionTimeout; _socketTimeout = socketTimeout; + _socks5ProxySettings = socks5ProxySettings; _srvMaxHosts = srvMaxHosts; _srvServiceName = srvServiceName; _sslSettings = sslSettings; @@ -169,6 +173,7 @@ public ClusterKey( public ServerMonitoringMode ServerMonitoringMode { get { return _serverMonitoringMode; } } public TimeSpan ServerSelectionTimeout { get { return _serverSelectionTimeout; } } public TimeSpan SocketTimeout { get { return _socketTimeout; } } + public Socks5ProxySettings Socks5ProxySettings { get { return _socks5ProxySettings; } } public int SrvMaxHosts { get { return _srvMaxHosts; } } 
public string SrvServiceName { get { return _srvServiceName; } } public SslSettings SslSettings { get { return _sslSettings; } } @@ -224,6 +229,7 @@ public override bool Equals(object obj) _serverMonitoringMode == rhs._serverMonitoringMode && _serverSelectionTimeout == rhs._serverSelectionTimeout && _socketTimeout == rhs._socketTimeout && + object.Equals(_socks5ProxySettings, rhs._socks5ProxySettings) && _srvMaxHosts == rhs._srvMaxHosts && _srvServiceName == rhs.SrvServiceName && object.Equals(_sslSettings, rhs._sslSettings) && diff --git a/src/MongoDB.Driver/ClusterRegistry.cs b/src/MongoDB.Driver/ClusterRegistry.cs index 3359cd4b612..49e599be8d1 100644 --- a/src/MongoDB.Driver/ClusterRegistry.cs +++ b/src/MongoDB.Driver/ClusterRegistry.cs @@ -70,6 +70,11 @@ private IClusterInternal CreateCluster(ClusterKey clusterKey) builder.ConfigureSsl(settings => ConfigureSsl(settings, clusterKey)); } + if (clusterKey.Socks5ProxySettings != null) + { + builder.ConfigureSocks5Proxy(settings => ConfigureSocks5Proxy(settings, clusterKey)); + } + if (clusterKey.ClusterConfigurator != null) { clusterKey.ClusterConfigurator(builder); @@ -174,6 +179,12 @@ private TcpStreamSettings ConfigureTcp(TcpStreamSettings settings, ClusterKey cl writeTimeout: clusterKey.SocketTimeout); } + private Socks5ProxyStreamSettings ConfigureSocks5Proxy(Socks5ProxyStreamSettings settings, ClusterKey clusterKey) + { + return settings.With( + clusterKey.Socks5ProxySettings); + } + internal IClusterInternal GetOrCreateCluster(ClusterKey clusterKey) { lock (_lock) diff --git a/src/MongoDB.Driver/CommitTransactionOptions.cs b/src/MongoDB.Driver/CommitTransactionOptions.cs new file mode 100644 index 00000000000..008e902815e --- /dev/null +++ b/src/MongoDB.Driver/CommitTransactionOptions.cs @@ -0,0 +1,32 @@ +/* Copyright 2010-present MongoDB Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +using System; +using MongoDB.Driver.Core.Misc; + +namespace MongoDB.Driver +{ + // TODO: CSOT: Make it public when CSOT will be ready for GA + internal sealed class CommitTransactionOptions + { + public CommitTransactionOptions(TimeSpan? timeout) + { + Timeout = Ensure.IsNullOrValidTimeout(timeout, nameof(timeout)); + } + + public TimeSpan? Timeout { get; } + } +} + diff --git a/src/MongoDB.Driver/Core/Bindings/ChannelChannelSource.cs b/src/MongoDB.Driver/Core/Bindings/ChannelChannelSource.cs index 26f7ee2881d..f48a8038428 100644 --- a/src/MongoDB.Driver/Core/Bindings/ChannelChannelSource.cs +++ b/src/MongoDB.Driver/Core/Bindings/ChannelChannelSource.cs @@ -14,7 +14,6 @@ */ using System; -using System.Threading; using System.Threading.Tasks; using MongoDB.Driver.Core.Misc; using MongoDB.Driver.Core.Servers; @@ -64,13 +63,13 @@ public void Dispose() } } - public IChannelHandle GetChannel(CancellationToken cancellationToken) + public IChannelHandle GetChannel(OperationContext operationContext) { ThrowIfDisposed(); return GetChannelHelper(); } - public Task<IChannelHandle> GetChannelAsync(CancellationToken cancellationToken) + public Task<IChannelHandle> GetChannelAsync(OperationContext operationContext) { ThrowIfDisposed(); return Task.FromResult(GetChannelHelper()); diff --git a/src/MongoDB.Driver/Core/Bindings/ChannelReadBinding.cs b/src/MongoDB.Driver/Core/Bindings/ChannelReadBinding.cs index 81173ad3639..63dab353cf4 100644 --- a/src/MongoDB.Driver/Core/Bindings/ChannelReadBinding.cs +++ 
b/src/MongoDB.Driver/Core/Bindings/ChannelReadBinding.cs @@ -15,7 +15,6 @@ using System; using System.Collections.Generic; -using System.Threading; using System.Threading.Tasks; using MongoDB.Driver.Core.Misc; using MongoDB.Driver.Core.Servers; @@ -58,26 +57,26 @@ public void Dispose() } } - public IChannelSourceHandle GetReadChannelSource(CancellationToken cancellationToken) + public IChannelSourceHandle GetReadChannelSource(OperationContext operationContext) { ThrowIfDisposed(); return GetReadChannelSourceHelper(); } - public Task<IChannelSourceHandle> GetReadChannelSourceAsync(CancellationToken cancellationToken) + public Task<IChannelSourceHandle> GetReadChannelSourceAsync(OperationContext operationContext) { ThrowIfDisposed(); return Task.FromResult<IChannelSourceHandle>(GetReadChannelSourceHelper()); } - public IChannelSourceHandle GetReadChannelSource(IReadOnlyCollection<ServerDescription> deprioritizedServers, CancellationToken cancellationToken) + public IChannelSourceHandle GetReadChannelSource(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers) { - return GetReadChannelSource(cancellationToken); + return GetReadChannelSource(operationContext); } - public Task<IChannelSourceHandle> GetReadChannelSourceAsync(IReadOnlyCollection<ServerDescription> deprioritizedServers, CancellationToken cancellationToken) + public Task<IChannelSourceHandle> GetReadChannelSourceAsync(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers) { - return GetReadChannelSourceAsync(cancellationToken); + return GetReadChannelSourceAsync(operationContext); } private IChannelSourceHandle GetReadChannelSourceHelper() diff --git a/src/MongoDB.Driver/Core/Bindings/ChannelReadWriteBinding.cs b/src/MongoDB.Driver/Core/Bindings/ChannelReadWriteBinding.cs index 71a8e76c429..17ae75966bc 100644 --- a/src/MongoDB.Driver/Core/Bindings/ChannelReadWriteBinding.cs +++ 
b/src/MongoDB.Driver/Core/Bindings/ChannelReadWriteBinding.cs @@ -15,7 +15,6 @@ using System; using System.Collections.Generic; -using System.Threading; using System.Threading.Tasks; using MongoDB.Driver.Core.Misc; using MongoDB.Driver.Core.Servers; @@ -56,68 +55,68 @@ public void Dispose() } } - public IChannelSourceHandle GetReadChannelSource(CancellationToken cancellationToken) + public IChannelSourceHandle GetReadChannelSource(OperationContext operationContext) { ThrowIfDisposed(); return GetChannelSourceHelper(); } - public Task<IChannelSourceHandle> GetReadChannelSourceAsync(CancellationToken cancellationToken) + public Task<IChannelSourceHandle> GetReadChannelSourceAsync(OperationContext operationContext) { ThrowIfDisposed(); return Task.FromResult(GetChannelSourceHelper()); } - public IChannelSourceHandle GetReadChannelSource(IReadOnlyCollection<ServerDescription> deprioritizedServers, CancellationToken cancellationToken) + public IChannelSourceHandle GetReadChannelSource(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers) { - return GetReadChannelSource(cancellationToken); + return GetReadChannelSource(operationContext); } - public Task<IChannelSourceHandle> GetReadChannelSourceAsync(IReadOnlyCollection<ServerDescription> deprioritizedServers, CancellationToken cancellationToken) + public Task<IChannelSourceHandle> GetReadChannelSourceAsync(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers) { - return GetReadChannelSourceAsync(cancellationToken); + return GetReadChannelSourceAsync(operationContext); } - public IChannelSourceHandle GetWriteChannelSource(CancellationToken cancellationToken) + public IChannelSourceHandle GetWriteChannelSource(OperationContext operationContext) { ThrowIfDisposed(); return GetChannelSourceHelper(); } - public IChannelSourceHandle GetWriteChannelSource(IReadOnlyCollection<ServerDescription> deprioritizedServers, CancellationToken 
cancellationToken) + public IChannelSourceHandle GetWriteChannelSource(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers) { - return GetWriteChannelSource(cancellationToken); + return GetWriteChannelSource(operationContext); } - public IChannelSourceHandle GetWriteChannelSource(IMayUseSecondaryCriteria mayUseSecondary, CancellationToken cancellationToken) + public IChannelSourceHandle GetWriteChannelSource(OperationContext operationContext, IMayUseSecondaryCriteria mayUseSecondary) { - return GetWriteChannelSource(cancellationToken); // ignore mayUseSecondary + return GetWriteChannelSource(operationContext); // ignore mayUseSecondary } - public IChannelSourceHandle GetWriteChannelSource(IReadOnlyCollection<ServerDescription> deprioritizedServers, IMayUseSecondaryCriteria mayUseSecondary, CancellationToken cancellationToken) + public IChannelSourceHandle GetWriteChannelSource(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers, IMayUseSecondaryCriteria mayUseSecondary) { - return GetWriteChannelSource(mayUseSecondary, cancellationToken); + return GetWriteChannelSource(operationContext, mayUseSecondary); } - public Task<IChannelSourceHandle> GetWriteChannelSourceAsync(CancellationToken cancellationToken) + public Task<IChannelSourceHandle> GetWriteChannelSourceAsync(OperationContext operationContext) { ThrowIfDisposed(); return Task.FromResult(GetChannelSourceHelper()); } - public Task<IChannelSourceHandle> GetWriteChannelSourceAsync(IReadOnlyCollection<ServerDescription> deprioritizedServers, CancellationToken cancellationToken) + public Task<IChannelSourceHandle> GetWriteChannelSourceAsync(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers) { - return GetWriteChannelSourceAsync(cancellationToken); + return GetWriteChannelSourceAsync(operationContext); } - public Task<IChannelSourceHandle> 
GetWriteChannelSourceAsync(IMayUseSecondaryCriteria mayUseSecondary, CancellationToken cancellationToken) + public Task<IChannelSourceHandle> GetWriteChannelSourceAsync(OperationContext operationContext, IMayUseSecondaryCriteria mayUseSecondary) { - return GetWriteChannelSourceAsync(cancellationToken); // ignore mayUseSecondary + return GetWriteChannelSourceAsync(operationContext); // ignore mayUseSecondary } - public Task<IChannelSourceHandle> GetWriteChannelSourceAsync(IReadOnlyCollection<ServerDescription> deprioritizedServers, IMayUseSecondaryCriteria mayUseSecondary, CancellationToken cancellationToken) + public Task<IChannelSourceHandle> GetWriteChannelSourceAsync(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers, IMayUseSecondaryCriteria mayUseSecondary) { - return GetWriteChannelSourceAsync(mayUseSecondary, cancellationToken); + return GetWriteChannelSourceAsync(operationContext, mayUseSecondary); } private IChannelSourceHandle GetChannelSourceHelper() diff --git a/src/MongoDB.Driver/Core/Bindings/ChannelSourceHandle.cs b/src/MongoDB.Driver/Core/Bindings/ChannelSourceHandle.cs index c7c04912d7b..3b08ff9da33 100644 --- a/src/MongoDB.Driver/Core/Bindings/ChannelSourceHandle.cs +++ b/src/MongoDB.Driver/Core/Bindings/ChannelSourceHandle.cs @@ -14,7 +14,6 @@ */ using System; -using System.Threading; using System.Threading.Tasks; using MongoDB.Driver.Core.Misc; using MongoDB.Driver.Core.Servers; @@ -55,16 +54,16 @@ public ICoreSessionHandle Session } // methods - public IChannelHandle GetChannel(CancellationToken cancellationToken) + public IChannelHandle GetChannel(OperationContext operationContext) { ThrowIfDisposed(); - return _reference.Instance.GetChannel(cancellationToken); + return _reference.Instance.GetChannel(operationContext); } - public Task<IChannelHandle> GetChannelAsync(CancellationToken cancellationToken) + public Task<IChannelHandle> GetChannelAsync(OperationContext operationContext) { 
ThrowIfDisposed(); - return _reference.Instance.GetChannelAsync(cancellationToken); + return _reference.Instance.GetChannelAsync(operationContext); } public void Dispose() diff --git a/src/MongoDB.Driver/Core/Bindings/ChannelSourceReadWriteBinding.cs b/src/MongoDB.Driver/Core/Bindings/ChannelSourceReadWriteBinding.cs index c6fef30f1e6..e0c4772d23f 100644 --- a/src/MongoDB.Driver/Core/Bindings/ChannelSourceReadWriteBinding.cs +++ b/src/MongoDB.Driver/Core/Bindings/ChannelSourceReadWriteBinding.cs @@ -15,7 +15,6 @@ using System; using System.Collections.Generic; -using System.Threading; using System.Threading.Tasks; using MongoDB.Driver.Core.Misc; using MongoDB.Driver.Core.Servers; @@ -46,68 +45,68 @@ public ICoreSessionHandle Session get { return _session; } } - public IChannelSourceHandle GetReadChannelSource(CancellationToken cancellationToken) + public IChannelSourceHandle GetReadChannelSource(OperationContext operationContext) { ThrowIfDisposed(); return GetChannelSourceHelper(); } - public Task<IChannelSourceHandle> GetReadChannelSourceAsync(CancellationToken cancellationToken) + public Task<IChannelSourceHandle> GetReadChannelSourceAsync(OperationContext operationContext) { ThrowIfDisposed(); return Task.FromResult(GetChannelSourceHelper()); } - public IChannelSourceHandle GetReadChannelSource(IReadOnlyCollection<ServerDescription> deprioritizedServers, CancellationToken cancellationToken) + public IChannelSourceHandle GetReadChannelSource(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers) { - return GetReadChannelSource(cancellationToken); + return GetReadChannelSource(operationContext); } - public Task<IChannelSourceHandle> GetReadChannelSourceAsync(IReadOnlyCollection<ServerDescription> deprioritizedServers, CancellationToken cancellationToken) + public Task<IChannelSourceHandle> GetReadChannelSourceAsync(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers) { - return 
GetReadChannelSourceAsync(cancellationToken); + return GetReadChannelSourceAsync(operationContext); } - public IChannelSourceHandle GetWriteChannelSource(CancellationToken cancellationToken) + public IChannelSourceHandle GetWriteChannelSource(OperationContext operationContext) { ThrowIfDisposed(); return GetChannelSourceHelper(); } - public IChannelSourceHandle GetWriteChannelSource(IReadOnlyCollection<ServerDescription> deprioritizedServers, CancellationToken cancellationToken) + public IChannelSourceHandle GetWriteChannelSource(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers) { - return GetWriteChannelSource(cancellationToken); + return GetWriteChannelSource(operationContext); } - public IChannelSourceHandle GetWriteChannelSource(IMayUseSecondaryCriteria mayUseSecondary, CancellationToken cancellationToken) + public IChannelSourceHandle GetWriteChannelSource(OperationContext operationContext, IMayUseSecondaryCriteria mayUseSecondary) { - return GetWriteChannelSource(cancellationToken); // ignore mayUseSecondary + return GetWriteChannelSource(operationContext); // ignore mayUseSecondary } - public IChannelSourceHandle GetWriteChannelSource(IReadOnlyCollection<ServerDescription> deprioritizedServers, IMayUseSecondaryCriteria mayUseSecondary, CancellationToken cancellationToken) + public IChannelSourceHandle GetWriteChannelSource(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers, IMayUseSecondaryCriteria mayUseSecondary) { - return GetWriteChannelSource(mayUseSecondary, cancellationToken); + return GetWriteChannelSource(operationContext, mayUseSecondary); } - public Task<IChannelSourceHandle> GetWriteChannelSourceAsync(CancellationToken cancellationToken) + public Task<IChannelSourceHandle> GetWriteChannelSourceAsync(OperationContext operationContext) { ThrowIfDisposed(); return Task.FromResult(GetChannelSourceHelper()); } - public Task<IChannelSourceHandle> 
GetWriteChannelSourceAsync(IReadOnlyCollection<ServerDescription> deprioritizedServers, CancellationToken cancellationToken) + public Task<IChannelSourceHandle> GetWriteChannelSourceAsync(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers) { - return GetWriteChannelSourceAsync(cancellationToken); + return GetWriteChannelSourceAsync(operationContext); } - public Task<IChannelSourceHandle> GetWriteChannelSourceAsync(IMayUseSecondaryCriteria mayUseSecondary, CancellationToken cancellationToken) + public Task<IChannelSourceHandle> GetWriteChannelSourceAsync(OperationContext operationContext, IMayUseSecondaryCriteria mayUseSecondary) { - return GetWriteChannelSourceAsync(cancellationToken); // ignore mayUseSecondary + return GetWriteChannelSourceAsync(operationContext); // ignore mayUseSecondary } - public Task<IChannelSourceHandle> GetWriteChannelSourceAsync(IReadOnlyCollection<ServerDescription> deprioritizedServers, IMayUseSecondaryCriteria mayUseSecondary, CancellationToken cancellationToken) + public Task<IChannelSourceHandle> GetWriteChannelSourceAsync(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers, IMayUseSecondaryCriteria mayUseSecondary) { - return GetWriteChannelSourceAsync(mayUseSecondary, cancellationToken); + return GetWriteChannelSourceAsync(operationContext, mayUseSecondary); } public void Dispose() diff --git a/src/MongoDB.Driver/Core/Bindings/CoreSession.cs b/src/MongoDB.Driver/Core/Bindings/CoreSession.cs index 66fa294a14f..954c639e244 100644 --- a/src/MongoDB.Driver/Core/Bindings/CoreSession.cs +++ b/src/MongoDB.Driver/Core/Bindings/CoreSession.cs @@ -1,4 +1,4 @@ -/* Copyright 2018-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -29,7 +29,7 @@ namespace MongoDB.Driver.Core.Bindings /// Represents a session. 
/// </summary> /// <seealso cref="MongoDB.Driver.Core.Bindings.ICoreSession" /> - public sealed class CoreSession : ICoreSession + public sealed class CoreSession : ICoreSession, ICoreSessionInternal { // private fields #pragma warning disable CA2213 // Disposable fields should be disposed @@ -141,10 +141,15 @@ public bool IsInTransaction // public methods /// <inheritdoc /> - public void AbortTransaction(CancellationToken cancellationToken = default(CancellationToken)) + public void AbortTransaction(CancellationToken cancellationToken = default) + => ((ICoreSessionInternal)this).AbortTransaction(null, cancellationToken); + + // TODO: CSOT: Make it public when CSOT will be ready for GA and add default value to cancellationToken parameter. + void ICoreSessionInternal.AbortTransaction(AbortTransactionOptions options, CancellationToken cancellationToken) { EnsureAbortTransactionCanBeCalled(nameof(AbortTransaction)); + using var operationContext = new OperationContext(GetTimeout(options?.Timeout), cancellationToken); try { if (_currentTransaction.IsEmpty) @@ -154,11 +159,11 @@ public bool IsInTransaction try { - var firstAttempt = CreateAbortTransactionOperation(); - ExecuteEndTransactionOnPrimary(firstAttempt, cancellationToken); + var firstAttempt = CreateAbortTransactionOperation(operationContext); + ExecuteEndTransactionOnPrimary(operationContext, firstAttempt); return; } - catch (Exception exception) when (ShouldRetryEndTransactionException(exception)) + catch (Exception exception) when (ShouldRetryEndTransactionException(operationContext, exception)) { // unpin if retryable error _currentTransaction.UnpinAll(); @@ -172,8 +177,8 @@ public bool IsInTransaction try { - var secondAttempt = CreateAbortTransactionOperation(); - ExecuteEndTransactionOnPrimary(secondAttempt, cancellationToken); + var secondAttempt = CreateAbortTransactionOperation(operationContext); + ExecuteEndTransactionOnPrimary(operationContext, secondAttempt); } catch { @@ -190,10 +195,15 @@ public 
bool IsInTransaction } /// <inheritdoc /> - public async Task AbortTransactionAsync(CancellationToken cancellationToken = default(CancellationToken)) + public Task AbortTransactionAsync(CancellationToken cancellationToken = default) + => ((ICoreSessionInternal)this).AbortTransactionAsync(null, cancellationToken); + + // TODO: CSOT: Make it public when CSOT will be ready for GA and add default value to cancellationToken parameter. + async Task ICoreSessionInternal.AbortTransactionAsync(AbortTransactionOptions options, CancellationToken cancellationToken) { EnsureAbortTransactionCanBeCalled(nameof(AbortTransaction)); + using var operationContext = new OperationContext(GetTimeout(options?.Timeout), cancellationToken); try { if (_currentTransaction.IsEmpty) @@ -203,11 +213,11 @@ public bool IsInTransaction try { - var firstAttempt = CreateAbortTransactionOperation(); - await ExecuteEndTransactionOnPrimaryAsync(firstAttempt, cancellationToken).ConfigureAwait(false); + var firstAttempt = CreateAbortTransactionOperation(operationContext); + await ExecuteEndTransactionOnPrimaryAsync(operationContext, firstAttempt).ConfigureAwait(false); return; } - catch (Exception exception) when (ShouldRetryEndTransactionException(exception)) + catch (Exception exception) when (ShouldRetryEndTransactionException(operationContext, exception)) { // unpin if retryable error _currentTransaction.UnpinAll(); @@ -221,8 +231,8 @@ public bool IsInTransaction try { - var secondAttempt = CreateAbortTransactionOperation(); - await ExecuteEndTransactionOnPrimaryAsync(secondAttempt, cancellationToken).ConfigureAwait(false); + var secondAttempt = CreateAbortTransactionOperation(operationContext); + await ExecuteEndTransactionOnPrimaryAsync(operationContext, secondAttempt).ConfigureAwait(false); } catch { @@ -288,10 +298,15 @@ public long AdvanceTransactionNumber() } /// <inheritdoc /> - public void CommitTransaction(CancellationToken cancellationToken = default(CancellationToken)) + public void 
CommitTransaction(CancellationToken cancellationToken = default) + => ((ICoreSessionInternal)this).CommitTransaction(null, cancellationToken); + + // TODO: CSOT: Make it public when CSOT will be ready for GA and add default value to cancellationToken parameter. + void ICoreSessionInternal.CommitTransaction(CommitTransactionOptions options, CancellationToken cancellationToken) { EnsureCommitTransactionCanBeCalled(nameof(CommitTransaction)); + using var operationContext = new OperationContext(GetTimeout(options?.Timeout), cancellationToken); try { _isCommitTransactionInProgress = true; @@ -302,18 +317,18 @@ public long AdvanceTransactionNumber() try { - var firstAttempt = CreateCommitTransactionOperation(IsFirstCommitAttemptRetry()); - ExecuteEndTransactionOnPrimary(firstAttempt, cancellationToken); + var firstAttempt = CreateCommitTransactionOperation(operationContext, IsFirstCommitAttemptRetry()); + ExecuteEndTransactionOnPrimary(operationContext, firstAttempt); return; } - catch (Exception exception) when (ShouldRetryEndTransactionException(exception)) + catch (Exception exception) when (ShouldRetryEndTransactionException(operationContext, exception)) { // unpin server if needed, then ignore exception and retry TransactionHelper.UnpinServerIfNeededOnRetryableCommitException(_currentTransaction, exception); } - var secondAttempt = CreateCommitTransactionOperation(isCommitRetry: true); - ExecuteEndTransactionOnPrimary(secondAttempt, cancellationToken); + var secondAttempt = CreateCommitTransactionOperation(operationContext, isCommitRetry: true); + ExecuteEndTransactionOnPrimary(operationContext, secondAttempt); } finally { @@ -323,10 +338,15 @@ public long AdvanceTransactionNumber() } /// <inheritdoc /> - public async Task CommitTransactionAsync(CancellationToken cancellationToken = default(CancellationToken)) + public Task CommitTransactionAsync(CancellationToken cancellationToken = default) + => ((ICoreSessionInternal)this).CommitTransactionAsync(null, 
cancellationToken); + + // TODO: CSOT: Make it public when CSOT will be ready for GA and add default value to cancellationToken parameter. + async Task ICoreSessionInternal.CommitTransactionAsync(CommitTransactionOptions options, CancellationToken cancellationToken) { EnsureCommitTransactionCanBeCalled(nameof(CommitTransaction)); + using var operationContext = new OperationContext(GetTimeout(options?.Timeout), cancellationToken); try { _isCommitTransactionInProgress = true; @@ -337,18 +357,18 @@ public long AdvanceTransactionNumber() try { - var firstAttempt = CreateCommitTransactionOperation(IsFirstCommitAttemptRetry()); - await ExecuteEndTransactionOnPrimaryAsync(firstAttempt, cancellationToken).ConfigureAwait(false); + var firstAttempt = CreateCommitTransactionOperation(operationContext, IsFirstCommitAttemptRetry()); + await ExecuteEndTransactionOnPrimaryAsync(operationContext, firstAttempt).ConfigureAwait(false); return; } - catch (Exception exception) when (ShouldRetryEndTransactionException(exception)) + catch (Exception exception) when (ShouldRetryEndTransactionException(operationContext, exception)) { // unpin server if needed, then ignore exception and retry TransactionHelper.UnpinServerIfNeededOnRetryableCommitException(_currentTransaction, exception); } - var secondAttempt = CreateCommitTransactionOperation(isCommitRetry: true); - await ExecuteEndTransactionOnPrimaryAsync(secondAttempt, cancellationToken).ConfigureAwait(false); + var secondAttempt = CreateCommitTransactionOperation(operationContext, isCommitRetry: true); + await ExecuteEndTransactionOnPrimaryAsync(operationContext, secondAttempt).ConfigureAwait(false); } finally { @@ -404,7 +424,7 @@ public void StartTransaction(TransactionOptions transactionOptions = null) throw new InvalidOperationException("Transactions do not support unacknowledged write concerns."); } - _currentTransaction?.UnpinAll(); // unpin data if any when a new transaction is started + _currentTransaction?.UnpinAll(); // unpin 
data if any when a new transaction is started _currentTransaction = new CoreTransaction(transactionNumber, effectiveTransactionOptions); } @@ -424,14 +444,14 @@ public void WasUsed() } // private methods - private IReadOperation<BsonDocument> CreateAbortTransactionOperation() + private IReadOperation<BsonDocument> CreateAbortTransactionOperation(OperationContext operationContext) { - return new AbortTransactionOperation(_currentTransaction.RecoveryToken, GetTransactionWriteConcern()); + return new AbortTransactionOperation(_currentTransaction.RecoveryToken, GetTransactionWriteConcern(operationContext)); } - private IReadOperation<BsonDocument> CreateCommitTransactionOperation(bool isCommitRetry) + private IReadOperation<BsonDocument> CreateCommitTransactionOperation(OperationContext operationContext, bool isCommitRetry) { - var writeConcern = GetCommitTransactionWriteConcern(isCommitRetry); + var writeConcern = GetCommitTransactionWriteConcern(operationContext, isCommitRetry); var maxCommitTime = _currentTransaction.TransactionOptions.MaxCommitTime; return new CommitTransactionOperation(_currentTransaction.RecoveryToken, writeConcern) { MaxCommitTime = maxCommitTime }; } @@ -537,24 +557,27 @@ private void EnsureTransactionsAreSupported() } } - private TResult ExecuteEndTransactionOnPrimary<TResult>(IReadOperation<TResult> operation, CancellationToken cancellationToken) + private TResult ExecuteEndTransactionOnPrimary<TResult>(OperationContext operationContext, IReadOperation<TResult> operation) { using (var sessionHandle = new NonDisposingCoreSessionHandle(this)) using (var binding = ChannelPinningHelper.CreateReadWriteBinding(_cluster, sessionHandle)) { - return operation.Execute(binding, cancellationToken); + return operation.Execute(operationContext, binding); } } - private async Task<TResult> ExecuteEndTransactionOnPrimaryAsync<TResult>(IReadOperation<TResult> operation, CancellationToken cancellationToken) + private async Task<TResult> 
ExecuteEndTransactionOnPrimaryAsync<TResult>(OperationContext operationContext, IReadOperation<TResult> operation) { using (var sessionHandle = new NonDisposingCoreSessionHandle(this)) using (var binding = ChannelPinningHelper.CreateReadWriteBinding(_cluster, sessionHandle)) { - return await operation.ExecuteAsync(binding, cancellationToken).ConfigureAwait(false); + return await operation.ExecuteAsync(operationContext, binding).ConfigureAwait(false); } } + private TimeSpan? GetTimeout(TimeSpan? timeout) + => timeout ?? _options.DefaultTransactionOptions?.Timeout; + private TransactionOptions GetEffectiveTransactionOptions(TransactionOptions transactionOptions) { var readConcern = transactionOptions?.ReadConcern ?? _options.DefaultTransactionOptions?.ReadConcern ?? ReadConcern.Default; @@ -564,21 +587,27 @@ private TransactionOptions GetEffectiveTransactionOptions(TransactionOptions tra return new TransactionOptions(readConcern, readPreference, writeConcern, maxCommitTime); } - private WriteConcern GetTransactionWriteConcern() + private WriteConcern GetTransactionWriteConcern(OperationContext operationContext) { - return - _currentTransaction.TransactionOptions?.WriteConcern ?? - _options.DefaultTransactionOptions?.WriteConcern ?? - WriteConcern.WMajority; + var writeConcern = _currentTransaction.TransactionOptions?.WriteConcern ?? + _options.DefaultTransactionOptions?.WriteConcern ?? 
+ WriteConcern.WMajority; + + if (operationContext.IsRootContextTimeoutConfigured()) + { + writeConcern = writeConcern.With(wTimeout: null); + } + + return writeConcern; } - private WriteConcern GetCommitTransactionWriteConcern(bool isCommitRetry) + private WriteConcern GetCommitTransactionWriteConcern(OperationContext operationContext, bool isCommitRetry) { - var writeConcern = GetTransactionWriteConcern(); + var writeConcern = GetTransactionWriteConcern(operationContext); if (isCommitRetry) { writeConcern = writeConcern.With(mode: "majority"); - if (writeConcern.WTimeout == null) + if (writeConcern.WTimeout == null && !operationContext.IsRootContextTimeoutConfigured()) { writeConcern = writeConcern.With(wTimeout: TimeSpan.FromMilliseconds(10000)); } @@ -593,9 +622,14 @@ private bool IsFirstCommitAttemptRetry() return _currentTransaction.State == CoreTransactionState.Committed; } - private bool ShouldRetryEndTransactionException(Exception exception) + private bool ShouldRetryEndTransactionException(OperationContext operationContext, Exception exception) { - return RetryabilityHelper.IsRetryableWriteException(exception); + if (!RetryabilityHelper.IsRetryableWriteException(exception)) + { + return false; + } + + return operationContext.IsRootContextTimeoutConfigured() ? !operationContext.IsTimedOut() : true; } } } diff --git a/src/MongoDB.Driver/Core/Bindings/CoreTransaction.cs b/src/MongoDB.Driver/Core/Bindings/CoreTransaction.cs index 53747c8530c..6ed2a4f849e 100644 --- a/src/MongoDB.Driver/Core/Bindings/CoreTransaction.cs +++ b/src/MongoDB.Driver/Core/Bindings/CoreTransaction.cs @@ -1,4 +1,4 @@ -/* Copyright 2018-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -56,6 +56,8 @@ public CoreTransaction(long transactionNumber, TransactionOptions transactionOpt /// </value> public bool IsEmpty => _isEmpty; + internal OperationContext OperationContext { get; set; } + /// <summary> /// Gets the transaction state. /// </summary> diff --git a/src/MongoDB.Driver/Core/Bindings/IBinding.cs b/src/MongoDB.Driver/Core/Bindings/IBinding.cs index 304796f1a9b..275131043ba 100644 --- a/src/MongoDB.Driver/Core/Bindings/IBinding.cs +++ b/src/MongoDB.Driver/Core/Bindings/IBinding.cs @@ -30,24 +30,24 @@ internal interface IReadBinding : IBinding { ReadPreference ReadPreference { get; } - IChannelSourceHandle GetReadChannelSource(CancellationToken cancellationToken); - Task<IChannelSourceHandle> GetReadChannelSourceAsync(CancellationToken cancellationToken); + IChannelSourceHandle GetReadChannelSource(OperationContext operationContext); + Task<IChannelSourceHandle> GetReadChannelSourceAsync(OperationContext operationContext); - IChannelSourceHandle GetReadChannelSource(IReadOnlyCollection<ServerDescription> deprioritizedServers, CancellationToken cancellationToken); - Task<IChannelSourceHandle> GetReadChannelSourceAsync(IReadOnlyCollection<ServerDescription> deprioritizedServers, CancellationToken cancellationToken); + IChannelSourceHandle GetReadChannelSource(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers); + Task<IChannelSourceHandle> GetReadChannelSourceAsync(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers); } internal interface IWriteBinding : IBinding { - IChannelSourceHandle GetWriteChannelSource(CancellationToken cancellationToken); - IChannelSourceHandle GetWriteChannelSource(IReadOnlyCollection<ServerDescription> deprioritizedServers, CancellationToken cancellationToken); - IChannelSourceHandle GetWriteChannelSource(IMayUseSecondaryCriteria mayUseSecondary, CancellationToken cancellationToken); - IChannelSourceHandle 
GetWriteChannelSource(IReadOnlyCollection<ServerDescription> deprioritizedServers, IMayUseSecondaryCriteria mayUseSecondary, CancellationToken cancellationToken); + IChannelSourceHandle GetWriteChannelSource(OperationContext operationContext); + IChannelSourceHandle GetWriteChannelSource(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers); + IChannelSourceHandle GetWriteChannelSource(OperationContext operationContext, IMayUseSecondaryCriteria mayUseSecondary); + IChannelSourceHandle GetWriteChannelSource(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers, IMayUseSecondaryCriteria mayUseSecondary); - Task<IChannelSourceHandle> GetWriteChannelSourceAsync(CancellationToken cancellationToken); - Task<IChannelSourceHandle> GetWriteChannelSourceAsync(IReadOnlyCollection<ServerDescription> deprioritizedServers, CancellationToken cancellationToken); - Task<IChannelSourceHandle> GetWriteChannelSourceAsync(IMayUseSecondaryCriteria mayUseSecondary, CancellationToken cancellationToken); - Task<IChannelSourceHandle> GetWriteChannelSourceAsync(IReadOnlyCollection<ServerDescription> deprioritizedServers, IMayUseSecondaryCriteria mayUseSecondary, CancellationToken cancellationToken); + Task<IChannelSourceHandle> GetWriteChannelSourceAsync(OperationContext operationContext); + Task<IChannelSourceHandle> GetWriteChannelSourceAsync(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers); + Task<IChannelSourceHandle> GetWriteChannelSourceAsync(OperationContext operationContext, IMayUseSecondaryCriteria mayUseSecondary); + Task<IChannelSourceHandle> GetWriteChannelSourceAsync(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers, IMayUseSecondaryCriteria mayUseSecondary); } internal interface IReadWriteBinding : IReadBinding, IWriteBinding diff --git a/src/MongoDB.Driver/Core/Bindings/IChannel.cs 
b/src/MongoDB.Driver/Core/Bindings/IChannel.cs index f58ba48103e..3a1dca12020 100644 --- a/src/MongoDB.Driver/Core/Bindings/IChannel.cs +++ b/src/MongoDB.Driver/Core/Bindings/IChannel.cs @@ -15,7 +15,6 @@ using System; using System.Collections.Generic; -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Bson.IO; @@ -33,6 +32,7 @@ internal interface IChannel : IDisposable ConnectionDescription ConnectionDescription { get; } TResult Command<TResult>( + OperationContext operationContext, ICoreSession session, ReadPreference readPreference, DatabaseNamespace databaseNamespace, @@ -43,10 +43,10 @@ TResult Command<TResult>( Action<IMessageEncoderPostProcessor> postWriteAction, CommandResponseHandling responseHandling, IBsonSerializer<TResult> resultSerializer, - MessageEncoderSettings messageEncoderSettings, - CancellationToken cancellationToken); + MessageEncoderSettings messageEncoderSettings); Task<TResult> CommandAsync<TResult>( + OperationContext operationContext, ICoreSession session, ReadPreference readPreference, DatabaseNamespace databaseNamespace, @@ -57,76 +57,7 @@ Task<TResult> CommandAsync<TResult>( Action<IMessageEncoderPostProcessor> postWriteAction, CommandResponseHandling responseHandling, IBsonSerializer<TResult> resultSerializer, - MessageEncoderSettings messageEncoderSettings, - CancellationToken cancellationToken); - - CursorBatch<TDocument> Query<TDocument>( - CollectionNamespace collectionNamespace, - BsonDocument query, - BsonDocument fields, - IElementNameValidator queryValidator, - int skip, - int batchSize, - bool secondaryOk, - bool partialOk, - bool noCursorTimeout, - bool tailableCursor, - bool awaitData, - IBsonSerializer<TDocument> serializer, - MessageEncoderSettings messageEncoderSettings, - CancellationToken cancellationToken); - - [Obsolete("Use an overload that does not have an oplogReplay parameter instead.")] - CursorBatch<TDocument> Query<TDocument>( - CollectionNamespace collectionNamespace, - 
BsonDocument query, - BsonDocument fields, - IElementNameValidator queryValidator, - int skip, - int batchSize, - bool secondaryOk, - bool partialOk, - bool noCursorTimeout, - bool oplogReplay, // obsolete: OplogReplay is ignored by server versions 4.4.0 and newer - bool tailableCursor, - bool awaitData, - IBsonSerializer<TDocument> serializer, - MessageEncoderSettings messageEncoderSettings, - CancellationToken cancellationToken); - - Task<CursorBatch<TDocument>> QueryAsync<TDocument>( - CollectionNamespace collectionNamespace, - BsonDocument query, - BsonDocument fields, - IElementNameValidator queryValidator, - int skip, - int batchSize, - bool secondaryOk, - bool partialOk, - bool noCursorTimeout, - bool tailableCursor, - bool awaitData, - IBsonSerializer<TDocument> serializer, - MessageEncoderSettings messageEncoderSettings, - CancellationToken cancellationToken); - - [Obsolete("Use an overload that does not have an oplogReplay parameter instead.")] - Task<CursorBatch<TDocument>> QueryAsync<TDocument>( - CollectionNamespace collectionNamespace, - BsonDocument query, - BsonDocument fields, - IElementNameValidator queryValidator, - int skip, - int batchSize, - bool secondaryOk, - bool partialOk, - bool noCursorTimeout, - bool oplogReplay, // obsolete: OplogReplay is ignored by server versions 4.4.0 and newer - bool tailableCursor, - bool awaitData, - IBsonSerializer<TDocument> serializer, - MessageEncoderSettings messageEncoderSettings, - CancellationToken cancellationToken); + MessageEncoderSettings messageEncoderSettings); } internal interface IChannelHandle : IChannel diff --git a/src/MongoDB.Driver/Core/Bindings/IChannelSource.cs b/src/MongoDB.Driver/Core/Bindings/IChannelSource.cs index 8582a50e7b8..c9bd90ec61b 100644 --- a/src/MongoDB.Driver/Core/Bindings/IChannelSource.cs +++ b/src/MongoDB.Driver/Core/Bindings/IChannelSource.cs @@ -14,7 +14,6 @@ */ using System; -using System.Threading; using System.Threading.Tasks; using MongoDB.Driver.Core.Servers; @@ 
-26,8 +25,8 @@ internal interface IChannelSource : IDisposable ServerDescription ServerDescription { get; } ICoreSessionHandle Session { get; } - IChannelHandle GetChannel(CancellationToken cancellationToken); - Task<IChannelHandle> GetChannelAsync(CancellationToken cancellationToken); + IChannelHandle GetChannel(OperationContext operationContext); + Task<IChannelHandle> GetChannelAsync(OperationContext operationContext); } internal interface IChannelSourceHandle : IChannelSource diff --git a/src/MongoDB.Driver/Core/Bindings/ICoreSessionExtensions.cs b/src/MongoDB.Driver/Core/Bindings/ICoreSessionExtensions.cs new file mode 100644 index 00000000000..5d176c6181b --- /dev/null +++ b/src/MongoDB.Driver/Core/Bindings/ICoreSessionExtensions.cs @@ -0,0 +1,67 @@ +/* Copyright 2010-present MongoDB Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +using System.Threading; +using System.Threading.Tasks; + +namespace MongoDB.Driver.Core.Bindings +{ + // TODO: CSOT: Make it public when CSOT will be ready for GA + internal static class ICoreSessionExtensions + { + // TODO: Merge these extension methods in ICoreSession interface on major release + public static void AbortTransaction(this ICoreSession session, AbortTransactionOptions options, CancellationToken cancellationToken = default) + { + if (options == null || session.Options.DefaultTransactionOptions?.Timeout == options.Timeout) + { + session.AbortTransaction(cancellationToken); + return; + } + + ((ICoreSessionInternal)session).AbortTransaction(options, cancellationToken); + } + + public static Task AbortTransactionAsync(this ICoreSession session, AbortTransactionOptions options, CancellationToken cancellationToken = default) + { + if (options == null || session.Options.DefaultTransactionOptions?.Timeout == options.Timeout) + { + return session.AbortTransactionAsync(cancellationToken); + } + + return ((ICoreSessionInternal)session).AbortTransactionAsync(options, cancellationToken); + } + + public static void CommitTransaction(this ICoreSession session, CommitTransactionOptions options, CancellationToken cancellationToken = default) + { + if (options == null || session.Options.DefaultTransactionOptions?.Timeout == options.Timeout) + { + session.CommitTransaction(cancellationToken); + return; + } + + ((ICoreSessionInternal)session).CommitTransaction(options, cancellationToken); + } + + public static Task CommitTransactionAsync(this ICoreSession session, CommitTransactionOptions options, CancellationToken cancellationToken = default) + { + if (options == null || session.Options.DefaultTransactionOptions?.Timeout == options.Timeout) + { + return session.CommitTransactionAsync(cancellationToken); + } + + return ((ICoreSessionInternal)session).CommitTransactionAsync(options, cancellationToken); + } + } +} diff --git 
a/src/MongoDB.Driver/Core/Bindings/ICoreSessionInternal.cs b/src/MongoDB.Driver/Core/Bindings/ICoreSessionInternal.cs new file mode 100644 index 00000000000..1844ae6fa8c --- /dev/null +++ b/src/MongoDB.Driver/Core/Bindings/ICoreSessionInternal.cs @@ -0,0 +1,28 @@ +/* Copyright 2010-present MongoDB Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +using System.Threading; +using System.Threading.Tasks; + +namespace MongoDB.Driver.Core.Bindings; + +// TODO: Merge this interface into ICoreSession on major release +internal interface ICoreSessionInternal +{ + void AbortTransaction(AbortTransactionOptions options, CancellationToken cancellationToken = default); + Task AbortTransactionAsync(AbortTransactionOptions options, CancellationToken cancellationToken = default); + void CommitTransaction(CommitTransactionOptions options, CancellationToken cancellationToken = default); + Task CommitTransactionAsync(CommitTransactionOptions options, CancellationToken cancellationToken = default); +} diff --git a/src/MongoDB.Driver/Core/Bindings/NoCoreSession.cs b/src/MongoDB.Driver/Core/Bindings/NoCoreSession.cs index 7be93d81b43..ac7e68abf06 100644 --- a/src/MongoDB.Driver/Core/Bindings/NoCoreSession.cs +++ b/src/MongoDB.Driver/Core/Bindings/NoCoreSession.cs @@ -24,7 +24,7 @@ namespace MongoDB.Driver.Core.Bindings /// An object that represents no core session. 
/// </summary> /// <seealso cref="MongoDB.Driver.Core.Bindings.ICoreSession" /> - public sealed class NoCoreSession : ICoreSession + public sealed class NoCoreSession : ICoreSession, ICoreSessionInternal { #region static // private static fields @@ -89,13 +89,25 @@ public static ICoreSessionHandle NewHandle() // public methods /// <inheritdoc /> - public void AbortTransaction(CancellationToken cancellationToken = default(CancellationToken)) + public void AbortTransaction(CancellationToken cancellationToken = default) + { + throw new NotSupportedException("NoCoreSession does not support AbortTransaction."); + } + + // TODO: CSOT: Make it public when CSOT will be ready for GA and add default value to cancellationToken parameter. + void ICoreSessionInternal.AbortTransaction(AbortTransactionOptions options, CancellationToken cancellationToken ) { throw new NotSupportedException("NoCoreSession does not support AbortTransaction."); } /// <inheritdoc /> - public Task AbortTransactionAsync(CancellationToken cancellationToken = default(CancellationToken)) + public Task AbortTransactionAsync(CancellationToken cancellationToken = default) + { + throw new NotSupportedException("NoCoreSession does not support AbortTransactionAsync."); + } + + // TODO: CSOT: Make it public when CSOT will be ready for GA and add default value to cancellationToken parameter. 
+ Task ICoreSessionInternal.AbortTransactionAsync(AbortTransactionOptions options, CancellationToken cancellationToken ) { throw new NotSupportedException("NoCoreSession does not support AbortTransactionAsync."); } @@ -122,13 +134,25 @@ public long AdvanceTransactionNumber() } /// <inheritdoc /> - public void CommitTransaction(CancellationToken cancellationToken = default(CancellationToken)) + public void CommitTransaction(CancellationToken cancellationToken = default) + { + throw new NotSupportedException("NoCoreSession does not support CommitTransaction."); + } + + // TODO: CSOT: Make it public when CSOT will be ready for GA and add default value to cancellationToken parameter. + void ICoreSessionInternal.CommitTransaction(CommitTransactionOptions options, CancellationToken cancellationToken) { throw new NotSupportedException("NoCoreSession does not support CommitTransaction."); } /// <inheritdoc /> - public Task CommitTransactionAsync(CancellationToken cancellationToken = default(CancellationToken)) + public Task CommitTransactionAsync(CancellationToken cancellationToken = default) + { + throw new NotSupportedException("NoCoreSession does not support CommitTransactionAsync."); + } + + // TODO: CSOT: Make it public when CSOT will be ready for GA and add default value to cancellationToken parameter. 
+ Task ICoreSessionInternal.CommitTransactionAsync(CommitTransactionOptions options, CancellationToken cancellationToken) { throw new NotSupportedException("NoCoreSession does not support CommitTransactionAsync."); } diff --git a/src/MongoDB.Driver/Core/Bindings/ReadBindingHandle.cs b/src/MongoDB.Driver/Core/Bindings/ReadBindingHandle.cs index 63605e4b6c3..99dfa24212d 100644 --- a/src/MongoDB.Driver/Core/Bindings/ReadBindingHandle.cs +++ b/src/MongoDB.Driver/Core/Bindings/ReadBindingHandle.cs @@ -15,7 +15,6 @@ using System; using System.Collections.Generic; -using System.Threading; using System.Threading.Tasks; using MongoDB.Driver.Core.Misc; using MongoDB.Driver.Core.Servers; @@ -47,28 +46,28 @@ public ICoreSessionHandle Session get { return _reference.Instance.Session; } } - public IChannelSourceHandle GetReadChannelSource(CancellationToken cancellationToken) + public IChannelSourceHandle GetReadChannelSource(OperationContext operationContext) { ThrowIfDisposed(); - return _reference.Instance.GetReadChannelSource(cancellationToken); + return _reference.Instance.GetReadChannelSource(operationContext); } - public Task<IChannelSourceHandle> GetReadChannelSourceAsync(CancellationToken cancellationToken) + public Task<IChannelSourceHandle> GetReadChannelSourceAsync(OperationContext operationContext) { ThrowIfDisposed(); - return _reference.Instance.GetReadChannelSourceAsync(cancellationToken); + return _reference.Instance.GetReadChannelSourceAsync(operationContext); } - public IChannelSourceHandle GetReadChannelSource(IReadOnlyCollection<ServerDescription> deprioritizedServers, CancellationToken cancellationToken) + public IChannelSourceHandle GetReadChannelSource(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers) { ThrowIfDisposed(); - return _reference.Instance.GetReadChannelSource(deprioritizedServers, cancellationToken); + return _reference.Instance.GetReadChannelSource(operationContext, deprioritizedServers); } - 
public Task<IChannelSourceHandle> GetReadChannelSourceAsync(IReadOnlyCollection<ServerDescription> deprioritizedServers, CancellationToken cancellationToken) + public Task<IChannelSourceHandle> GetReadChannelSourceAsync(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers) { ThrowIfDisposed(); - return _reference.Instance.GetReadChannelSourceAsync(deprioritizedServers, cancellationToken); + return _reference.Instance.GetReadChannelSourceAsync(operationContext, deprioritizedServers); } public void Dispose() diff --git a/src/MongoDB.Driver/Core/Bindings/ReadPreferenceBinding.cs b/src/MongoDB.Driver/Core/Bindings/ReadPreferenceBinding.cs index 54cd2faf4a2..32106f0efcd 100644 --- a/src/MongoDB.Driver/Core/Bindings/ReadPreferenceBinding.cs +++ b/src/MongoDB.Driver/Core/Bindings/ReadPreferenceBinding.cs @@ -15,7 +15,6 @@ using System; using System.Collections.Generic; -using System.Threading; using System.Threading.Tasks; using MongoDB.Driver.Core.Clusters; using MongoDB.Driver.Core.Clusters.ServerSelectors; @@ -52,27 +51,27 @@ public ICoreSessionHandle Session get { return _session; } } - public IChannelSourceHandle GetReadChannelSource(CancellationToken cancellationToken) + public IChannelSourceHandle GetReadChannelSource(OperationContext operationContext) { - return GetReadChannelSource(null, cancellationToken); + return GetReadChannelSource(operationContext, null); } - public Task<IChannelSourceHandle> GetReadChannelSourceAsync(CancellationToken cancellationToken) + public Task<IChannelSourceHandle> GetReadChannelSourceAsync(OperationContext operationContext) { - return GetReadChannelSourceAsync(null, cancellationToken); + return GetReadChannelSourceAsync(operationContext, null); } - public IChannelSourceHandle GetReadChannelSource(IReadOnlyCollection<ServerDescription> deprioritizedServers, CancellationToken cancellationToken) + public IChannelSourceHandle GetReadChannelSource(OperationContext operationContext, 
IReadOnlyCollection<ServerDescription> deprioritizedServers) { ThrowIfDisposed(); - var server = _cluster.SelectServerAndPinIfNeeded(_session, _serverSelector, deprioritizedServers, cancellationToken); + var server = _cluster.SelectServerAndPinIfNeeded(operationContext, _session, _serverSelector, deprioritizedServers); return GetChannelSourceHelper(server); } - public async Task<IChannelSourceHandle> GetReadChannelSourceAsync(IReadOnlyCollection<ServerDescription> deprioritizedServers, CancellationToken cancellationToken) + public async Task<IChannelSourceHandle> GetReadChannelSourceAsync(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers) { ThrowIfDisposed(); - var server = await _cluster.SelectServerAndPinIfNeededAsync(_session, _serverSelector, deprioritizedServers, cancellationToken).ConfigureAwait(false); + var server = await _cluster.SelectServerAndPinIfNeededAsync(operationContext, _session, _serverSelector, deprioritizedServers).ConfigureAwait(false); return GetChannelSourceHelper(server); } diff --git a/src/MongoDB.Driver/Core/Bindings/ReadWriteBindingHandle.cs b/src/MongoDB.Driver/Core/Bindings/ReadWriteBindingHandle.cs index 7a298b3af60..0409d3ae844 100644 --- a/src/MongoDB.Driver/Core/Bindings/ReadWriteBindingHandle.cs +++ b/src/MongoDB.Driver/Core/Bindings/ReadWriteBindingHandle.cs @@ -15,7 +15,6 @@ using System; using System.Collections.Generic; -using System.Threading; using System.Threading.Tasks; using MongoDB.Driver.Core.Misc; using MongoDB.Driver.Core.Servers; @@ -47,76 +46,76 @@ public ICoreSessionHandle Session get { return _reference.Instance.Session; } } - public IChannelSourceHandle GetReadChannelSource(CancellationToken cancellationToken) + public IChannelSourceHandle GetReadChannelSource(OperationContext operationContext) { ThrowIfDisposed(); - return _reference.Instance.GetReadChannelSource(cancellationToken); + return _reference.Instance.GetReadChannelSource(operationContext); } - public 
Task<IChannelSourceHandle> GetReadChannelSourceAsync(CancellationToken cancellationToken) + public Task<IChannelSourceHandle> GetReadChannelSourceAsync(OperationContext operationContext) { ThrowIfDisposed(); - return _reference.Instance.GetReadChannelSourceAsync(cancellationToken); + return _reference.Instance.GetReadChannelSourceAsync(operationContext); } - public IChannelSourceHandle GetReadChannelSource(IReadOnlyCollection<ServerDescription> deprioritizedServers, CancellationToken cancellationToken) + public IChannelSourceHandle GetReadChannelSource(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers) { ThrowIfDisposed(); - return _reference.Instance.GetReadChannelSource(deprioritizedServers, cancellationToken); + return _reference.Instance.GetReadChannelSource(operationContext, deprioritizedServers); } - public Task<IChannelSourceHandle> GetReadChannelSourceAsync(IReadOnlyCollection<ServerDescription> deprioritizedServers, CancellationToken cancellationToken) + public Task<IChannelSourceHandle> GetReadChannelSourceAsync(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers) { ThrowIfDisposed(); - return _reference.Instance.GetReadChannelSourceAsync(deprioritizedServers, cancellationToken); + return _reference.Instance.GetReadChannelSourceAsync(operationContext, deprioritizedServers); } - public IChannelSourceHandle GetWriteChannelSource(CancellationToken cancellationToken) + public IChannelSourceHandle GetWriteChannelSource(OperationContext operationContext) { ThrowIfDisposed(); - return _reference.Instance.GetWriteChannelSource(cancellationToken); + return _reference.Instance.GetWriteChannelSource(operationContext); } - public IChannelSourceHandle GetWriteChannelSource(IReadOnlyCollection<ServerDescription> deprioritizedServers, CancellationToken cancellationToken) + public IChannelSourceHandle GetWriteChannelSource(OperationContext operationContext, 
IReadOnlyCollection<ServerDescription> deprioritizedServers) { ThrowIfDisposed(); - return _reference.Instance.GetWriteChannelSource(deprioritizedServers, cancellationToken); + return _reference.Instance.GetWriteChannelSource(operationContext, deprioritizedServers); } - public IChannelSourceHandle GetWriteChannelSource(IMayUseSecondaryCriteria mayUseSecondary, CancellationToken cancellationToken) + public IChannelSourceHandle GetWriteChannelSource(OperationContext operationContext, IMayUseSecondaryCriteria mayUseSecondary) { ThrowIfDisposed(); - return _reference.Instance.GetWriteChannelSource(mayUseSecondary, cancellationToken); + return _reference.Instance.GetWriteChannelSource(operationContext, mayUseSecondary); } - public IChannelSourceHandle GetWriteChannelSource(IReadOnlyCollection<ServerDescription> deprioritizedServers, IMayUseSecondaryCriteria mayUseSecondary, CancellationToken cancellationToken) + public IChannelSourceHandle GetWriteChannelSource(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers, IMayUseSecondaryCriteria mayUseSecondary) { ThrowIfDisposed(); - return _reference.Instance.GetWriteChannelSource(deprioritizedServers, mayUseSecondary, cancellationToken); + return _reference.Instance.GetWriteChannelSource(operationContext, deprioritizedServers, mayUseSecondary); } - public Task<IChannelSourceHandle> GetWriteChannelSourceAsync(CancellationToken cancellationToken) + public Task<IChannelSourceHandle> GetWriteChannelSourceAsync(OperationContext operationContext) { ThrowIfDisposed(); - return _reference.Instance.GetWriteChannelSourceAsync(cancellationToken); + return _reference.Instance.GetWriteChannelSourceAsync(operationContext); } - public Task<IChannelSourceHandle> GetWriteChannelSourceAsync(IReadOnlyCollection<ServerDescription> deprioritizedServers, CancellationToken cancellationToken) + public Task<IChannelSourceHandle> GetWriteChannelSourceAsync(OperationContext operationContext, 
IReadOnlyCollection<ServerDescription> deprioritizedServers) { ThrowIfDisposed(); - return _reference.Instance.GetWriteChannelSourceAsync(deprioritizedServers, cancellationToken); + return _reference.Instance.GetWriteChannelSourceAsync(operationContext, deprioritizedServers); } - public Task<IChannelSourceHandle> GetWriteChannelSourceAsync(IMayUseSecondaryCriteria mayUseSecondary, CancellationToken cancellationToken) + public Task<IChannelSourceHandle> GetWriteChannelSourceAsync(OperationContext operationContext, IMayUseSecondaryCriteria mayUseSecondary) { ThrowIfDisposed(); - return _reference.Instance.GetWriteChannelSourceAsync(mayUseSecondary, cancellationToken); + return _reference.Instance.GetWriteChannelSourceAsync(operationContext, mayUseSecondary); } - public Task<IChannelSourceHandle> GetWriteChannelSourceAsync(IReadOnlyCollection<ServerDescription> deprioritizedServers, IMayUseSecondaryCriteria mayUseSecondary, CancellationToken cancellationToken) + public Task<IChannelSourceHandle> GetWriteChannelSourceAsync(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers, IMayUseSecondaryCriteria mayUseSecondary) { ThrowIfDisposed(); - return _reference.Instance.GetWriteChannelSourceAsync(deprioritizedServers, mayUseSecondary, cancellationToken); + return _reference.Instance.GetWriteChannelSourceAsync(operationContext, deprioritizedServers, mayUseSecondary); } public void Dispose() diff --git a/src/MongoDB.Driver/Core/Bindings/ServerChannelSource.cs b/src/MongoDB.Driver/Core/Bindings/ServerChannelSource.cs index a4cadb0001f..bd8080404ca 100644 --- a/src/MongoDB.Driver/Core/Bindings/ServerChannelSource.cs +++ b/src/MongoDB.Driver/Core/Bindings/ServerChannelSource.cs @@ -14,7 +14,6 @@ */ using System; -using System.Threading; using System.Threading.Tasks; using MongoDB.Driver.Core.Misc; using MongoDB.Driver.Core.Servers; @@ -36,20 +35,11 @@ public ServerChannelSource(IServer server, ICoreSessionHandle session) } // properties 
- public IServer Server - { - get { return _server; } - } + public IServer Server => _server; - public ServerDescription ServerDescription - { - get { return _server.Description; } - } + public ServerDescription ServerDescription => _server.Description; - public ICoreSessionHandle Session - { - get { return _session; } - } + public ICoreSessionHandle Session => _session; // methods public void Dispose() @@ -61,16 +51,16 @@ public void Dispose() } } - public IChannelHandle GetChannel(CancellationToken cancellationToken) + public IChannelHandle GetChannel(OperationContext operationContext) { ThrowIfDisposed(); - return _server.GetChannel(cancellationToken); + return _server.GetChannel(operationContext); } - public Task<IChannelHandle> GetChannelAsync(CancellationToken cancellationToken) + public Task<IChannelHandle> GetChannelAsync(OperationContext operationContext) { ThrowIfDisposed(); - return _server.GetChannelAsync(cancellationToken); + return _server.GetChannelAsync(operationContext); } private void ThrowIfDisposed() diff --git a/src/MongoDB.Driver/Core/Bindings/SingleServerReadBinding.cs b/src/MongoDB.Driver/Core/Bindings/SingleServerReadBinding.cs index 6bd5b858021..04a65fbd4b3 100644 --- a/src/MongoDB.Driver/Core/Bindings/SingleServerReadBinding.cs +++ b/src/MongoDB.Driver/Core/Bindings/SingleServerReadBinding.cs @@ -48,26 +48,26 @@ public ICoreSessionHandle Session get { return _session; } } - public IChannelSourceHandle GetReadChannelSource(CancellationToken cancellationToken) + public IChannelSourceHandle GetReadChannelSource(OperationContext operationContext) { ThrowIfDisposed(); return GetChannelSourceHelper(); } - public Task<IChannelSourceHandle> GetReadChannelSourceAsync(CancellationToken cancellationToken) + public Task<IChannelSourceHandle> GetReadChannelSourceAsync(OperationContext operationContext) { ThrowIfDisposed(); return Task.FromResult(GetChannelSourceHelper()); } - public IChannelSourceHandle 
GetReadChannelSource(IReadOnlyCollection<ServerDescription> deprioritizedServers, CancellationToken cancellationToken) + public IChannelSourceHandle GetReadChannelSource(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers) { - return GetReadChannelSource(cancellationToken); + return GetReadChannelSource(operationContext); } - public Task<IChannelSourceHandle> GetReadChannelSourceAsync(IReadOnlyCollection<ServerDescription> deprioritizedServers, CancellationToken cancellationToken) + public Task<IChannelSourceHandle> GetReadChannelSourceAsync(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers) { - return GetReadChannelSourceAsync(cancellationToken); + return GetReadChannelSourceAsync(operationContext); } public void Dispose() diff --git a/src/MongoDB.Driver/Core/Bindings/SingleServerReadWriteBinding.cs b/src/MongoDB.Driver/Core/Bindings/SingleServerReadWriteBinding.cs index 4f7756a1d7d..5113baa09c0 100644 --- a/src/MongoDB.Driver/Core/Bindings/SingleServerReadWriteBinding.cs +++ b/src/MongoDB.Driver/Core/Bindings/SingleServerReadWriteBinding.cs @@ -15,7 +15,6 @@ using System; using System.Collections.Generic; -using System.Threading; using System.Threading.Tasks; using MongoDB.Driver.Core.Misc; using MongoDB.Driver.Core.Servers; @@ -53,68 +52,68 @@ public void Dispose() } } - public IChannelSourceHandle GetReadChannelSource(CancellationToken cancellationToken) + public IChannelSourceHandle GetReadChannelSource(OperationContext operationContext) { ThrowIfDisposed(); return GetChannelSourceHelper(); } - public Task<IChannelSourceHandle> GetReadChannelSourceAsync(CancellationToken cancellationToken) + public Task<IChannelSourceHandle> GetReadChannelSourceAsync(OperationContext operationContext) { ThrowIfDisposed(); return Task.FromResult(GetChannelSourceHelper()); } - public IChannelSourceHandle GetReadChannelSource(IReadOnlyCollection<ServerDescription> deprioritizedServers, 
CancellationToken cancellationToken) + public IChannelSourceHandle GetReadChannelSource(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers) { - return GetReadChannelSource(cancellationToken); + return GetReadChannelSource(operationContext); } - public Task<IChannelSourceHandle> GetReadChannelSourceAsync(IReadOnlyCollection<ServerDescription> deprioritizedServers, CancellationToken cancellationToken) + public Task<IChannelSourceHandle> GetReadChannelSourceAsync(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers) { - return GetReadChannelSourceAsync(cancellationToken); + return GetReadChannelSourceAsync(operationContext); } - public IChannelSourceHandle GetWriteChannelSource(CancellationToken cancellationToken) + public IChannelSourceHandle GetWriteChannelSource(OperationContext operationContext) { ThrowIfDisposed(); return GetChannelSourceHelper(); } - public IChannelSourceHandle GetWriteChannelSource(IReadOnlyCollection<ServerDescription> deprioritizedServers, CancellationToken cancellationToken) + public IChannelSourceHandle GetWriteChannelSource(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers) { - return GetWriteChannelSource(cancellationToken); + return GetWriteChannelSource(operationContext); } - public IChannelSourceHandle GetWriteChannelSource(IMayUseSecondaryCriteria mayUseSecondary, CancellationToken cancellationToken) + public IChannelSourceHandle GetWriteChannelSource(OperationContext operationContext, IMayUseSecondaryCriteria mayUseSecondary) { - return GetWriteChannelSource(cancellationToken); // ignore mayUseSecondary + return GetWriteChannelSource(operationContext); // ignore mayUseSecondary } - public IChannelSourceHandle GetWriteChannelSource(IReadOnlyCollection<ServerDescription> deprioritizedServers, IMayUseSecondaryCriteria mayUseSecondary, CancellationToken cancellationToken) + public IChannelSourceHandle 
GetWriteChannelSource(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers, IMayUseSecondaryCriteria mayUseSecondary) { - return GetWriteChannelSource(mayUseSecondary, cancellationToken); + return GetWriteChannelSource(operationContext, mayUseSecondary); } - public Task<IChannelSourceHandle> GetWriteChannelSourceAsync(CancellationToken cancellationToken) + public Task<IChannelSourceHandle> GetWriteChannelSourceAsync(OperationContext operationContext) { ThrowIfDisposed(); return Task.FromResult(GetChannelSourceHelper()); } - public Task<IChannelSourceHandle> GetWriteChannelSourceAsync(IReadOnlyCollection<ServerDescription> deprioritizedServers, CancellationToken cancellationToken) + public Task<IChannelSourceHandle> GetWriteChannelSourceAsync(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers) { - return GetWriteChannelSourceAsync(cancellationToken); + return GetWriteChannelSourceAsync(operationContext); } - public Task<IChannelSourceHandle> GetWriteChannelSourceAsync(IMayUseSecondaryCriteria mayUseSecondary, CancellationToken cancellationToken) + public Task<IChannelSourceHandle> GetWriteChannelSourceAsync(OperationContext operationContext, IMayUseSecondaryCriteria mayUseSecondary) { - return GetWriteChannelSourceAsync(cancellationToken); // ignore mayUseSecondary + return GetWriteChannelSourceAsync(operationContext); // ignore mayUseSecondary } - public Task<IChannelSourceHandle> GetWriteChannelSourceAsync(IReadOnlyCollection<ServerDescription> deprioritizedServers, IMayUseSecondaryCriteria mayUseSecondary, CancellationToken cancellationToken) + public Task<IChannelSourceHandle> GetWriteChannelSourceAsync(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers, IMayUseSecondaryCriteria mayUseSecondary) { - return GetWriteChannelSourceAsync(mayUseSecondary, cancellationToken); + return GetWriteChannelSourceAsync(operationContext, 
mayUseSecondary); } private IChannelSourceHandle GetChannelSourceHelper() diff --git a/src/MongoDB.Driver/Core/Bindings/WrappingCoreSession.cs b/src/MongoDB.Driver/Core/Bindings/WrappingCoreSession.cs index 991e46ab115..1d61b552d9d 100644 --- a/src/MongoDB.Driver/Core/Bindings/WrappingCoreSession.cs +++ b/src/MongoDB.Driver/Core/Bindings/WrappingCoreSession.cs @@ -25,7 +25,7 @@ namespace MongoDB.Driver.Core.Bindings /// An abstract base class for a core session that wraps another core session. /// </summary> /// <seealso cref="MongoDB.Driver.Core.Bindings.ICoreSession" /> - public abstract class WrappingCoreSession : ICoreSession + public abstract class WrappingCoreSession : ICoreSession, ICoreSessionInternal { // private fields private bool _disposed; @@ -182,19 +182,33 @@ public ICoreSession Wrapped // public methods /// <inheritdoc /> - public virtual void AbortTransaction(CancellationToken cancellationToken = default(CancellationToken)) + public virtual void AbortTransaction(CancellationToken cancellationToken = default) { ThrowIfDisposed(); _wrapped.AbortTransaction(cancellationToken); } + // TODO: CSOT: Make it public when CSOT will be ready for GA and add default value to cancellationToken parameter. + void ICoreSessionInternal.AbortTransaction(AbortTransactionOptions options, CancellationToken cancellationToken) + { + ThrowIfDisposed(); + _wrapped.AbortTransaction(options, cancellationToken); + } + /// <inheritdoc /> - public virtual Task AbortTransactionAsync(CancellationToken cancellationToken = default(CancellationToken)) + public virtual Task AbortTransactionAsync(CancellationToken cancellationToken = default) { ThrowIfDisposed(); return _wrapped.AbortTransactionAsync(cancellationToken); } + // TODO: CSOT: Make it public when CSOT will be ready for GA and add default value to cancellationToken parameter. 
+ Task ICoreSessionInternal.AbortTransactionAsync(AbortTransactionOptions options, CancellationToken cancellationToken) + { + ThrowIfDisposed(); + return _wrapped.AbortTransactionAsync(options, cancellationToken); + } + /// <inheritdoc /> public virtual void AboutToSendCommand() { @@ -223,19 +237,34 @@ public long AdvanceTransactionNumber() } /// <inheritdoc /> - public virtual void CommitTransaction(CancellationToken cancellationToken = default(CancellationToken)) + public virtual void CommitTransaction(CancellationToken cancellationToken = default) { ThrowIfDisposed(); _wrapped.CommitTransaction(cancellationToken); } + + // TODO: CSOT: Make it public when CSOT will be ready for GA and add default value to cancellationToken parameter. + void ICoreSessionInternal.CommitTransaction(CommitTransactionOptions options, CancellationToken cancellationToken) + { + ThrowIfDisposed(); + _wrapped.CommitTransaction(options, cancellationToken); + } + /// <inheritdoc /> - public virtual Task CommitTransactionAsync(CancellationToken cancellationToken = default(CancellationToken)) + public virtual Task CommitTransactionAsync(CancellationToken cancellationToken = default) { ThrowIfDisposed(); return _wrapped.CommitTransactionAsync(cancellationToken); } + // TODO: CSOT: Make it public when CSOT will be ready for GA and add default value to cancellationToken parameter. 
+ Task ICoreSessionInternal.CommitTransactionAsync(CommitTransactionOptions options, CancellationToken cancellationToken) + { + ThrowIfDisposed(); + return _wrapped.CommitTransactionAsync(options, cancellationToken); + } + /// <inheritdoc /> public void Dispose() { diff --git a/src/MongoDB.Driver/Core/Bindings/WritableServerBinding.cs b/src/MongoDB.Driver/Core/Bindings/WritableServerBinding.cs index 27ea948577e..764bdc0e0ae 100644 --- a/src/MongoDB.Driver/Core/Bindings/WritableServerBinding.cs +++ b/src/MongoDB.Driver/Core/Bindings/WritableServerBinding.cs @@ -15,7 +15,6 @@ using System; using System.Collections.Generic; -using System.Threading; using System.Threading.Tasks; using MongoDB.Driver.Core.Clusters; using MongoDB.Driver.Core.Clusters.ServerSelectors; @@ -48,48 +47,48 @@ public ICoreSessionHandle Session get { return _session; } } - public IChannelSourceHandle GetReadChannelSource(CancellationToken cancellationToken) + public IChannelSourceHandle GetReadChannelSource(OperationContext operationContext) { - return GetReadChannelSource(null, cancellationToken); + return GetReadChannelSource(operationContext, null); } - public Task<IChannelSourceHandle> GetReadChannelSourceAsync(CancellationToken cancellationToken) + public Task<IChannelSourceHandle> GetReadChannelSourceAsync(OperationContext operationContext) { - return GetReadChannelSourceAsync(null, cancellationToken); + return GetReadChannelSourceAsync(operationContext, null); } - public IChannelSourceHandle GetReadChannelSource(IReadOnlyCollection<ServerDescription> deprioritizedServers, CancellationToken cancellationToken) + public IChannelSourceHandle GetReadChannelSource(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers) { ThrowIfDisposed(); - var server = _cluster.SelectServerAndPinIfNeeded(_session, WritableServerSelector.Instance, deprioritizedServers, cancellationToken); + var server = _cluster.SelectServerAndPinIfNeeded(operationContext, _session, 
WritableServerSelector.Instance, deprioritizedServers); return CreateServerChannelSource(server); } - public async Task<IChannelSourceHandle> GetReadChannelSourceAsync(IReadOnlyCollection<ServerDescription> deprioritizedServers, CancellationToken cancellationToken) + public async Task<IChannelSourceHandle> GetReadChannelSourceAsync(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers) { ThrowIfDisposed(); - var server = await _cluster.SelectServerAndPinIfNeededAsync(_session, WritableServerSelector.Instance, deprioritizedServers, cancellationToken).ConfigureAwait(false); + var server = await _cluster.SelectServerAndPinIfNeededAsync(operationContext, _session, WritableServerSelector.Instance, deprioritizedServers).ConfigureAwait(false); return CreateServerChannelSource(server); } - public IChannelSourceHandle GetWriteChannelSource(CancellationToken cancellationToken) + public IChannelSourceHandle GetWriteChannelSource(OperationContext operationContext) { - return GetWriteChannelSource(deprioritizedServers: null, cancellationToken); + return GetWriteChannelSource(operationContext, deprioritizedServers: null); } - public IChannelSourceHandle GetWriteChannelSource(IReadOnlyCollection<ServerDescription> deprioritizedServers, CancellationToken cancellationToken) + public IChannelSourceHandle GetWriteChannelSource(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers) { ThrowIfDisposed(); - var server = _cluster.SelectServerAndPinIfNeeded(_session, WritableServerSelector.Instance, deprioritizedServers, cancellationToken); + var server = _cluster.SelectServerAndPinIfNeeded(operationContext, _session, WritableServerSelector.Instance, deprioritizedServers); return CreateServerChannelSource(server); } - public IChannelSourceHandle GetWriteChannelSource(IMayUseSecondaryCriteria mayUseSecondary, CancellationToken cancellationToken) + public IChannelSourceHandle 
GetWriteChannelSource(OperationContext operationContext, IMayUseSecondaryCriteria mayUseSecondary) { - return GetWriteChannelSource(null, mayUseSecondary, cancellationToken); + return GetWriteChannelSource(operationContext, null, mayUseSecondary); } - public IChannelSourceHandle GetWriteChannelSource(IReadOnlyCollection<ServerDescription> deprioritizedServers, IMayUseSecondaryCriteria mayUseSecondary, CancellationToken cancellationToken) + public IChannelSourceHandle GetWriteChannelSource(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers, IMayUseSecondaryCriteria mayUseSecondary) { if (IsSessionPinnedToServer()) { @@ -102,28 +101,28 @@ public IChannelSourceHandle GetWriteChannelSource(IReadOnlyCollection<ServerDesc ? (IServerSelector)new CompositeServerSelector(new IServerSelector[] { new PriorityServerSelector(deprioritizedServers), writableServerSelector }) : writableServerSelector; - var server = _cluster.SelectServer(selector, cancellationToken); + var server = _cluster.SelectServer(operationContext, selector); return CreateServerChannelSource(server); } - public Task<IChannelSourceHandle> GetWriteChannelSourceAsync(CancellationToken cancellationToken) + public Task<IChannelSourceHandle> GetWriteChannelSourceAsync(OperationContext operationContext) { - return GetWriteChannelSourceAsync(deprioritizedServers: null, cancellationToken); + return GetWriteChannelSourceAsync(operationContext, deprioritizedServers: null); } - public async Task<IChannelSourceHandle> GetWriteChannelSourceAsync(IReadOnlyCollection<ServerDescription> deprioritizedServers, CancellationToken cancellationToken) + public async Task<IChannelSourceHandle> GetWriteChannelSourceAsync(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers) { ThrowIfDisposed(); - var server = await _cluster.SelectServerAndPinIfNeededAsync(_session, WritableServerSelector.Instance, deprioritizedServers, 
cancellationToken).ConfigureAwait(false); + var server = await _cluster.SelectServerAndPinIfNeededAsync(operationContext, _session, WritableServerSelector.Instance, deprioritizedServers).ConfigureAwait(false); return CreateServerChannelSource(server); } - public Task<IChannelSourceHandle> GetWriteChannelSourceAsync(IMayUseSecondaryCriteria mayUseSecondary, CancellationToken cancellationToken) + public Task<IChannelSourceHandle> GetWriteChannelSourceAsync(OperationContext operationContext, IMayUseSecondaryCriteria mayUseSecondary) { - return GetWriteChannelSourceAsync(null, mayUseSecondary, cancellationToken); + return GetWriteChannelSourceAsync(operationContext, null, mayUseSecondary); } - public async Task<IChannelSourceHandle> GetWriteChannelSourceAsync(IReadOnlyCollection<ServerDescription> deprioritizedServers, IMayUseSecondaryCriteria mayUseSecondary, CancellationToken cancellationToken) + public async Task<IChannelSourceHandle> GetWriteChannelSourceAsync(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers, IMayUseSecondaryCriteria mayUseSecondary) { if (IsSessionPinnedToServer()) { @@ -136,7 +135,7 @@ public async Task<IChannelSourceHandle> GetWriteChannelSourceAsync(IReadOnlyColl ? 
new CompositeServerSelector(new IServerSelector[] { new PriorityServerSelector(deprioritizedServers), writableServerSelector }) : writableServerSelector; - var server = await _cluster.SelectServerAsync(selector, cancellationToken).ConfigureAwait(false); + var server = await _cluster.SelectServerAsync(operationContext, selector).ConfigureAwait(false); return CreateServerChannelSource(server); } diff --git a/src/MongoDB.Driver/Core/Clusters/Cluster.cs b/src/MongoDB.Driver/Core/Clusters/Cluster.cs index f1031387530..db14eed8b19 100644 --- a/src/MongoDB.Driver/Core/Clusters/Cluster.cs +++ b/src/MongoDB.Driver/Core/Clusters/Cluster.cs @@ -36,7 +36,6 @@ internal abstract class Cluster : IClusterInternal #region static private static readonly TimeSpan __minHeartbeatIntervalDefault = TimeSpan.FromMilliseconds(500); - private static readonly IServerSelector __randomServerSelector = new RandomServerSelector(); public static SemanticVersion MinSupportedServerVersion { get; } = WireVersion.ToServerVersion(WireVersion.SupportedWireVersionRange.Min); public static Range<int> SupportedWireVersionRange { get; } = WireVersion.SupportedWireVersionRange; @@ -46,18 +45,15 @@ internal abstract class Cluster : IClusterInternal private readonly TimeSpan _minHeartbeatInterval = __minHeartbeatIntervalDefault; private readonly IClusterClock _clusterClock = new ClusterClock(); private readonly ClusterId _clusterId; - private ClusterDescriptionChangeSource _descriptionWithChangedTaskCompletionSource; + private ExpirableClusterDescription _expirableClusterDescription; private readonly LatencyLimitingServerSelector _latencyLimitingServerSelector; protected readonly EventLogger<LogCategories.SDAM> _clusterEventLogger; protected readonly EventLogger<LogCategories.ServerSelection> _serverSelectionEventLogger; - private Timer _rapidHeartbeatTimer; - private readonly object _serverSelectionWaitQueueLock = new object(); - private int _serverSelectionWaitQueueSize; private readonly 
IClusterableServerFactory _serverFactory; + private readonly ServerSelectionWaitQueue _serverSelectionWaitQueue; private readonly ICoreServerSessionPool _serverSessionPool; private readonly ClusterSettings _settings; private readonly InterlockedInt32 _state; - private readonly InterlockedInt32 _rapidHeartbeatTimerCallbackState; // constructors protected Cluster(ClusterSettings settings, IClusterableServerFactory serverFactory, IEventSubscriber eventSubscriber, ILoggerFactory loggerFactory) @@ -67,15 +63,11 @@ protected Cluster(ClusterSettings settings, IClusterableServerFactory serverFact _serverFactory = Ensure.IsNotNull(serverFactory, nameof(serverFactory)); Ensure.IsNotNull(eventSubscriber, nameof(eventSubscriber)); _state = new InterlockedInt32(State.Initial); - _rapidHeartbeatTimerCallbackState = new InterlockedInt32(RapidHeartbeatTimerCallbackState.NotRunning); _clusterId = new ClusterId(); - _descriptionWithChangedTaskCompletionSource = new (ClusterDescription.CreateInitial(_clusterId, _settings.DirectConnection)); + _expirableClusterDescription = new (this, ClusterDescription.CreateInitial(_clusterId, _settings.DirectConnection)); _latencyLimitingServerSelector = new LatencyLimitingServerSelector(settings.LocalThreshold); - - _rapidHeartbeatTimer = new Timer(RapidHeartbeatTimerCallback, null, Timeout.InfiniteTimeSpan, Timeout.InfiniteTimeSpan); - + _serverSelectionWaitQueue = new ServerSelectionWaitQueue(this); _serverSessionPool = new CoreServerSessionPool(this); - _clusterEventLogger = loggerFactory.CreateEventLogger<LogCategories.SDAM>(eventSubscriber); _serverSelectionEventLogger = loggerFactory.CreateEventLogger<LogCategories.ServerSelection>(eventSubscriber); } @@ -93,10 +85,12 @@ public ClusterDescription Description { get { - return _descriptionWithChangedTaskCompletionSource.ClusterDescription; + return _expirableClusterDescription.ClusterDescription; } } + public abstract IEnumerable<IClusterableServer> Servers { get; } + public ClusterSettings 
Settings { get { return _settings; } @@ -127,53 +121,19 @@ protected virtual void Dispose(bool disposing) var newClusterDescription = new ClusterDescription( _clusterId, - _descriptionWithChangedTaskCompletionSource.ClusterDescription.DirectConnection, + _expirableClusterDescription.ClusterDescription.DirectConnection, dnsMonitorException: null, ClusterType.Unknown, Enumerable.Empty<ServerDescription>()); UpdateClusterDescription(newClusterDescription); - _rapidHeartbeatTimer.Dispose(); + _serverSelectionWaitQueue.Dispose(); _clusterEventLogger.Logger?.LogTrace(_clusterId, "Cluster disposed"); } } - private void EnterServerSelectionWaitQueue(IServerSelector selector, ClusterDescription clusterDescription, long? operationId, TimeSpan remainingTime) - { - lock (_serverSelectionWaitQueueLock) - { - if (_serverSelectionWaitQueueSize >= _settings.MaxServerSelectionWaitQueueSize) - { - throw MongoWaitQueueFullException.ForServerSelection(); - } - - if (++_serverSelectionWaitQueueSize == 1) - { - _rapidHeartbeatTimer.Change(TimeSpan.Zero, _minHeartbeatInterval); - } - - _serverSelectionEventLogger.LogAndPublish(new ClusterEnteredSelectionQueueEvent( - clusterDescription, - selector, - operationId, - EventContext.OperationName, - remainingTime)); - } - } - - private void ExitServerSelectionWaitQueue() - { - lock (_serverSelectionWaitQueueLock) - { - if (--_serverSelectionWaitQueueSize == 0) - { - _rapidHeartbeatTimer.Change(Timeout.InfiniteTimeSpan, Timeout.InfiniteTimeSpan); - } - } - } - public virtual void Initialize() { ThrowIfDisposed(); @@ -183,28 +143,6 @@ public virtual void Initialize() } } - private void RapidHeartbeatTimerCallback(object args) - { - // avoid requesting heartbeat reentrantly - if (_rapidHeartbeatTimerCallbackState.TryChange(RapidHeartbeatTimerCallbackState.NotRunning, RapidHeartbeatTimerCallbackState.Running)) - { - try - { - RequestHeartbeat(); - } - catch - { - // TODO: Trace this - // If we don't protect this call, we could - // take down the 
app domain. - } - finally - { - _rapidHeartbeatTimerCallbackState.TryChange(RapidHeartbeatTimerCallbackState.NotRunning); - } - } - } - protected abstract void RequestHeartbeat(); protected void OnDescriptionChanged(ClusterDescription oldDescription, ClusterDescription newDescription, bool shouldClusterDescriptionChangedEventBePublished) @@ -217,62 +155,80 @@ protected void OnDescriptionChanged(ClusterDescription oldDescription, ClusterDe DescriptionChanged?.Invoke(this, new ClusterDescriptionChangedEventArgs(oldDescription, newDescription)); } - public IServer SelectServer(IServerSelector selector, CancellationToken cancellationToken) + public IServer SelectServer(OperationContext operationContext, IServerSelector selector) { - ThrowIfDisposedOrNotOpen(); Ensure.IsNotNull(selector, nameof(selector)); + Ensure.IsNotNull(operationContext, nameof(operationContext)); + ThrowIfDisposedOrNotOpen(); - using (var helper = new SelectServerHelper(this, selector)) + using var serverSelectionOperationContext = operationContext.WithTimeout(Settings.ServerSelectionTimeout); + var expirableClusterDescription = _expirableClusterDescription; + IDisposable serverSelectionWaitQueueDisposer = null; + (selector, var operationCountSelector, var stopwatch) = BeginServerSelection(expirableClusterDescription.ClusterDescription, selector); + + try { - try + while (true) { - while (true) + var server = SelectServer(expirableClusterDescription, selector, operationCountSelector); + if (server != null) { - var server = helper.SelectServer(); - if (server != null) - { - return server; - } - - helper.WaitingForDescriptionToChange(); - WaitForDescriptionChanged(helper.Selector, helper.Description, helper.DescriptionChangedTask, helper.TimeoutRemaining, cancellationToken); + EndServerSelection(expirableClusterDescription.ClusterDescription, selector, server.Description, stopwatch); + return server; } + + serverSelectionWaitQueueDisposer ??= 
_serverSelectionWaitQueue.Enter(serverSelectionOperationContext, selector, expirableClusterDescription.ClusterDescription, EventContext.OperationId); + + serverSelectionOperationContext.WaitTask(expirableClusterDescription.Expired); + expirableClusterDescription = _expirableClusterDescription; } - catch (Exception ex) - { - helper.HandleException(ex); - throw; - } + } + catch (Exception ex) + { + throw HandleServerSelectionException(expirableClusterDescription.ClusterDescription, selector, ex, stopwatch); + } + finally + { + serverSelectionWaitQueueDisposer?.Dispose(); } } - public async Task<IServer> SelectServerAsync(IServerSelector selector, CancellationToken cancellationToken) + public async Task<IServer> SelectServerAsync(OperationContext operationContext, IServerSelector selector) { - ThrowIfDisposedOrNotOpen(); Ensure.IsNotNull(selector, nameof(selector)); + Ensure.IsNotNull(operationContext, nameof(operationContext)); + ThrowIfDisposedOrNotOpen(); + + using var serverSelectionOperationContext = operationContext.WithTimeout(Settings.ServerSelectionTimeout); + var expirableClusterDescription = _expirableClusterDescription; + IDisposable serverSelectionWaitQueueDisposer = null; + (selector, var operationCountSelector, var stopwatch) = BeginServerSelection(expirableClusterDescription.ClusterDescription, selector); - using (var helper = new SelectServerHelper(this, selector)) + try { - try + while (true) { - while (true) + var server = SelectServer(expirableClusterDescription, selector, operationCountSelector); + if (server != null) { - var server = helper.SelectServer(); - if (server != null) - { - return server; - } - - helper.WaitingForDescriptionToChange(); - await WaitForDescriptionChangedAsync(helper.Selector, helper.Description, helper.DescriptionChangedTask, helper.TimeoutRemaining, cancellationToken).ConfigureAwait(false); + EndServerSelection(expirableClusterDescription.ClusterDescription, selector, server.Description, stopwatch); + return server; } - 
} - catch (Exception ex) - { - helper.HandleException(ex); - throw; + + serverSelectionWaitQueueDisposer ??= _serverSelectionWaitQueue.Enter(serverSelectionOperationContext, selector, expirableClusterDescription.ClusterDescription, EventContext.OperationId); + + await serverSelectionOperationContext.WaitTaskAsync(expirableClusterDescription.Expired).ConfigureAwait(false); + expirableClusterDescription = _expirableClusterDescription; } } + catch (Exception ex) + { + throw HandleServerSelectionException(expirableClusterDescription.ClusterDescription, selector, ex, stopwatch); + } + finally + { + serverSelectionWaitQueueDisposer?.Dispose(); + } } public ICoreSessionHandle StartSession(CoreSessionOptions options) @@ -286,321 +242,292 @@ public ICoreSessionHandle StartSession(CoreSessionOptions options) protected void UpdateClusterDescription(ClusterDescription newClusterDescription, bool shouldClusterDescriptionChangedEventBePublished = true) { - var oldClusterDescription = Interlocked.Exchange(ref _descriptionWithChangedTaskCompletionSource, new(newClusterDescription)); - - OnDescriptionChanged(oldClusterDescription.ClusterDescription, newClusterDescription, shouldClusterDescriptionChangedEventBePublished); + var expiredClusterDescription = Interlocked.Exchange(ref _expirableClusterDescription, new(this, newClusterDescription)); - oldClusterDescription.TrySetChanged(); - } + OnDescriptionChanged(expiredClusterDescription.ClusterDescription, newClusterDescription, shouldClusterDescriptionChangedEventBePublished); - private string BuildTimeoutExceptionMessage(TimeSpan timeout, IServerSelector selector, ClusterDescription clusterDescription) - { - var ms = (int)Math.Round(timeout.TotalMilliseconds); - return string.Format( - "A timeout occurred after {0}ms selecting a server using {1}. 
Client view of cluster state is {2}.", - ms.ToString(), - selector.ToString(), - clusterDescription.ToString()); + expiredClusterDescription.TrySetExpired(); } - private void ThrowIfDisposed() + private (IServerSelector Selector, OperationsCountServerSelector OperationCountSelector, Stopwatch Stopwatch) BeginServerSelection(ClusterDescription clusterDescription, IServerSelector selector) { - if (_state.Value == State.Disposed) + _serverSelectionEventLogger.LogAndPublish(new ClusterSelectingServerEvent( + clusterDescription, + selector, + EventContext.OperationId, + EventContext.OperationName)); + + var allSelectors = new List<IServerSelector>(5); + if (Settings.PreServerSelector != null) { - throw new ObjectDisposedException(GetType().Name); + allSelectors.Add(Settings.PreServerSelector); } - } - private void ThrowIfDisposedOrNotOpen() - { - if (_state.Value != State.Open) + allSelectors.Add(selector); + if (Settings.PostServerSelector != null) { - ThrowIfDisposed(); - throw new InvalidOperationException("Server must be initialized."); + allSelectors.Add(Settings.PostServerSelector); } - } - private void WaitForDescriptionChanged(IServerSelector selector, ClusterDescription description, Task descriptionChangedTask, TimeSpan timeout, CancellationToken cancellationToken) - { - using (var helper = new WaitForDescriptionChangedHelper(this, selector, description, descriptionChangedTask, timeout, cancellationToken)) - { - var index = Task.WaitAny(helper.Tasks); - helper.HandleCompletedTask(helper.Tasks[index]); - } - } + allSelectors.Add(_latencyLimitingServerSelector); + var operationCountSelector = new OperationsCountServerSelector(Array.Empty<IClusterableServer>()); + allSelectors.Add(operationCountSelector); - private async Task WaitForDescriptionChangedAsync(IServerSelector selector, ClusterDescription description, Task descriptionChangedTask, TimeSpan timeout, CancellationToken cancellationToken) - { - using (var helper = new WaitForDescriptionChangedHelper(this, 
selector, description, descriptionChangedTask, timeout, cancellationToken)) - { - var completedTask = await Task.WhenAny(helper.Tasks).ConfigureAwait(false); - helper.HandleCompletedTask(completedTask); - } + return (new CompositeServerSelector(allSelectors), operationCountSelector, Stopwatch.StartNew()); } - private void ThrowTimeoutException(IServerSelector selector, ClusterDescription description) + private void EndServerSelection(ClusterDescription clusterDescription, IServerSelector selector, ServerDescription selectedServerDescription, Stopwatch stopwatch) { - var message = BuildTimeoutExceptionMessage(_settings.ServerSelectionTimeout, selector, description); - throw new TimeoutException(message); + stopwatch.Stop(); + _serverSelectionEventLogger.LogAndPublish(new ClusterSelectedServerEvent( + clusterDescription, + selector, + selectedServerDescription, + stopwatch.Elapsed, + EventContext.OperationId, + EventContext.OperationName)); } - // nested classes - internal sealed class ClusterDescriptionChangeSource + private Exception HandleServerSelectionException(ClusterDescription clusterDescription, IServerSelector selector, Exception exception, Stopwatch stopwatch) { - private readonly TaskCompletionSource<bool> _changedTaskCompletionSource; - private readonly ClusterDescription _clusterDescription; + stopwatch.Stop(); - public ClusterDescriptionChangeSource(ClusterDescription clusterDescription) + if (exception is TimeoutException) { - _changedTaskCompletionSource = new TaskCompletionSource<bool>(TaskCreationOptions.RunContinuationsAsynchronously); - _clusterDescription = clusterDescription; + var message = $"A timeout occurred after {stopwatch.ElapsedMilliseconds}ms selecting a server using {selector}. 
Client view of cluster state is {clusterDescription}."; + exception = new TimeoutException(message); } - public ClusterDescription ClusterDescription => _clusterDescription; - - public Task Changed => _changedTaskCompletionSource.Task; + _serverSelectionEventLogger.LogAndPublish(new ClusterSelectingServerFailedEvent( + clusterDescription, + selector, + exception, + EventContext.OperationId, + EventContext.OperationName)); - public bool TrySetChanged() - => _changedTaskCompletionSource.TrySetResult(true); + return exception; } - private class SelectServerHelper : IDisposable + private SelectedServer SelectServer(ExpirableClusterDescription clusterDescriptionChangeSource, IServerSelector selector, OperationsCountServerSelector operationCountSelector) { - private readonly Cluster _cluster; - private readonly List<IClusterableServer> _connectedServers; - private readonly List<ServerDescription> _connectedServerDescriptions; - private ClusterDescription _description; - private Task _descriptionChangedTask; - private bool _serverSelectionWaitQueueEntered; - private readonly IServerSelector _selector; - private readonly OperationsCountServerSelector _operationCountServerSelector; - private readonly Stopwatch _stopwatch; - private readonly DateTime _timeoutAt; - - public SelectServerHelper(Cluster cluster, IServerSelector selector) - { - _cluster = cluster; + MongoIncompatibleDriverException.ThrowIfNotSupported(clusterDescriptionChangeSource.ClusterDescription); - _connectedServers = new List<IClusterableServer>(_cluster._descriptionWithChangedTaskCompletionSource.ClusterDescription?.Servers?.Count ?? 
1); - _connectedServerDescriptions = new List<ServerDescription>(_connectedServers.Count); - _operationCountServerSelector = new OperationsCountServerSelector(_connectedServers); + operationCountSelector.PopulateServers(clusterDescriptionChangeSource.ConnectedServers); + var selectedServerDescription = selector + .SelectServers(clusterDescriptionChangeSource.ClusterDescription, clusterDescriptionChangeSource.ConnectedServerDescriptions) + .SingleOrDefault(); - _selector = DecorateSelector(selector); - _stopwatch = Stopwatch.StartNew(); - _timeoutAt = DateTime.UtcNow + _cluster.Settings.ServerSelectionTimeout; - } - - public ClusterDescription Description + if (selectedServerDescription != null) { - get { return _description; } + var selectedServer = clusterDescriptionChangeSource.ConnectedServers.FirstOrDefault(s => EndPointHelper.Equals(s.EndPoint, selectedServerDescription.EndPoint)); + if (selectedServer != null) + { + return new(selectedServer, selectedServerDescription); + } } - public Task DescriptionChangedTask + return default; + } + + private void ThrowIfDisposed() + { + if (_state.Value == State.Disposed) { - get { return _descriptionChangedTask; } + throw new ObjectDisposedException(GetType().Name); } + } - public IServerSelector Selector + private void ThrowIfDisposedOrNotOpen() + { + if (_state.Value != State.Open) { - get { return _selector; } + ThrowIfDisposed(); + throw new InvalidOperationException("Server must be initialized."); } + } - public TimeSpan TimeoutRemaining + // nested classes + internal sealed class ExpirableClusterDescription + { + private readonly Cluster _cluster; + private readonly TaskCompletionSource<bool> _expireCompletionSource; + private readonly ClusterDescription _clusterDescription; + private readonly object _connectedServersLock = new(); + private IReadOnlyList<IClusterableServer> _connectedServers; + private IReadOnlyList<ServerDescription> _connectedServerDescriptions; + + public ExpirableClusterDescription(Cluster 
cluster, ClusterDescription clusterDescription) { - get { return _timeoutAt - DateTime.UtcNow; } + _cluster = cluster; + _clusterDescription = clusterDescription; + _expireCompletionSource = new TaskCompletionSource<bool>(TaskCreationOptions.RunContinuationsAsynchronously); } - public void Dispose() + public ClusterDescription ClusterDescription => _clusterDescription; + + public Task Expired => _expireCompletionSource.Task; + + public IReadOnlyList<IClusterableServer> ConnectedServers { - if (_serverSelectionWaitQueueEntered) + get { - _cluster.ExitServerSelectionWaitQueue(); + EnsureConnectedServersInitialized(); + return _connectedServers; } } - public void HandleException(Exception exception) + public IReadOnlyList<ServerDescription> ConnectedServerDescriptions { - _cluster._serverSelectionEventLogger.LogAndPublish(new ClusterSelectingServerFailedEvent( - _description, - _selector, - exception, - EventContext.OperationId, - EventContext.OperationName)); + get + { + EnsureConnectedServersInitialized(); + return _connectedServerDescriptions; + } } - public IServer SelectServer() - { - var clusterDescription = _cluster._descriptionWithChangedTaskCompletionSource; - _descriptionChangedTask = clusterDescription.Changed; - _description = clusterDescription.ClusterDescription; + public bool TrySetExpired() + => _expireCompletionSource.TrySetResult(true); - if (!_serverSelectionWaitQueueEntered) + private void EnsureConnectedServersInitialized() + { + if (_connectedServers != null) { - // this is our first time through... 
- _cluster._serverSelectionEventLogger.LogAndPublish(new ClusterSelectingServerEvent( - _description, - _selector, - EventContext.OperationId, - EventContext.OperationName)); + return; } - MongoIncompatibleDriverException.ThrowIfNotSupported(_description); + lock (_connectedServersLock) + { + if (_connectedServers != null) + { + return; + } - _connectedServers.Clear(); - _connectedServerDescriptions.Clear(); + var connectedServerDescriptions = new List<ServerDescription>(ClusterDescription.Servers?.Count ?? 1); + var connectedServers = new List<IClusterableServer>(connectedServerDescriptions.Capacity); - foreach (var description in _description.Servers) - { - if (description.State == ServerState.Connected && - _cluster.TryGetServer(description.EndPoint, out var server)) + if (ClusterDescription.Servers != null) { - _connectedServers.Add(server); - _connectedServerDescriptions.Add(description); + foreach (var description in ClusterDescription.Servers) + { + if (description.State == ServerState.Connected && + _cluster.TryGetServer(description.EndPoint, out var server)) + { + connectedServers.Add(server); + connectedServerDescriptions.Add(description); + } + } } - } - var selectedServersDescriptions = _selector - .SelectServers(_description, _connectedServerDescriptions) - .ToList(); + _connectedServerDescriptions = connectedServerDescriptions; + _connectedServers = connectedServers; + } + } + } - IServer selectedServer = null; + private static class State + { + public const int Initial = 0; + public const int Open = 1; + public const int Disposed = 2; + } - if (selectedServersDescriptions.Count > 0) - { - var selectedServerDescription = selectedServersDescriptions.Count == 1 - ? 
selectedServersDescriptions[0] - : __randomServerSelector.SelectServers(_description, selectedServersDescriptions).Single(); + private static class RapidHeartbeatTimerCallbackState + { + public const int NotRunning = 0; + public const int Running = 1; + } - selectedServer = _connectedServers.FirstOrDefault(s => EndPointHelper.Equals(s.EndPoint, selectedServerDescription.EndPoint)); - } + private sealed class ServerSelectionWaitQueue : IDisposable + { + private readonly Cluster _cluster; + private readonly object _serverSelectionWaitQueueLock = new(); + private readonly Timer _rapidHeartbeatTimer; + private readonly InterlockedInt32 _rapidHeartbeatTimerCallbackState; - if (selectedServer != null) - { - _stopwatch.Stop(); - - _cluster._serverSelectionEventLogger.LogAndPublish(new ClusterSelectedServerEvent( - _description, - _selector, - selectedServer.Description, - _stopwatch.Elapsed, - EventContext.OperationId, - EventContext.OperationName)); - } + private int _serverSelectionWaitQueueSize; - return selectedServer; + public ServerSelectionWaitQueue(Cluster cluster) + { + _cluster = cluster; + _rapidHeartbeatTimerCallbackState = new InterlockedInt32(RapidHeartbeatTimerCallbackState.NotRunning); + _rapidHeartbeatTimer = new Timer(RapidHeartbeatTimerCallback, null, Timeout.InfiniteTimeSpan, Timeout.InfiniteTimeSpan); } - public void WaitingForDescriptionToChange() + public void Dispose() { - if (!_serverSelectionWaitQueueEntered) - { - _cluster.EnterServerSelectionWaitQueue(_selector, _description, EventContext.OperationId, _timeoutAt - DateTime.UtcNow); - _serverSelectionWaitQueueEntered = true; - } - - var timeoutRemaining = _timeoutAt - DateTime.UtcNow; - if (timeoutRemaining <= TimeSpan.Zero) - { - _cluster.ThrowTimeoutException(_selector, _description); - } + _rapidHeartbeatTimer.Dispose(); } - private IServerSelector DecorateSelector(IServerSelector selector) + public IDisposable Enter(OperationContext operationContext, IServerSelector selector, 
ClusterDescription clusterDescription, long? operationId) { - var settings = _cluster.Settings; - var allSelectors = new List<IServerSelector>(); - - if (settings.PreServerSelector != null) + lock (_serverSelectionWaitQueueLock) { - allSelectors.Add(settings.PreServerSelector); - } + if (_serverSelectionWaitQueueSize >= _cluster._settings.MaxServerSelectionWaitQueueSize) + { + throw MongoWaitQueueFullException.ForServerSelection(); + } - allSelectors.Add(selector); + if (++_serverSelectionWaitQueueSize == 1) + { + _rapidHeartbeatTimer.Change(TimeSpan.Zero, _cluster._minHeartbeatInterval); + } - if (settings.PostServerSelector != null) - { - allSelectors.Add(settings.PostServerSelector); + _cluster._serverSelectionEventLogger.LogAndPublish(new ClusterEnteredSelectionQueueEvent( + clusterDescription, + selector, + operationId, + EventContext.OperationName, + operationContext.RemainingTimeout)); } - allSelectors.Add(_cluster._latencyLimitingServerSelector); - allSelectors.Add(_operationCountServerSelector); - - return new CompositeServerSelector(allSelectors); + return new ServerSelectionQueueDisposer(this); } - } - private sealed class WaitForDescriptionChangedHelper : IDisposable - { - private readonly CancellationToken _cancellationToken; - private readonly TaskCompletionSource<bool> _cancellationTaskCompletionSource; - private readonly CancellationTokenRegistration _cancellationTokenRegistration; - private readonly Cluster _cluster; - private readonly ClusterDescription _description; - private readonly Task _descriptionChangedTask; - private readonly IServerSelector _selector; - private readonly CancellationTokenSource _timeoutCancellationTokenSource; - private readonly Task _timeoutTask; - - public WaitForDescriptionChangedHelper(Cluster cluster, IServerSelector selector, ClusterDescription description, Task descriptionChangedTask, TimeSpan timeout, CancellationToken cancellationToken) - { - _cluster = cluster; - _description = description; - _selector = 
selector; - _descriptionChangedTask = descriptionChangedTask; - _cancellationToken = cancellationToken; - _cancellationTaskCompletionSource = new TaskCompletionSource<bool>(); - _cancellationTokenRegistration = cancellationToken.Register(() => _cancellationTaskCompletionSource.TrySetCanceled()); - _timeoutCancellationTokenSource = new CancellationTokenSource(); - _timeoutTask = Task.Delay(timeout, _timeoutCancellationTokenSource.Token); - } - - public Task[] Tasks + private void ExitServerSelectionWaitQueue() { - get + lock (_serverSelectionWaitQueueLock) { - return new Task[] + if (--_serverSelectionWaitQueueSize == 0) { - _descriptionChangedTask, - _timeoutTask, - _cancellationTaskCompletionSource.Task - }; + _rapidHeartbeatTimer.Change(Timeout.InfiniteTimeSpan, Timeout.InfiniteTimeSpan); + } } } - public void Dispose() + private void RapidHeartbeatTimerCallback(object args) { - _cancellationTokenRegistration.Dispose(); - _timeoutCancellationTokenSource.Dispose(); + // avoid requesting heartbeat reentrantly + if (_rapidHeartbeatTimerCallbackState.TryChange(RapidHeartbeatTimerCallbackState.NotRunning, RapidHeartbeatTimerCallbackState.Running)) + { + try + { + _cluster.RequestHeartbeat(); + } + catch + { + // TODO: Trace this + // If we don't protect this call, we could + // take down the app domain. 
+ } + finally + { + _rapidHeartbeatTimerCallbackState.TryChange(RapidHeartbeatTimerCallbackState.NotRunning); + } + } } - public void HandleCompletedTask(Task completedTask) + private sealed class ServerSelectionQueueDisposer : IDisposable { - if (completedTask == _timeoutTask) - { - _cluster.ThrowTimeoutException(_selector, _description); - } - _timeoutCancellationTokenSource.Cancel(); + private readonly ServerSelectionWaitQueue _waitQueue; - if (completedTask == _cancellationTaskCompletionSource.Task) + public ServerSelectionQueueDisposer(ServerSelectionWaitQueue waitQueue) { - _cancellationToken.ThrowIfCancellationRequested(); + _waitQueue = waitQueue; } - _descriptionChangedTask.GetAwaiter().GetResult(); // propagate exceptions + public void Dispose() + => _waitQueue.ExitServerSelectionWaitQueue(); } } - - private static class State - { - public const int Initial = 0; - public const int Open = 1; - public const int Disposed = 2; - } - - private static class RapidHeartbeatTimerCallbackState - { - public const int NotRunning = 0; - public const int Running = 1; - } } } diff --git a/src/MongoDB.Driver/Core/Clusters/ClusterClock.cs b/src/MongoDB.Driver/Core/Clusters/ClusterClock.cs index 7cc7c164435..b816cf7a10f 100644 --- a/src/MongoDB.Driver/Core/Clusters/ClusterClock.cs +++ b/src/MongoDB.Driver/Core/Clusters/ClusterClock.cs @@ -52,7 +52,7 @@ public void AdvanceClusterTime(BsonDocument newClusterTime) _clusterTime = GreaterClusterTime(_clusterTime, newClusterTime); } } - + internal sealed class NoClusterClock : IClusterClock { public BsonDocument ClusterTime => null; diff --git a/src/MongoDB.Driver/Core/Clusters/ElectionId.cs b/src/MongoDB.Driver/Core/Clusters/ElectionId.cs index 20d0d4aa77c..63ee73465c9 100644 --- a/src/MongoDB.Driver/Core/Clusters/ElectionId.cs +++ b/src/MongoDB.Driver/Core/Clusters/ElectionId.cs @@ -79,7 +79,7 @@ public bool Equals(ElectionId other) /// Returns a hash code for this instance. 
/// </summary> /// <returns> - /// A hash code for this instance, suitable for use in hashing algorithms and data structures like a hash table. + /// A hash code for this instance, suitable for use in hashing algorithms and data structures like a hash table. /// </returns> public override int GetHashCode() { diff --git a/src/MongoDB.Driver/Core/Clusters/ICluster.cs b/src/MongoDB.Driver/Core/Clusters/ICluster.cs index fcc817ec844..97c9a686226 100644 --- a/src/MongoDB.Driver/Core/Clusters/ICluster.cs +++ b/src/MongoDB.Driver/Core/Clusters/ICluster.cs @@ -14,7 +14,7 @@ */ using System; -using System.Threading; +using System.Collections.Generic; using System.Threading.Tasks; using MongoDB.Driver.Core.Bindings; using MongoDB.Driver.Core.Clusters.ServerSelectors; @@ -56,14 +56,16 @@ public interface ICluster : IDisposable internal interface IClusterInternal : ICluster { + IEnumerable<IClusterableServer> Servers { get; } + event EventHandler<ClusterDescriptionChangedEventArgs> DescriptionChanged; ICoreServerSession AcquireServerSession(); void Initialize(); - IServer SelectServer(IServerSelector selector, CancellationToken cancellationToken); - Task<IServer> SelectServerAsync(IServerSelector selector, CancellationToken cancellationToken); + IServer SelectServer(OperationContext operationContext, IServerSelector selector); + Task<IServer> SelectServerAsync(OperationContext operationContext, IServerSelector selector); ICoreSessionHandle StartSession(CoreSessionOptions options = null); } diff --git a/src/MongoDB.Driver/Core/Clusters/IClusterClock.cs b/src/MongoDB.Driver/Core/Clusters/IClusterClock.cs index 0e3dabc5593..9e11dc88859 100644 --- a/src/MongoDB.Driver/Core/Clusters/IClusterClock.cs +++ b/src/MongoDB.Driver/Core/Clusters/IClusterClock.cs @@ -20,7 +20,7 @@ namespace MongoDB.Driver.Core.Clusters internal interface IClusterClock { BsonDocument ClusterTime { get; } - + void AdvanceClusterTime(BsonDocument newClusterTime); } } diff --git 
a/src/MongoDB.Driver/Core/Clusters/IClusterExtensions.cs b/src/MongoDB.Driver/Core/Clusters/IClusterExtensions.cs index 245ba8a48a3..e8060a75a07 100644 --- a/src/MongoDB.Driver/Core/Clusters/IClusterExtensions.cs +++ b/src/MongoDB.Driver/Core/Clusters/IClusterExtensions.cs @@ -14,7 +14,6 @@ */ using System.Collections.Generic; -using System.Threading; using System.Threading.Tasks; using MongoDB.Driver.Core.Bindings; using MongoDB.Driver.Core.Clusters.ServerSelectors; @@ -26,10 +25,10 @@ internal static class IClusterExtensions { public static IServer SelectServerAndPinIfNeeded( this IClusterInternal cluster, + OperationContext operationContext, ICoreSessionHandle session, IServerSelector selector, - IReadOnlyCollection<ServerDescription> deprioritizedServers, - CancellationToken cancellationToken) + IReadOnlyCollection<ServerDescription> deprioritizedServers) { var pinnedServer = GetPinnedServerIfValid(cluster, session); if (pinnedServer != null) @@ -41,19 +40,19 @@ public static IServer SelectServerAndPinIfNeeded( ? new CompositeServerSelector(new[] { new PriorityServerSelector(deprioritizedServers), selector }) : selector; - // Server selection also updates the cluster type, allowing us to to determine if the server + // Server selection also updates the cluster type, allowing us to determine if the server // should be pinned. 
- var server = cluster.SelectServer(selector, cancellationToken); + var server = cluster.SelectServer(operationContext, selector); PinServerIfNeeded(cluster, session, server); return server; } public static async Task<IServer> SelectServerAndPinIfNeededAsync( this IClusterInternal cluster, + OperationContext operationContext, ICoreSessionHandle session, IServerSelector selector, - IReadOnlyCollection<ServerDescription> deprioritizedServers, - CancellationToken cancellationToken) + IReadOnlyCollection<ServerDescription> deprioritizedServers) { var pinnedServer = GetPinnedServerIfValid(cluster, session); if (pinnedServer != null) @@ -65,9 +64,9 @@ public static async Task<IServer> SelectServerAndPinIfNeededAsync( ? new CompositeServerSelector(new[] { new PriorityServerSelector(deprioritizedServers), selector }) : selector; - // Server selection also updates the cluster type, allowing us to to determine if the server + // Server selection also updates the cluster type, allowing us to determine if the server // should be pinned. 
- var server = await cluster.SelectServerAsync(selector, cancellationToken).ConfigureAwait(false); + var server = await cluster.SelectServerAsync(operationContext, selector).ConfigureAwait(false); PinServerIfNeeded(cluster, session, server); return server; diff --git a/src/MongoDB.Driver/Core/Clusters/LoadBalancedCluster.cs b/src/MongoDB.Driver/Core/Clusters/LoadBalancedCluster.cs index 712d9f52fc7..bbe903661ad 100644 --- a/src/MongoDB.Driver/Core/Clusters/LoadBalancedCluster.cs +++ b/src/MongoDB.Driver/Core/Clusters/LoadBalancedCluster.cs @@ -142,6 +142,19 @@ private void Dispose(bool disposing) } } + public IEnumerable<IClusterableServer> Servers + { + get + { + if (_server == null) + { + return []; + } + + return [_server]; + } + } + public void Initialize() { ThrowIfDisposed(); @@ -170,69 +183,87 @@ public void Initialize() } } - public IServer SelectServer(IServerSelector selector, CancellationToken cancellationToken) + public IServer SelectServer(OperationContext operationContext, IServerSelector selector) { + Ensure.IsNotNull(selector, nameof(selector)); + Ensure.IsNotNull(operationContext, nameof(operationContext)); ThrowIfDisposed(); + using var serverSelectionOperationContext = operationContext.WithTimeout(_settings.ServerSelectionTimeout); + _serverSelectionEventLogger.LogAndPublish(new ClusterSelectingServerEvent( _description, selector, null, EventContext.OperationName)); - var index = Task.WaitAny(new[] { _serverReadyTaskCompletionSource.Task }, (int)_settings.ServerSelectionTimeout.TotalMilliseconds, cancellationToken); - if (index != 0) + var stopwatch = Stopwatch.StartNew(); + try + { + serverSelectionOperationContext.WaitTask(_serverReadyTaskCompletionSource.Task); + } + catch (TimeoutException) { - cancellationToken.ThrowIfCancellationRequested(); throw CreateTimeoutException(_description); // _description will contain dnsException } if (_server != null) { + stopwatch.Stop(); + _serverSelectionEventLogger.LogAndPublish(new 
ClusterSelectedServerEvent( _description, selector, _server.Description, - TimeSpan.FromSeconds(1), + stopwatch.Elapsed, null, EventContext.OperationName)); + + return new SelectedServer(_server, _server.Description); } - return _server ?? - throw new InvalidOperationException("The server must be created before usage."); // should not be reached + throw new InvalidOperationException("The server must be created before usage."); // should not be reached } - public async Task<IServer> SelectServerAsync(IServerSelector selector, CancellationToken cancellationToken) + public async Task<IServer> SelectServerAsync(OperationContext operationContext, IServerSelector selector) { + Ensure.IsNotNull(selector, nameof(selector)); + Ensure.IsNotNull(operationContext, nameof(operationContext)); ThrowIfDisposed(); + using var serverSelectionOperationContext = operationContext.WithTimeout(_settings.ServerSelectionTimeout); + _serverSelectionEventLogger.LogAndPublish(new ClusterSelectingServerEvent( _description, selector, null, EventContext.OperationName)); - var timeoutTask = Task.Delay(_settings.ServerSelectionTimeout, cancellationToken); - var triggeredTask = await Task.WhenAny(_serverReadyTaskCompletionSource.Task, timeoutTask).ConfigureAwait(false); - if (triggeredTask == timeoutTask) + var stopwatch = Stopwatch.StartNew(); + try + { + await serverSelectionOperationContext.WaitTaskAsync(_serverReadyTaskCompletionSource.Task).ConfigureAwait(false); + } + catch (TimeoutException) { - cancellationToken.ThrowIfCancellationRequested(); throw CreateTimeoutException(_description); // _description will contain dnsException } if (_server != null) { + stopwatch.Stop(); _serverSelectionEventLogger.LogAndPublish(new ClusterSelectedServerEvent( _description, selector, _server.Description, - TimeSpan.FromSeconds(1), + stopwatch.Elapsed, null, EventContext.OperationName)); + + return new SelectedServer(_server, _server.Description); } - return _server ?? 
- throw new InvalidOperationException("The server must be created before usage."); // should not be reached + throw new InvalidOperationException("The server must be created before usage."); // should not be reached } public ICoreSessionHandle StartSession(CoreSessionOptions options = null) diff --git a/src/MongoDB.Driver/Core/Clusters/MultiServerCluster.cs b/src/MongoDB.Driver/Core/Clusters/MultiServerCluster.cs index 2b463f1c894..57087d7789f 100644 --- a/src/MongoDB.Driver/Core/Clusters/MultiServerCluster.cs +++ b/src/MongoDB.Driver/Core/Clusters/MultiServerCluster.cs @@ -90,6 +90,8 @@ protected override void Dispose(bool disposing) } } + public override IEnumerable<IClusterableServer> Servers => _servers; + public override void Initialize() { base.Initialize(); diff --git a/src/MongoDB.Driver/Core/Clusters/ServerSelectors/OperationsCountServerSelector.cs b/src/MongoDB.Driver/Core/Clusters/ServerSelectors/OperationsCountServerSelector.cs index 3442d6bd436..c6cf48bffcb 100644 --- a/src/MongoDB.Driver/Core/Clusters/ServerSelectors/OperationsCountServerSelector.cs +++ b/src/MongoDB.Driver/Core/Clusters/ServerSelectors/OperationsCountServerSelector.cs @@ -22,9 +22,9 @@ namespace MongoDB.Driver.Core.Clusters.ServerSelectors { internal sealed class OperationsCountServerSelector : IServerSelector { - private readonly IEnumerable<IClusterableServer> _clusterableServers; + private IReadOnlyList<IClusterableServer> _clusterableServers; - public OperationsCountServerSelector(IEnumerable<IClusterableServer> clusterableServers) + public OperationsCountServerSelector(IReadOnlyList<IClusterableServer> clusterableServers) { _clusterableServers = clusterableServers; } @@ -58,6 +58,11 @@ public IEnumerable<ServerDescription> SelectServers(ClusterDescription cluster, } } + public void PopulateServers(IReadOnlyList<IClusterableServer> clusterableServers) + { + _clusterableServers = clusterableServers; + } + /// <inheritdoc/> public override string ToString() => 
nameof(OperationsCountServerSelector); diff --git a/src/MongoDB.Driver/Core/Clusters/SingleServerCluster.cs b/src/MongoDB.Driver/Core/Clusters/SingleServerCluster.cs index 0f6651a2b92..ff391eb48eb 100644 --- a/src/MongoDB.Driver/Core/Clusters/SingleServerCluster.cs +++ b/src/MongoDB.Driver/Core/Clusters/SingleServerCluster.cs @@ -13,6 +13,7 @@ * limitations under the License. */ +using System.Collections.Generic; using System.Diagnostics; using System.Net; using Microsoft.Extensions.Logging; @@ -72,6 +73,19 @@ protected override void Dispose(bool disposing) } } + public override IEnumerable<IClusterableServer> Servers + { + get + { + if (_server == null) + { + return []; + } + + return [_server]; + } + } + public override void Initialize() { base.Initialize(); diff --git a/src/MongoDB.Driver/Core/Compression/ICompressor.cs b/src/MongoDB.Driver/Core/Compression/ICompressor.cs index 4d7a74cc0c0..5db2c91b724 100644 --- a/src/MongoDB.Driver/Core/Compression/ICompressor.cs +++ b/src/MongoDB.Driver/Core/Compression/ICompressor.cs @@ -33,11 +33,11 @@ public enum CompressorType /// </summary> Snappy = 1, /// <summary> - /// The content of the message is compressed using zlib. + /// The content of the message is compressed using zlib. /// </summary> Zlib = 2, /// <summary> - /// The content of the message is compressed using zstandard. + /// The content of the message is compressed using zstandard. 
/// </summary> ZStandard = 3 } diff --git a/src/MongoDB.Driver/Core/Compression/SnappyCompressor.cs b/src/MongoDB.Driver/Core/Compression/SnappyCompressor.cs index dbb11f48e59..4e84bbfd213 100644 --- a/src/MongoDB.Driver/Core/Compression/SnappyCompressor.cs +++ b/src/MongoDB.Driver/Core/Compression/SnappyCompressor.cs @@ -34,7 +34,7 @@ public void Compress(Stream input, Stream output) { var uncompressedSize = (int)(input.Length - input.Position); var uncompressedBytes = new byte[uncompressedSize]; // does not include uncompressed message headers - input.ReadBytes(uncompressedBytes, offset: 0, count: uncompressedSize, CancellationToken.None); + input.ReadBytes(OperationContext.NoTimeout, uncompressedBytes, offset: 0, count: uncompressedSize, socketTimeout: Timeout.InfiniteTimeSpan); var maxCompressedSize = Snappy.GetMaxCompressedLength(uncompressedSize); var compressedBytes = new byte[maxCompressedSize]; var compressedSize = Snappy.Compress(uncompressedBytes, compressedBytes); @@ -50,7 +50,7 @@ public void Decompress(Stream input, Stream output) { var compressedSize = (int)(input.Length - input.Position); var compressedBytes = new byte[compressedSize]; - input.ReadBytes(compressedBytes, offset: 0, count: compressedSize, CancellationToken.None); + input.ReadBytes(OperationContext.NoTimeout, compressedBytes, offset: 0, count: compressedSize, socketTimeout: Timeout.InfiniteTimeSpan); var uncompressedSize = Snappy.GetUncompressedLength(compressedBytes); var decompressedBytes = new byte[uncompressedSize]; var decompressedSize = Snappy.Decompress(compressedBytes, decompressedBytes); diff --git a/src/MongoDB.Driver/Core/Compression/ZlibCompressor.cs b/src/MongoDB.Driver/Core/Compression/ZlibCompressor.cs index 21ad94542a0..9e7b7e8ad88 100644 --- a/src/MongoDB.Driver/Core/Compression/ZlibCompressor.cs +++ b/src/MongoDB.Driver/Core/Compression/ZlibCompressor.cs @@ -24,7 +24,7 @@ namespace MongoDB.Driver.Core.Compression { /// <summary> /// Compressor according to the zlib 
algorithm. - /// </summary> + /// </summary> internal sealed class ZlibCompressor : ICompressor { private readonly CompressionLevel _compressionLevel; diff --git a/src/MongoDB.Driver/Core/Configuration/ClusterBuilder.cs b/src/MongoDB.Driver/Core/Configuration/ClusterBuilder.cs index d1989965ee6..cb868f198ad 100644 --- a/src/MongoDB.Driver/Core/Configuration/ClusterBuilder.cs +++ b/src/MongoDB.Driver/Core/Configuration/ClusterBuilder.cs @@ -30,9 +30,6 @@ namespace MongoDB.Driver.Core.Configuration /// </summary> public class ClusterBuilder { - // constants - private const string __traceSourceName = "MongoDB-SDAM"; - // fields private EventAggregator _eventAggregator; private ClusterSettings _clusterSettings; @@ -43,6 +40,7 @@ public class ClusterBuilder private SslStreamSettings _sslStreamSettings; private Func<IStreamFactory, IStreamFactory> _streamFactoryWrapper; private TcpStreamSettings _tcpStreamSettings; + private Socks5ProxyStreamSettings _socks5ProxyStreamSettings; // constructors /// <summary> @@ -160,6 +158,19 @@ public ClusterBuilder ConfigureTcp(Func<TcpStreamSettings, TcpStreamSettings> co return this; } + /// <summary> + /// Configures the SOCKS5 proxy settings for the cluster. + /// </summary> + /// <param name="configurator"> The SOCKS5 proxy settings configurator delegate.</param> + /// <returns>A reconfigured cluster builder.</returns> + public ClusterBuilder ConfigureSocks5Proxy(Func<Socks5ProxyStreamSettings, Socks5ProxyStreamSettings> configurator) + { + Ensure.IsNotNull(configurator, nameof(configurator)); + + _socks5ProxyStreamSettings = configurator(_socks5ProxyStreamSettings ?? 
new Socks5ProxyStreamSettings()); + return this; + } + internal ClusterBuilder RegisterStreamFactory(Func<IStreamFactory, IStreamFactory> wrapper) { Ensure.IsNotNull(wrapper, nameof(wrapper)); @@ -216,7 +227,9 @@ private IConnectionPoolFactory CreateConnectionPoolFactory() streamFactory, _eventAggregator, _clusterSettings.ServerApi, - _loggingSettings.ToInternalLoggerFactory()); + _loggingSettings.ToInternalLoggerFactory(), + _tcpStreamSettings.ReadTimeout, + _tcpStreamSettings.WriteTimeout); var connectionPoolSettings = _connectionPoolSettings.WithInternal(isPausable: !_connectionSettings.LoadBalanced); @@ -252,30 +265,33 @@ private IServerMonitorFactory CreateServerMonitorFactory() { heartbeatConnectTimeout = TimeSpan.FromSeconds(30); } - var heartbeatSocketTimeout = _serverSettings.HeartbeatTimeout; - if (heartbeatSocketTimeout == TimeSpan.Zero || heartbeatSocketTimeout == Timeout.InfiniteTimeSpan) + var heartbeatTimeout = _serverSettings.HeartbeatTimeout; + if (heartbeatTimeout == TimeSpan.Zero || heartbeatTimeout == Timeout.InfiniteTimeSpan) { - heartbeatSocketTimeout = heartbeatConnectTimeout; + heartbeatTimeout = heartbeatConnectTimeout; } var serverMonitorTcpStreamSettings = new TcpStreamSettings(_tcpStreamSettings) .With( connectTimeout: heartbeatConnectTimeout, - readTimeout: heartbeatSocketTimeout, - writeTimeout: heartbeatSocketTimeout + readTimeout: null, + writeTimeout: null ); var serverMonitorStreamFactory = CreateTcpStreamFactory(serverMonitorTcpStreamSettings); var serverMonitorSettings = new ServerMonitorSettings( - connectTimeout: serverMonitorTcpStreamSettings.ConnectTimeout, - heartbeatInterval: _serverSettings.HeartbeatInterval, - serverMonitoringMode: _serverSettings.ServerMonitoringMode); + ConnectTimeout: heartbeatConnectTimeout, + HeartbeatInterval: _serverSettings.HeartbeatInterval, + HeartbeatTimeout: heartbeatTimeout, + _serverSettings.ServerMonitoringMode); var serverMonitorConnectionFactory = new BinaryConnectionFactory( 
serverMonitorConnectionSettings, serverMonitorStreamFactory, new EventAggregator(), _clusterSettings.ServerApi, - loggerFactory: null); + loggerFactory: null, + _tcpStreamSettings.ReadTimeout, + _tcpStreamSettings.WriteTimeout); return new ServerMonitorFactory( serverMonitorSettings, @@ -288,6 +304,12 @@ private IServerMonitorFactory CreateServerMonitorFactory() private IStreamFactory CreateTcpStreamFactory(TcpStreamSettings tcpStreamSettings) { var streamFactory = (IStreamFactory)new TcpStreamFactory(tcpStreamSettings); + + if (_socks5ProxyStreamSettings != null) + { + streamFactory = new Socks5ProxyStreamFactory(_socks5ProxyStreamSettings, streamFactory); + } + if (_sslStreamSettings != null) { streamFactory = new SslStreamFactory(_sslStreamSettings, streamFactory); diff --git a/src/MongoDB.Driver/Core/Configuration/ClusterBuilderExtensions.cs b/src/MongoDB.Driver/Core/Configuration/ClusterBuilderExtensions.cs index 3fe6c4a3da2..81c3e0bf9e6 100644 --- a/src/MongoDB.Driver/Core/Configuration/ClusterBuilderExtensions.cs +++ b/src/MongoDB.Driver/Core/Configuration/ClusterBuilderExtensions.cs @@ -23,6 +23,7 @@ using MongoDB.Driver.Authentication; using MongoDB.Driver.Authentication.Gssapi; using MongoDB.Driver.Authentication.Oidc; +using MongoDB.Driver.Core.Connections; using MongoDB.Driver.Core.Events.Diagnostics; using MongoDB.Driver.Core.Misc; @@ -118,6 +119,15 @@ public static ClusterBuilder ConfigureWithConnectionString( builder = builder.ConfigureTcp(s => s.With(addressFamily: AddressFamily.InterNetworkV6)); } + if (connectionString.ProxyHost != null) + { + builder = builder.ConfigureSocks5Proxy(s => s.With(Socks5ProxySettings.Create( + connectionString.ProxyHost, + connectionString.ProxyPort, + connectionString.ProxyUsername, + connectionString.ProxyPassword))); + } + if (connectionString.SocketTimeout != null) { builder = builder.ConfigureTcp(s => s.With( diff --git a/src/MongoDB.Driver/Core/Configuration/ConnectionString.cs 
b/src/MongoDB.Driver/Core/Configuration/ConnectionString.cs index 79349c035cc..73e9279ec4f 100644 --- a/src/MongoDB.Driver/Core/Configuration/ConnectionString.cs +++ b/src/MongoDB.Driver/Core/Configuration/ConnectionString.cs @@ -92,12 +92,19 @@ public sealed class ConnectionString private string _replicaSet; private bool? _retryReads; private bool? _retryWrites; + private string _proxyHost; + private int? _proxyPort; + private string _proxyUsername; + private string _proxyPassword; private ConnectionStringScheme _scheme; private ServerMonitoringMode? _serverMonitoringMode; private TimeSpan? _serverSelectionTimeout; private TimeSpan? _socketTimeout; private int? _srvMaxHosts; private string _srvServiceName; +#pragma warning disable CS0649 // Field is never assigned to, and will always have its default value + private TimeSpan? _timeout; +#pragma warning restore CS0649 // Field is never assigned to, and will always have its default value private bool? _tls; private bool? _tlsDisableCertificateRevocationCheck; private bool? _tlsInsecure; @@ -356,6 +363,26 @@ public string Password get { return _password; } } + /// <summary> + /// Gets the proxy host. + /// </summary> + public string ProxyHost => _proxyHost; + + /// <summary> + /// Gets the proxy port. + /// </summary> + public int? ProxyPort => _proxyPort; + + /// <summary> + /// Gets the proxy username. + /// </summary> + public string ProxyUsername => _proxyUsername; + + /// <summary> + /// Gets the proxy password. + /// </summary> + public string ProxyPassword => _proxyPassword; + /// <summary> /// Gets the read concern level. /// </summary> @@ -399,7 +426,6 @@ public bool? RetryReads get { return _retryReads; } } - /// <summary> /// Gets a value indicating whether or not to retry writes. /// </summary> @@ -468,6 +494,12 @@ public bool? Ssl [Obsolete("Use TlsInsecure instead.")] public bool? SslVerifyCertificate => !_tlsInsecure; + /// <summary> + /// Gets the per-operation timeout. 
+ /// </summary> + // TODO: CSOT: Make it public when CSOT will be ready for GA + internal TimeSpan? Timeout => _timeout; + /// <summary> /// Gets whether to use TLS. /// </summary> @@ -903,6 +935,29 @@ private void Parse() } } + if (string.IsNullOrEmpty(_proxyHost)) + { + if (_proxyPort is not null) + { + throw new MongoConfigurationException("proxyPort cannot be specified without proxyHost."); + } + + if (!string.IsNullOrEmpty(_proxyUsername)) + { + throw new MongoConfigurationException("proxyUsername cannot be specified without proxyHost."); + } + + if (!string.IsNullOrEmpty(_proxyPassword)) + { + throw new MongoConfigurationException("proxyPassword cannot be specified without proxyHost."); + } + } + + if (string.IsNullOrEmpty(_proxyUsername) != string.IsNullOrEmpty(_proxyPassword)) + { + throw new MongoConfigurationException("proxyUsername and proxyPassword must both be specified or neither."); + } + string ProtectConnectionString(string connectionString) { var protectedString = Regex.Replace(connectionString, @"(?<=://)[^/]*(?=@)", "<hidden>"); @@ -995,6 +1050,55 @@ private void ParseOption(string name, string value) case "minpoolsize": _minPoolSize = ParseInt32(name, value); break; + case "proxyhost": + if (_proxyHost != null) + { + throw new MongoConfigurationException("Multiple proxyHost options are not allowed."); + } + + _proxyHost = value; + if (_proxyHost.Length == 0) + { + throw new MongoConfigurationException("proxyHost cannot be empty."); + } + break; + case "proxyport": + if (_proxyPort != null) + { + throw new MongoConfigurationException("Multiple proxyPort options are not allowed."); + } + + var proxyPortValue = ParseInt32(name, value); + if (proxyPortValue is < 1 or > 65535) + { + throw new MongoConfigurationException($"Invalid proxy port {proxyPortValue}: must be between 1 and 65535, inclusive."); + } + _proxyPort = proxyPortValue; + break; + case "proxyusername": + if (_proxyUsername != null) + { + throw new 
MongoConfigurationException("Multiple proxyUsername options are not allowed."); + } + + _proxyUsername = value; + if (_proxyUsername.Length == 0) + { + throw new MongoConfigurationException("proxyUsername cannot be empty."); + } + break; + case "proxypassword": + if (_proxyPassword != null) + { + throw new MongoConfigurationException("Multiple proxyPassword options are not allowed."); + } + + _proxyPassword = value; + if (_proxyPassword.Length == 0) + { + throw new MongoConfigurationException("proxyPassword cannot be empty."); + } + break; case "readconcernlevel": _readConcernLevel = ParseEnum<ReadConcernLevel>(name, value); break; @@ -1089,6 +1193,12 @@ private void ParseOption(string name, string value) var sslVerifyCertificateValue = ParseBoolean(name, value); _tlsInsecure = EnsureTlsInsecureIsValid(!sslVerifyCertificateValue); break; +#if DEBUG // TODO: CSOT: Make it public when CSOT will be ready for GA + case "timeout": + case "timeoutms": + _timeout = value == "0" ? System.Threading.Timeout.InfiniteTimeSpan : ParseTimeSpan(name, value); + break; +#endif case "tlsdisablecertificaterevocationcheck": var tlsDisableCertificateRevocationCheckValue = ParseBoolean(name, value); _tlsDisableCertificateRevocationCheck = diff --git a/src/MongoDB.Driver/Core/Configuration/Socks5ProxyStreamSettings.cs b/src/MongoDB.Driver/Core/Configuration/Socks5ProxyStreamSettings.cs new file mode 100644 index 00000000000..b87c01b0a5f --- /dev/null +++ b/src/MongoDB.Driver/Core/Configuration/Socks5ProxyStreamSettings.cs @@ -0,0 +1,48 @@ +/* Copyright 2010-present MongoDB Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +using MongoDB.Driver.Core.Connections; + +namespace MongoDB.Driver.Core.Configuration; + +/// <summary> +/// Represents settings for a SOCKS5 proxy stream. +/// </summary> +public sealed class Socks5ProxyStreamSettings +{ + private Socks5ProxySettings _socks5ProxySettings; + + /// <summary> + /// Initializes a new instance of the <see cref="Socks5ProxyStreamSettings"/> class. + /// </summary> + /// <param name="socks5ProxySettings"> The settings for the SOCKS5 proxy.</param> + public Socks5ProxyStreamSettings(Optional<Socks5ProxySettings> socks5ProxySettings = default) + { + _socks5ProxySettings = socks5ProxySettings.WithDefault(null); + } + + /// <summary> + /// Gets the settings for the SOCKS5 proxy. + /// </summary> + public Socks5ProxySettings Socks5ProxySettings => _socks5ProxySettings; + + /// <summary> + /// Creates a new instance of <see cref="Socks5ProxyStreamSettings"/> with the specified SOCKS5 proxy settings. 
+ /// </summary> + /// <param name="socks5ProxySettings"></param> + /// <returns></returns> + public Socks5ProxyStreamSettings With(Socks5ProxySettings socks5ProxySettings) + => new(socks5ProxySettings); +} \ No newline at end of file diff --git a/src/MongoDB.Driver/Core/ConnectionPools/ExclusiveConnectionPool.Helpers.cs b/src/MongoDB.Driver/Core/ConnectionPools/ExclusiveConnectionPool.Helpers.cs index 77895ed99a4..151cb8719e4 100644 --- a/src/MongoDB.Driver/Core/ConnectionPools/ExclusiveConnectionPool.Helpers.cs +++ b/src/MongoDB.Driver/Core/ConnectionPools/ExclusiveConnectionPool.Helpers.cs @@ -1,4 +1,4 @@ -/* Copyright 2021-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -33,7 +33,25 @@ namespace MongoDB.Driver.Core.ConnectionPools internal sealed partial class ExclusiveConnectionPool { // private methods - private Exception CreateTimeoutException(Stopwatch stopwatch, string message) + private TimeSpan CalculateRemainingTimeout(TimeSpan timeout, Stopwatch stopwatch) + { + if (timeout == Timeout.InfiniteTimeSpan) + { + return Timeout.InfiniteTimeSpan; + } + + var elapsed = stopwatch.Elapsed; + var remainingTimeout = timeout - elapsed; + + if (remainingTimeout < TimeSpan.Zero) + { + throw CreateTimeoutException(elapsed, $"Timed out waiting for a connection after {elapsed.TotalMilliseconds}ms."); + } + + return remainingTimeout; + } + + private Exception CreateTimeoutException(TimeSpan elapsed, string message) { var checkOutsForCursorCount = _checkOutReasonCounter.GetCheckOutsCount(CheckOutReason.Cursor); var checkOutsForTransactionCount = _checkOutReasonCounter.GetCheckOutsCount(CheckOutReason.Transaction); @@ -47,7 +65,7 @@ private Exception CreateTimeoutException(Stopwatch stopwatch, string message) var checkOutsForOtherCount = checkOutsCount - checkOutsForCursorCount - checkOutsForTransactionCount; message = - 
$"Timed out after {stopwatch.ElapsedMilliseconds}ms waiting for a connection from the connection pool. " + + $"Timed out after {elapsed.TotalMilliseconds}ms waiting for a connection from the connection pool. " + $"maxPoolSize: {maxPoolSize}, " + $"connections in use by cursors: {checkOutsForCursorCount}, " + $"connections in use by transactions: {checkOutsForTransactionCount}, " + @@ -166,7 +184,6 @@ private sealed class AcquireConnectionHelper : IDisposable { // private fields private readonly ExclusiveConnectionPool _pool; - private readonly TimeSpan _timeout; private bool _enteredWaitQueue; private SemaphoreSlimSignalable.SemaphoreWaitResult _poolQueueWaitResult; @@ -175,32 +192,33 @@ private sealed class AcquireConnectionHelper : IDisposable public AcquireConnectionHelper(ExclusiveConnectionPool pool) { _pool = pool; - _timeout = pool._settings.WaitQueueTimeout; } - public IConnectionHandle AcquireConnection(CancellationToken cancellationToken) + public IConnectionHandle AcquireConnection(OperationContext operationContext) { var stopwatch = new Stopwatch(); try { StartCheckingOut(stopwatch); - _poolQueueWaitResult = _pool._maxConnectionsQueue.WaitSignaled(_timeout, cancellationToken); + var waitQueueTimeout = operationContext.RemainingTimeoutOrDefault(_pool.Settings.WaitQueueTimeout); + _poolQueueWaitResult = _pool._maxConnectionsQueue.WaitSignaled(waitQueueTimeout, operationContext.CancellationToken); if (_poolQueueWaitResult == SemaphoreSlimSignalable.SemaphoreWaitResult.Entered) { PooledConnection pooledConnection; - var timeout = EnsureTimeout(stopwatch); + ThrowIfTimedOut(operationContext, stopwatch); - using (var connectionCreator = new ConnectionCreator(_pool, timeout)) + using (var connectionCreator = new ConnectionCreator(_pool)) { - pooledConnection = connectionCreator.CreateOpenedOrReuse(cancellationToken); + waitQueueTimeout = _pool.CalculateRemainingTimeout(waitQueueTimeout, stopwatch); + pooledConnection = 
connectionCreator.CreateOpenedOrReuse(operationContext, waitQueueTimeout); } return EndCheckingOut(pooledConnection, stopwatch); } stopwatch.Stop(); - throw CreateException(stopwatch); + throw CreateException(stopwatch.Elapsed); } catch (Exception ex) { @@ -210,29 +228,31 @@ public IConnectionHandle AcquireConnection(CancellationToken cancellationToken) } } - public async Task<IConnectionHandle> AcquireConnectionAsync(CancellationToken cancellationToken) + public async Task<IConnectionHandle> AcquireConnectionAsync(OperationContext operationContext) { var stopwatch = new Stopwatch(); try { StartCheckingOut(stopwatch); - _poolQueueWaitResult = await _pool._maxConnectionsQueue.WaitSignaledAsync(_timeout, cancellationToken).ConfigureAwait(false); + var waitQueueTimeout = operationContext.RemainingTimeoutOrDefault(_pool.Settings.WaitQueueTimeout); + _poolQueueWaitResult = await _pool._maxConnectionsQueue.WaitSignaledAsync(waitQueueTimeout, operationContext.CancellationToken).ConfigureAwait(false); if (_poolQueueWaitResult == SemaphoreSlimSignalable.SemaphoreWaitResult.Entered) { PooledConnection pooledConnection; - var timeout = EnsureTimeout(stopwatch); + ThrowIfTimedOut(operationContext, stopwatch); - using (var connectionCreator = new ConnectionCreator(_pool, timeout)) + using (var connectionCreator = new ConnectionCreator(_pool)) { - pooledConnection = await connectionCreator.CreateOpenedOrReuseAsync(cancellationToken).ConfigureAwait(false); + waitQueueTimeout = _pool.CalculateRemainingTimeout(waitQueueTimeout, stopwatch); + pooledConnection = await connectionCreator.CreateOpenedOrReuseAsync(operationContext, waitQueueTimeout).ConfigureAwait(false); } return EndCheckingOut(pooledConnection, stopwatch); } stopwatch.Stop(); - throw CreateException(stopwatch); + throw CreateException(stopwatch.Elapsed); } catch (Exception ex) { @@ -281,13 +301,21 @@ private void AcquireWaitQueueSlot() _enteredWaitQueue = true; } + private void ThrowIfTimedOut(OperationContext 
operationContext, Stopwatch stopwatch) + { + if (operationContext.IsTimedOut()) + { + stopwatch.Stop(); + throw _pool.CreateTimeoutException(stopwatch.Elapsed, $"Timed out waiting for a connection after {stopwatch.ElapsedMilliseconds}ms."); + } + } + private void StartCheckingOut(Stopwatch stopwatch) { _pool._eventLogger.LogAndPublish(new ConnectionPoolCheckingOutConnectionEvent(_pool._serverId, EventContext.OperationId)); - stopwatch.Start(); - _pool._poolState.ThrowIfNotReady(); + _pool._poolState.ThrowIfNotReady(); AcquireWaitQueueSlot(); } @@ -296,7 +324,6 @@ private IConnectionHandle EndCheckingOut(PooledConnection pooledConnection, Stop var reference = new ReferenceCounted<PooledConnection>(pooledConnection, _pool.ReleaseConnection); var connectionHandle = new AcquiredConnection(_pool, reference); - stopwatch.Stop(); _pool._eventLogger.LogAndPublish(new ConnectionPoolCheckedOutConnectionEvent(connectionHandle.ConnectionId, stopwatch.Elapsed, EventContext.OperationId)); // no need to release the semaphore @@ -305,26 +332,13 @@ private IConnectionHandle EndCheckingOut(PooledConnection pooledConnection, Stop return connectionHandle; } - private TimeSpan EnsureTimeout(Stopwatch stopwatch) - { - var timeSpentInWaitQueue = stopwatch.Elapsed; - var timeout = _timeout - timeSpentInWaitQueue; - - if (timeout < TimeSpan.Zero) - { - throw _pool.CreateTimeoutException(stopwatch, $"Timed out waiting for a connection after {timeSpentInWaitQueue.TotalMilliseconds}ms."); - } - - return timeout; - } - - private Exception CreateException(Stopwatch stopwatch) => + private Exception CreateException(TimeSpan elapsed) => _poolQueueWaitResult switch { SemaphoreSlimSignalable.SemaphoreWaitResult.Signaled => MongoConnectionPoolPausedException.ForConnectionPool(_pool._endPoint), SemaphoreSlimSignalable.SemaphoreWaitResult.TimedOut => - _pool.CreateTimeoutException(stopwatch, $"Timed out waiting for a connection after {stopwatch.ElapsedMilliseconds}ms."), + 
_pool.CreateTimeoutException(elapsed, $"Timed out waiting for a connection after {elapsed.TotalMilliseconds}ms."), // should not be reached _ => new InvalidOperationException($"Invalid {_poolQueueWaitResult}.") }; @@ -409,11 +423,11 @@ public void Dispose() } } - public void Open(CancellationToken cancellationToken) + public void Open(OperationContext operationContext) { try { - _connection.Open(cancellationToken); + _connection.Open(operationContext); SetEffectiveGenerationIfRequired(_connection.Description); } catch (MongoConnectionException ex) @@ -424,11 +438,11 @@ public void Open(CancellationToken cancellationToken) } } - public async Task OpenAsync(CancellationToken cancellationToken) + public async Task OpenAsync(OperationContext operationContext) { try { - await _connection.OpenAsync(cancellationToken).ConfigureAwait(false); + await _connection.OpenAsync(operationContext).ConfigureAwait(false); SetEffectiveGenerationIfRequired(_connection.Description); } catch (MongoConnectionException ex) @@ -439,15 +453,15 @@ public async Task OpenAsync(CancellationToken cancellationToken) } } - public void Reauthenticate(CancellationToken cancellationToken) => _connection.Reauthenticate(cancellationToken); + public void Reauthenticate(OperationContext operationContext) => _connection.Reauthenticate(operationContext); - public Task ReauthenticateAsync(CancellationToken cancellationToken) => _connection.ReauthenticateAsync(cancellationToken); + public Task ReauthenticateAsync(OperationContext operationContext) => _connection.ReauthenticateAsync(operationContext); - public ResponseMessage ReceiveMessage(int responseTo, IMessageEncoderSelector encoderSelector, MessageEncoderSettings messageEncoderSettings, CancellationToken cancellationToken) + public ResponseMessage ReceiveMessage(OperationContext operationContext, int responseTo, IMessageEncoderSelector encoderSelector, MessageEncoderSettings messageEncoderSettings) { try { - return _connection.ReceiveMessage(responseTo, 
encoderSelector, messageEncoderSettings, cancellationToken); + return _connection.ReceiveMessage(operationContext, responseTo, encoderSelector, messageEncoderSettings); } catch (MongoConnectionException ex) { @@ -456,11 +470,11 @@ public ResponseMessage ReceiveMessage(int responseTo, IMessageEncoderSelector en } } - public async Task<ResponseMessage> ReceiveMessageAsync(int responseTo, IMessageEncoderSelector encoderSelector, MessageEncoderSettings messageEncoderSettings, CancellationToken cancellationToken) + public async Task<ResponseMessage> ReceiveMessageAsync(OperationContext operationContext, int responseTo, IMessageEncoderSelector encoderSelector, MessageEncoderSettings messageEncoderSettings) { try { - return await _connection.ReceiveMessageAsync(responseTo, encoderSelector, messageEncoderSettings, cancellationToken).ConfigureAwait(false); + return await _connection.ReceiveMessageAsync(operationContext, responseTo, encoderSelector, messageEncoderSettings).ConfigureAwait(false); } catch (MongoConnectionException ex) { @@ -469,11 +483,11 @@ public async Task<ResponseMessage> ReceiveMessageAsync(int responseTo, IMessageE } } - public void SendMessages(IEnumerable<RequestMessage> messages, MessageEncoderSettings messageEncoderSettings, CancellationToken cancellationToken) + public void SendMessage(OperationContext operationContext, RequestMessage message, MessageEncoderSettings messageEncoderSettings) { try { - _connection.SendMessages(messages, messageEncoderSettings, cancellationToken); + _connection.SendMessage(operationContext, message, messageEncoderSettings); } catch (MongoConnectionException ex) { @@ -482,11 +496,11 @@ public void SendMessages(IEnumerable<RequestMessage> messages, MessageEncoderSet } } - public async Task SendMessagesAsync(IEnumerable<RequestMessage> messages, MessageEncoderSettings messageEncoderSettings, CancellationToken cancellationToken) + public async Task SendMessageAsync(OperationContext operationContext, RequestMessage message, 
MessageEncoderSettings messageEncoderSettings) { try { - await _connection.SendMessagesAsync(messages, messageEncoderSettings, cancellationToken).ConfigureAwait(false); + await _connection.SendMessageAsync(operationContext, message, messageEncoderSettings).ConfigureAwait(false); } catch (MongoConnectionException ex) { @@ -504,11 +518,6 @@ public void SetCheckOutReasonIfNotAlreadySet(CheckOutReason reason) } } - public void SetReadTimeout(TimeSpan timeout) - { - _connection.SetReadTimeout(timeout); - } - // private methods private void EnrichExceptionDetails(MongoConnectionException ex) { @@ -595,52 +604,52 @@ public IConnectionHandle Fork() return new AcquiredConnection(_connectionPool, _reference); } - public void Open(CancellationToken cancellationToken) + public void Open(OperationContext operationContext) { ThrowIfDisposed(); - _reference.Instance.Open(cancellationToken); + _reference.Instance.Open(operationContext); } - public Task OpenAsync(CancellationToken cancellationToken) + public Task OpenAsync(OperationContext operationContext) { ThrowIfDisposed(); - return _reference.Instance.OpenAsync(cancellationToken); + return _reference.Instance.OpenAsync(operationContext); } - public void Reauthenticate(CancellationToken cancellationToken) + public void Reauthenticate(OperationContext operationContext) { ThrowIfDisposed(); - _reference.Instance.Reauthenticate(cancellationToken); + _reference.Instance.Reauthenticate(operationContext); } - public Task ReauthenticateAsync(CancellationToken cancellationToken) + public Task ReauthenticateAsync(OperationContext operationContext) { ThrowIfDisposed(); - return _reference.Instance.ReauthenticateAsync(cancellationToken); + return _reference.Instance.ReauthenticateAsync(operationContext); } - public Task<ResponseMessage> ReceiveMessageAsync(int responseTo, IMessageEncoderSelector encoderSelector, MessageEncoderSettings messageEncoderSettings, CancellationToken cancellationToken) + public Task<ResponseMessage> 
ReceiveMessageAsync(OperationContext operationContext, int responseTo, IMessageEncoderSelector encoderSelector, MessageEncoderSettings messageEncoderSettings) { ThrowIfDisposed(); - return _reference.Instance.ReceiveMessageAsync(responseTo, encoderSelector, messageEncoderSettings, cancellationToken); + return _reference.Instance.ReceiveMessageAsync(operationContext, responseTo, encoderSelector, messageEncoderSettings); } - public ResponseMessage ReceiveMessage(int responseTo, IMessageEncoderSelector encoderSelector, MessageEncoderSettings messageEncoderSettings, CancellationToken cancellationToken) + public ResponseMessage ReceiveMessage(OperationContext operationContext, int responseTo, IMessageEncoderSelector encoderSelector, MessageEncoderSettings messageEncoderSettings) { ThrowIfDisposed(); - return _reference.Instance.ReceiveMessage(responseTo, encoderSelector, messageEncoderSettings, cancellationToken); + return _reference.Instance.ReceiveMessage(operationContext, responseTo, encoderSelector, messageEncoderSettings); } - public void SendMessages(IEnumerable<RequestMessage> messages, MessageEncoderSettings messageEncoderSettings, CancellationToken cancellationToken) + public void SendMessage(OperationContext operationContext, RequestMessage message, MessageEncoderSettings messageEncoderSettings) { ThrowIfDisposed(); - _reference.Instance.SendMessages(messages, messageEncoderSettings, cancellationToken); + _reference.Instance.SendMessage(operationContext, message, messageEncoderSettings); } - public Task SendMessagesAsync(IEnumerable<RequestMessage> messages, MessageEncoderSettings messageEncoderSettings, CancellationToken cancellationToken) + public Task SendMessageAsync(OperationContext operationContext, RequestMessage message, MessageEncoderSettings messageEncoderSettings) { ThrowIfDisposed(); - return _reference.Instance.SendMessagesAsync(messages, messageEncoderSettings, cancellationToken); + return _reference.Instance.SendMessageAsync(operationContext, 
message, messageEncoderSettings); } public void SetCheckOutReasonIfNotAlreadySet(CheckOutReason reason) @@ -649,12 +658,6 @@ public void SetCheckOutReasonIfNotAlreadySet(CheckOutReason reason) _reference.Instance.SetCheckOutReasonIfNotAlreadySet(reason); } - public void SetReadTimeout(TimeSpan timeout) - { - ThrowIfDisposed(); - _reference.Instance.SetReadTimeout(timeout); - } - private void ThrowIfDisposed() { if (_disposed) @@ -846,41 +849,35 @@ public void UntrackInUseConnection(PooledConnection connection) internal sealed class ConnectionCreator : IDisposable { private readonly ExclusiveConnectionPool _pool; - private readonly TimeSpan _connectingTimeout; private PooledConnection _connection; private bool _disposeConnection; private SemaphoreSlimSignalable.SemaphoreWaitResult _connectingWaitStatus; - private Stopwatch _stopwatch; - - public ConnectionCreator(ExclusiveConnectionPool pool, TimeSpan connectingTimeout) + public ConnectionCreator(ExclusiveConnectionPool pool) { _pool = pool; - _connectingTimeout = connectingTimeout; _connectingWaitStatus = SemaphoreSlimSignalable.SemaphoreWaitResult.None; _connection = null; _disposeConnection = true; - _stopwatch = null; } - public PooledConnection CreateOpened(CancellationToken cancellationToken) + public PooledConnection CreateOpened(TimeSpan maxConnectingQueueTimeout, CancellationToken cancellationToken) { try { var stopwatch = Stopwatch.StartNew(); - _connectingWaitStatus = _pool._maxConnectingQueue.Wait(_connectingTimeout, cancellationToken); + _connectingWaitStatus = _pool._maxConnectingQueue.Wait(maxConnectingQueueTimeout, cancellationToken); stopwatch.Stop(); - _pool._poolState.ThrowIfNotReady(); if (_connectingWaitStatus == SemaphoreSlimSignalable.SemaphoreWaitResult.TimedOut) { - _pool.CreateTimeoutException(stopwatch, $"Timed out waiting for in connecting queue after {stopwatch.ElapsedMilliseconds}ms."); + throw _pool.CreateTimeoutException(stopwatch.Elapsed, $"Timed out waiting for in connecting queue 
after {stopwatch.ElapsedMilliseconds}ms."); } - return CreateOpenedInternal(cancellationToken); + return CreateOpenedInternal(new(Timeout.InfiniteTimeSpan, cancellationToken)); } catch (Exception ex) { @@ -889,38 +886,31 @@ public PooledConnection CreateOpened(CancellationToken cancellationToken) } } - public PooledConnection CreateOpenedOrReuse(CancellationToken cancellationToken) + public PooledConnection CreateOpenedOrReuse(OperationContext operationContext, TimeSpan waitQueueTimeout) { try { var connection = _pool._connectionHolder.Acquire(); - var waitTimeout = _connectingTimeout; var stopwatch = Stopwatch.StartNew(); while (connection == null) { _pool._poolState.ThrowIfNotReady(); + var waitTimeout = _pool.CalculateRemainingTimeout(waitQueueTimeout, stopwatch); // Try to acquire connecting semaphore. Possible operation results: // Entered: The request was successfully fulfilled, and a connection establishment can start // Signaled: The request was interrupted because Connection was return to pool and can be reused // Timeout: The request was timed out after WaitQueueTimeout period. 
- _connectingWaitStatus = _pool._maxConnectingQueue.WaitSignaled(waitTimeout, cancellationToken); + _connectingWaitStatus = _pool._maxConnectingQueue.WaitSignaled(waitTimeout, operationContext.CancellationToken); connection = _connectingWaitStatus switch { SemaphoreSlimSignalable.SemaphoreWaitResult.Signaled => _pool._connectionHolder.Acquire(), - SemaphoreSlimSignalable.SemaphoreWaitResult.Entered => CreateOpenedInternal(cancellationToken), - SemaphoreSlimSignalable.SemaphoreWaitResult.TimedOut => throw CreateTimeoutException(stopwatch), + SemaphoreSlimSignalable.SemaphoreWaitResult.Entered => CreateOpenedInternal(operationContext), + SemaphoreSlimSignalable.SemaphoreWaitResult.TimedOut => throw CreateTimeoutException(stopwatch.Elapsed), _ => throw new InvalidOperationException($"Invalid wait result {_connectingWaitStatus}") }; - - waitTimeout = _connectingTimeout - stopwatch.Elapsed; - - if (connection == null && waitTimeout <= TimeSpan.Zero) - { - throw CreateTimeoutException(stopwatch); - } } return connection; @@ -932,39 +922,31 @@ public PooledConnection CreateOpenedOrReuse(CancellationToken cancellationToken) } } - public async Task<PooledConnection> CreateOpenedOrReuseAsync(CancellationToken cancellationToken) + public async Task<PooledConnection> CreateOpenedOrReuseAsync(OperationContext operationContext, TimeSpan waitQueueTimeout) { try { var connection = _pool._connectionHolder.Acquire(); - - var waitTimeout = _connectingTimeout; var stopwatch = Stopwatch.StartNew(); while (connection == null) { _pool._poolState.ThrowIfNotReady(); + var waitTimeout = _pool.CalculateRemainingTimeout(waitQueueTimeout, stopwatch); // Try to acquire connecting semaphore. Possible operation results: // Entered: The request was successfully fulfilled, and a connection establishment can start // Signaled: The request was interrupted because Connection was return to pool and can be reused // Timeout: The request was timed out after WaitQueueTimeout period. 
- _connectingWaitStatus = await _pool._maxConnectingQueue.WaitSignaledAsync(waitTimeout, cancellationToken).ConfigureAwait(false); + _connectingWaitStatus = await _pool._maxConnectingQueue.WaitSignaledAsync(waitTimeout, operationContext.CancellationToken).ConfigureAwait(false); connection = _connectingWaitStatus switch { SemaphoreSlimSignalable.SemaphoreWaitResult.Signaled => _pool._connectionHolder.Acquire(), - SemaphoreSlimSignalable.SemaphoreWaitResult.Entered => await CreateOpenedInternalAsync(cancellationToken).ConfigureAwait(false), - SemaphoreSlimSignalable.SemaphoreWaitResult.TimedOut => throw CreateTimeoutException(stopwatch), + SemaphoreSlimSignalable.SemaphoreWaitResult.Entered => await CreateOpenedInternalAsync(operationContext).ConfigureAwait(false), + SemaphoreSlimSignalable.SemaphoreWaitResult.TimedOut => throw CreateTimeoutException(stopwatch.Elapsed), _ => throw new InvalidOperationException($"Invalid wait result {_connectingWaitStatus}") }; - - waitTimeout = _connectingTimeout - stopwatch.Elapsed; - - if (connection == null && waitTimeout <= TimeSpan.Zero) - { - throw CreateTimeoutException(stopwatch); - } } return connection; @@ -991,54 +973,53 @@ public void Dispose() } // private methods - private PooledConnection CreateOpenedInternal(CancellationToken cancellationToken) + private PooledConnection CreateOpenedInternal(OperationContext operationContext) { - StartCreating(cancellationToken); + var stopwatch = StartCreating(operationContext); - _connection.Open(cancellationToken); + _connection.Open(operationContext); - FinishCreating(_connection.Description); + FinishCreating(_connection.Description, stopwatch); return _connection; } - private async Task<PooledConnection> CreateOpenedInternalAsync(CancellationToken cancellationToken) + private async Task<PooledConnection> CreateOpenedInternalAsync(OperationContext operationContext) { - StartCreating(cancellationToken); + var stopwatch = StartCreating(operationContext); - await 
_connection.OpenAsync(cancellationToken).ConfigureAwait(false); + await _connection.OpenAsync(operationContext).ConfigureAwait(false); - FinishCreating(_connection.Description); + FinishCreating(_connection.Description, stopwatch); return _connection; } - private void StartCreating(CancellationToken cancellationToken) + private Stopwatch StartCreating(OperationContext operationContext) { _pool._eventLogger.LogAndPublish(new ConnectionPoolAddingConnectionEvent(_pool._serverId, EventContext.OperationId)); - cancellationToken.ThrowIfCancellationRequested(); - - _stopwatch = Stopwatch.StartNew(); + operationContext.ThrowIfTimedOutOrCanceled(); + var stopwatch = Stopwatch.StartNew(); _connection = _pool.CreateNewConnection(); + return stopwatch; } - private void FinishCreating(ConnectionDescription description) + private void FinishCreating(ConnectionDescription description, Stopwatch stopwatch) { - _stopwatch.Stop(); - - _pool._eventLogger.LogAndPublish(new ConnectionPoolAddedConnectionEvent(_connection.ConnectionId, _stopwatch.Elapsed, EventContext.OperationId)); + stopwatch.Stop(); + _pool._eventLogger.LogAndPublish(new ConnectionPoolAddedConnectionEvent(_connection.ConnectionId, stopwatch.Elapsed, EventContext.OperationId)); // Only if reached this stage, connection should not be disposed _disposeConnection = false; _pool._serviceStates.IncrementConnectionCount(description?.ServiceId); } - private Exception CreateTimeoutException(Stopwatch stopwatch) + private Exception CreateTimeoutException(TimeSpan elapsed) { - var message = $"Timed out waiting in connecting queue after {stopwatch.ElapsedMilliseconds}ms."; - return _pool.CreateTimeoutException(stopwatch, message); + var message = $"Timed out waiting in connecting queue after {elapsed.TotalMilliseconds}ms."; + return _pool.CreateTimeoutException(elapsed, message); } } } diff --git a/src/MongoDB.Driver/Core/ConnectionPools/ExclusiveConnectionPool.cs 
b/src/MongoDB.Driver/Core/ConnectionPools/ExclusiveConnectionPool.cs index 8490bba1b07..4e67ec42254 100644 --- a/src/MongoDB.Driver/Core/ConnectionPools/ExclusiveConnectionPool.cs +++ b/src/MongoDB.Driver/Core/ConnectionPools/ExclusiveConnectionPool.cs @@ -15,7 +15,6 @@ using System; using System.Net; -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Driver.Core.Configuration; @@ -140,16 +139,16 @@ public int UsedCount internal ListConnectionHolder ConnectionHolder => _connectionHolder; // public methods - public IConnectionHandle AcquireConnection(CancellationToken cancellationToken) + public IConnectionHandle AcquireConnection(OperationContext operationContext) { using var helper = new AcquireConnectionHelper(this); - return helper.AcquireConnection(cancellationToken); + return helper.AcquireConnection(operationContext); } - public async Task<IConnectionHandle> AcquireConnectionAsync(CancellationToken cancellationToken) + public async Task<IConnectionHandle> AcquireConnectionAsync(OperationContext operationContext) { using var helper = new AcquireConnectionHelper(this); - return await helper.AcquireConnectionAsync(cancellationToken).ConfigureAwait(false); + return await helper.AcquireConnectionAsync(operationContext).ConfigureAwait(false); } public void Clear(bool closeInUseConnections = false) diff --git a/src/MongoDB.Driver/Core/ConnectionPools/IConnectionPool.cs b/src/MongoDB.Driver/Core/ConnectionPools/IConnectionPool.cs index 24f2ff48e4e..599185c797a 100644 --- a/src/MongoDB.Driver/Core/ConnectionPools/IConnectionPool.cs +++ b/src/MongoDB.Driver/Core/ConnectionPools/IConnectionPool.cs @@ -14,7 +14,6 @@ */ using System; -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Driver.Core.Connections; @@ -27,8 +26,8 @@ internal interface IConnectionPool : IDisposable int Generation { get; } ServerId ServerId { get; } - IConnectionHandle AcquireConnection(CancellationToken 
cancellationToken); - Task<IConnectionHandle> AcquireConnectionAsync(CancellationToken cancellationToken); + IConnectionHandle AcquireConnection(OperationContext operationContext); + Task<IConnectionHandle> AcquireConnectionAsync(OperationContext operationContext); void Clear(bool closeInUseConnections = false); void Clear(ObjectId serviceId); int GetGeneration(ObjectId? serviceId); diff --git a/src/MongoDB.Driver/Core/ConnectionPools/MaintenanceHelper.cs b/src/MongoDB.Driver/Core/ConnectionPools/MaintenanceHelper.cs index a8f2bdfa12f..568ed1933c3 100644 --- a/src/MongoDB.Driver/Core/ConnectionPools/MaintenanceHelper.cs +++ b/src/MongoDB.Driver/Core/ConnectionPools/MaintenanceHelper.cs @@ -125,9 +125,9 @@ private void EnsureMinSize(CancellationToken cancellationToken) return; } - using (var connectionCreator = new ConnectionCreator(_connectionPool, minTimeout)) + using (var connectionCreator = new ConnectionCreator(_connectionPool)) { - var connection = connectionCreator.CreateOpened(cancellationToken); + var connection = connectionCreator.CreateOpened(minTimeout, cancellationToken); _connectionPool.ConnectionHolder.Return(connection); } } diff --git a/src/MongoDB.Driver/Core/Connections/BinaryConnection.cs b/src/MongoDB.Driver/Core/Connections/BinaryConnection.cs index 380526e8348..565cb3f9798 100644 --- a/src/MongoDB.Driver/Core/Connections/BinaryConnection.cs +++ b/src/MongoDB.Driver/Core/Connections/BinaryConnection.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -15,11 +15,8 @@ using System; using System.Buffers.Binary; -using System.Collections.Concurrent; -using System.Collections.Generic; using System.Diagnostics; using System.IO; -using System.Linq; using System.Net; using System.Threading; using System.Threading.Tasks; @@ -48,29 +45,31 @@ internal sealed class BinaryConnection : IConnection private ConnectionInitializerContext _connectionInitializerContext; private EndPoint _endPoint; private ConnectionDescription _description; - private readonly Dropbox _dropbox = new Dropbox(); private bool _failedEventHasBeenRaised; private DateTime _lastUsedAtUtc; private DateTime _openedAtUtc; private readonly object _openLock = new object(); private Task _openTask; - private readonly SemaphoreSlim _receiveLock; private CompressorType? _sendCompressorType; - private readonly SemaphoreSlim _sendLock; private readonly ConnectionSettings _settings; + private readonly TimeSpan _socketReadTimeout; + private readonly TimeSpan _socketWriteTimeout; private readonly InterlockedInt32 _state; private Stream _stream; private readonly IStreamFactory _streamFactory; private readonly EventLogger<LogCategories.Connection> _eventLogger; // constructors - public BinaryConnection(ServerId serverId, + public BinaryConnection( + ServerId serverId, EndPoint endPoint, ConnectionSettings settings, IStreamFactory streamFactory, IConnectionInitializer connectionInitializer, IEventSubscriber eventSubscriber, - ILoggerFactory loggerFactory) + ILoggerFactory loggerFactory, + TimeSpan socketReadTimeout, + TimeSpan socketWriteTimeout) { Ensure.IsNotNull(serverId, nameof(serverId)); _endPoint = Ensure.IsNotNull(endPoint, nameof(endPoint)); @@ -80,13 +79,13 @@ public BinaryConnection(ServerId serverId, Ensure.IsNotNull(eventSubscriber, nameof(eventSubscriber)); _connectionId = new ConnectionId(serverId, settings.ConnectionIdLocalValueProvider()); - _receiveLock = new SemaphoreSlim(1); - _sendLock = new SemaphoreSlim(1); _state = new 
InterlockedInt32(State.Initial); _compressorSource = new CompressorSource(settings.Compressors); _eventLogger = loggerFactory.CreateEventLogger<LogCategories.Connection>(eventSubscriber); _commandEventHelper = new CommandEventHelper(loggerFactory.CreateEventLogger<LogCategories.Command>(eventSubscriber)); + _socketReadTimeout = socketReadTimeout; + _socketWriteTimeout = socketWriteTimeout; } // properties @@ -174,9 +173,6 @@ private void Dispose(bool disposing) _eventLogger.LogAndPublish(new ConnectionClosingEvent(_connectionId, EventContext.OperationId)); var stopwatch = Stopwatch.StartNew(); - _receiveLock.Dispose(); - _sendLock.Dispose(); - if (_stream != null) { try @@ -205,9 +201,9 @@ private void EnsureMessageSizeIsValid(int messageSize) } } - public void Open(CancellationToken cancellationToken) + public void Open(OperationContext operationContext) { - ThrowIfCancelledOrDisposed(cancellationToken); + ThrowIfCancelledOrDisposed(operationContext); TaskCompletionSource<bool> taskCompletionSource = null; var connecting = false; @@ -227,7 +223,7 @@ public void Open(CancellationToken cancellationToken) { try { - OpenHelper(cancellationToken); + OpenHelper(operationContext); taskCompletionSource.TrySetResult(true); } catch (Exception ex) @@ -242,82 +238,98 @@ public void Open(CancellationToken cancellationToken) } } - public Task OpenAsync(CancellationToken cancellationToken) + public Task OpenAsync(OperationContext operationContext) { - ThrowIfCancelledOrDisposed(cancellationToken); + ThrowIfCancelledOrDisposed(operationContext); lock (_openLock) { if (_state.TryChange(State.Initial, State.Connecting)) { _openedAtUtc = DateTime.UtcNow; - _openTask = OpenHelperAsync(cancellationToken); + _openTask = OpenHelperAsync(operationContext); } return _openTask; } } - private void OpenHelper(CancellationToken cancellationToken) + private void OpenHelper(OperationContext operationContext) { var helper = new OpenConnectionHelper(this); ConnectionDescription 
handshakeDescription = null; try { helper.OpeningConnection(); - _stream = _streamFactory.CreateStream(_endPoint, cancellationToken); +#pragma warning disable CS0618 // Type or member is obsolete + _stream = _streamFactory.CreateStream(_endPoint, operationContext.CombinedCancellationToken); +#pragma warning restore CS0618 // Type or member is obsolete helper.InitializingConnection(); - _connectionInitializerContext = _connectionInitializer.SendHello(this, cancellationToken); + _connectionInitializerContext = _connectionInitializer.SendHello(operationContext, this); handshakeDescription = _connectionInitializerContext.Description; - _connectionInitializerContext = _connectionInitializer.Authenticate(this, _connectionInitializerContext, cancellationToken); + _connectionInitializerContext = _connectionInitializer.Authenticate(operationContext, this, _connectionInitializerContext); _description = _connectionInitializerContext.Description; _sendCompressorType = ChooseSendCompressorTypeIfAny(_description); helper.OpenedConnection(); } + catch (OperationCanceledException) when (operationContext.IsTimedOut()) + { + // OperationCanceledException could be thrown because of CombinedCancellationToken (see line 273), + // if we face it and operation context is timed out we should throw TimeoutException instead. + throw new TimeoutException(); + } catch (Exception ex) { _description ??= handshakeDescription; - var wrappedException = WrapExceptionIfRequired(ex, "opening a connection to the server"); + var wrappedException = WrapExceptionIfRequired(operationContext, ex, "opening a connection to the server"); helper.FailedOpeningConnection(wrappedException ?? 
ex); if (wrappedException == null) { throw; } else { throw wrappedException; } } } - private async Task OpenHelperAsync(CancellationToken cancellationToken) + private async Task OpenHelperAsync(OperationContext operationContext) { var helper = new OpenConnectionHelper(this); ConnectionDescription handshakeDescription = null; try { helper.OpeningConnection(); - _stream = await _streamFactory.CreateStreamAsync(_endPoint, cancellationToken).ConfigureAwait(false); +#pragma warning disable CS0618 // Type or member is obsolete + _stream = await _streamFactory.CreateStreamAsync(_endPoint, operationContext.CombinedCancellationToken).ConfigureAwait(false); +#pragma warning restore CS0618 // Type or member is obsolete helper.InitializingConnection(); - _connectionInitializerContext = await _connectionInitializer.SendHelloAsync(this, cancellationToken).ConfigureAwait(false); + _connectionInitializerContext = await _connectionInitializer.SendHelloAsync(operationContext, this).ConfigureAwait(false); handshakeDescription = _connectionInitializerContext.Description; - _connectionInitializerContext = await _connectionInitializer.AuthenticateAsync(this, _connectionInitializerContext, cancellationToken).ConfigureAwait(false); + _connectionInitializerContext = await _connectionInitializer.AuthenticateAsync(operationContext, this, _connectionInitializerContext).ConfigureAwait(false); _description = _connectionInitializerContext.Description; _sendCompressorType = ChooseSendCompressorTypeIfAny(_description); helper.OpenedConnection(); } + catch (OperationCanceledException) when (operationContext.IsTimedOut()) + { + // OperationCanceledException could be thrown because of CombinedCancellationToken (see line 307), + // if we face it and operation context is timed out we should throw TimeoutException instead. 
+ throw new TimeoutException(); + } catch (Exception ex) { _description ??= handshakeDescription; - var wrappedException = WrapExceptionIfRequired(ex, "opening a connection to the server"); + var wrappedException = WrapExceptionIfRequired(operationContext, ex, "opening a connection to the server"); helper.FailedOpeningConnection(wrappedException ?? ex); if (wrappedException == null) { throw; } else { throw wrappedException; } } } - public void Reauthenticate(CancellationToken cancellationToken) + public void Reauthenticate(OperationContext operationContext) { InvalidateAuthenticator(); - _connectionInitializerContext = _connectionInitializer.Authenticate(this, _connectionInitializerContext, cancellationToken); + _connectionInitializerContext = _connectionInitializer.Authenticate(operationContext, this, _connectionInitializerContext); } - public async Task ReauthenticateAsync(CancellationToken cancellationToken) + public async Task ReauthenticateAsync(OperationContext operationContext) { InvalidateAuthenticator(); - _connectionInitializerContext = await _connectionInitializer.AuthenticateAsync(this, _connectionInitializerContext, cancellationToken).ConfigureAwait(false); + _connectionInitializerContext = await _connectionInitializer.AuthenticateAsync(operationContext, this, _connectionInitializerContext).ConfigureAwait(false); } private void InvalidateAuthenticator() @@ -328,163 +340,82 @@ private void InvalidateAuthenticator() } } - private IByteBuffer ReceiveBuffer(CancellationToken cancellationToken) + private IByteBuffer ReceiveBuffer(OperationContext operationContext) { try { var messageSizeBytes = new byte[4]; - _stream.ReadBytes(messageSizeBytes, 0, 4, cancellationToken); + _stream.ReadBytes(operationContext, messageSizeBytes, 0, 4, _socketReadTimeout); var messageSize = BinaryPrimitives.ReadInt32LittleEndian(messageSizeBytes); EnsureMessageSizeIsValid(messageSize); var inputBufferChunkSource = new InputBufferChunkSource(BsonChunkPool.Default); var buffer = 
ByteBufferFactory.Create(inputBufferChunkSource, messageSize); buffer.Length = messageSize; buffer.SetBytes(0, messageSizeBytes, 0, 4); - _stream.ReadBytes(buffer, 4, messageSize - 4, cancellationToken); + _stream.ReadBytes(operationContext, buffer, 4, messageSize - 4, _socketReadTimeout); _lastUsedAtUtc = DateTime.UtcNow; buffer.MakeReadOnly(); return buffer; } catch (Exception ex) { - var wrappedException = WrapExceptionIfRequired(ex, "receiving a message from the server"); + var wrappedException = WrapExceptionIfRequired(operationContext, ex, "receiving a message from the server"); ConnectionFailed(wrappedException ?? ex); if (wrappedException == null) { throw; } else { throw wrappedException; } } } - private IByteBuffer ReceiveBuffer(int responseTo, CancellationToken cancellationToken) - { - using (var receiveLockRequest = new SemaphoreSlimRequest(_receiveLock, cancellationToken)) - { - var messageTask = _dropbox.GetMessageAsync(responseTo); - try - { - Task.WaitAny(messageTask, receiveLockRequest.Task); - if (messageTask.IsCompleted) - { - return _dropbox.RemoveMessage(responseTo); // also propagates exception if any - } - - receiveLockRequest.Task.GetAwaiter().GetResult(); // propagate exceptions - while (true) - { - try - { - var buffer = ReceiveBuffer(cancellationToken); - _dropbox.AddMessage(buffer); - } - catch (Exception ex) - { - _dropbox.AddException(ex); - } - - if (messageTask.IsCompleted) - { - return _dropbox.RemoveMessage(responseTo); // also propagates exception if any - } - - cancellationToken.ThrowIfCancellationRequested(); - } - } - catch - { - var ignored = messageTask.ContinueWith( - t => { _dropbox.RemoveMessage(responseTo).Dispose(); }, - TaskContinuationOptions.OnlyOnRanToCompletion); - throw; - } - } - } - - private async Task<IByteBuffer> ReceiveBufferAsync(CancellationToken cancellationToken) + private async Task<IByteBuffer> ReceiveBufferAsync(OperationContext operationContext) { try { var messageSizeBytes = new byte[4]; - var 
readTimeout = _stream.CanTimeout ? TimeSpan.FromMilliseconds(_stream.ReadTimeout) : Timeout.InfiniteTimeSpan; - await _stream.ReadBytesAsync(messageSizeBytes, 0, 4, readTimeout, cancellationToken).ConfigureAwait(false); + await _stream.ReadBytesAsync(operationContext, messageSizeBytes, 0, 4, _socketReadTimeout).ConfigureAwait(false); var messageSize = BinaryPrimitives.ReadInt32LittleEndian(messageSizeBytes); EnsureMessageSizeIsValid(messageSize); var inputBufferChunkSource = new InputBufferChunkSource(BsonChunkPool.Default); var buffer = ByteBufferFactory.Create(inputBufferChunkSource, messageSize); buffer.Length = messageSize; buffer.SetBytes(0, messageSizeBytes, 0, 4); - await _stream.ReadBytesAsync(buffer, 4, messageSize - 4, readTimeout, cancellationToken).ConfigureAwait(false); + await _stream.ReadBytesAsync(operationContext, buffer, 4, messageSize - 4, _socketReadTimeout).ConfigureAwait(false); _lastUsedAtUtc = DateTime.UtcNow; buffer.MakeReadOnly(); return buffer; } catch (Exception ex) { - var wrappedException = WrapExceptionIfRequired(ex, "receiving a message from the server"); + var wrappedException = WrapExceptionIfRequired(operationContext, ex, "receiving a message from the server"); ConnectionFailed(wrappedException ?? 
ex); if (wrappedException == null) { throw; } else { throw wrappedException; } } } - private async Task<IByteBuffer> ReceiveBufferAsync(int responseTo, CancellationToken cancellationToken) - { - using (var receiveLockRequest = new SemaphoreSlimRequest(_receiveLock, cancellationToken)) - { - var messageTask = _dropbox.GetMessageAsync(responseTo); - try - { - await Task.WhenAny(messageTask, receiveLockRequest.Task).ConfigureAwait(false); - if (messageTask.IsCompleted) - { - return _dropbox.RemoveMessage(responseTo); // also propagates exception if any - } - - receiveLockRequest.Task.GetAwaiter().GetResult(); // propagate exceptions - while (true) - { - try - { - var buffer = await ReceiveBufferAsync(cancellationToken).ConfigureAwait(false); - _dropbox.AddMessage(buffer); - } - catch (Exception ex) - { - _dropbox.AddException(ex); - } - - if (messageTask.IsCompleted) - { - return _dropbox.RemoveMessage(responseTo); // also propagates exception if any - } - - cancellationToken.ThrowIfCancellationRequested(); - } - } - catch - { - var ignored = messageTask.ContinueWith( - t => { _dropbox.RemoveMessage(responseTo).Dispose(); }, - TaskContinuationOptions.OnlyOnRanToCompletion); - throw; - } - } - } - public ResponseMessage ReceiveMessage( + OperationContext operationContext, int responseTo, IMessageEncoderSelector encoderSelector, - MessageEncoderSettings messageEncoderSettings, - CancellationToken cancellationToken) + MessageEncoderSettings messageEncoderSettings) { Ensure.IsNotNull(encoderSelector, nameof(encoderSelector)); - ThrowIfCancelledOrDisposedOrNotOpen(cancellationToken); + ThrowIfCancelledOrDisposedOrNotOpen(operationContext); var helper = new ReceiveMessageHelper(this, responseTo, messageEncoderSettings, _compressorSource); try { helper.ReceivingMessage(); - using (var buffer = ReceiveBuffer(responseTo, cancellationToken)) + while (true) { - var message = helper.DecodeMessage(buffer, encoderSelector, cancellationToken); - helper.ReceivedMessage(buffer, 
message); - return message; + using (var buffer = ReceiveBuffer(operationContext)) + { + if (responseTo != GetResponseTo(buffer)) + { + continue; + } + + var message = helper.DecodeMessage(operationContext, buffer, encoderSelector); + helper.ReceivedMessage(buffer, message); + return message; + } } } catch (Exception ex) @@ -496,23 +427,31 @@ public ResponseMessage ReceiveMessage( } public async Task<ResponseMessage> ReceiveMessageAsync( + OperationContext operationContext, int responseTo, IMessageEncoderSelector encoderSelector, - MessageEncoderSettings messageEncoderSettings, - CancellationToken cancellationToken) + MessageEncoderSettings messageEncoderSettings) { Ensure.IsNotNull(encoderSelector, nameof(encoderSelector)); - ThrowIfCancelledOrDisposedOrNotOpen(cancellationToken); + ThrowIfCancelledOrDisposedOrNotOpen(operationContext); var helper = new ReceiveMessageHelper(this, responseTo, messageEncoderSettings, _compressorSource); try { helper.ReceivingMessage(); - using (var buffer = await ReceiveBufferAsync(responseTo, cancellationToken).ConfigureAwait(false)) + while (true) { - var message = helper.DecodeMessage(buffer, encoderSelector, cancellationToken); - helper.ReceivedMessage(buffer, message); - return message; + using (var buffer = await ReceiveBufferAsync(operationContext).ConfigureAwait(false)) + { + if (responseTo != GetResponseTo(buffer)) + { + continue; + } + + var message = helper.DecodeMessage(operationContext, buffer, encoderSelector); + helper.ReceivedMessage(buffer, message); + return message; + } } } catch (Exception ex) @@ -523,147 +462,130 @@ public async Task<ResponseMessage> ReceiveMessageAsync( } } - private void SendBuffer(IByteBuffer buffer, CancellationToken cancellationToken) + private int GetResponseTo(IByteBuffer message) { - _sendLock.Wait(cancellationToken); - try + var backingBytes = message.AccessBackingBytes(8); + return BitConverter.ToInt32(backingBytes.Array, backingBytes.Offset); + } + + private void 
SendBuffer(OperationContext operationContext, IByteBuffer buffer) + { + if (_state.Value == State.Failed) { - if (_state.Value == State.Failed) - { - throw new MongoConnectionClosedException(_connectionId); - } + throw new MongoConnectionClosedException(_connectionId); + } - try - { - _stream.WriteBytes(buffer, 0, buffer.Length, cancellationToken); - _lastUsedAtUtc = DateTime.UtcNow; - } - catch (Exception ex) - { - var wrappedException = WrapExceptionIfRequired(ex, "sending a message to the server"); - ConnectionFailed(wrappedException ?? ex); - if (wrappedException == null) { throw; } else { throw wrappedException; } - } + try + { + _stream.WriteBytes(operationContext, buffer, 0, buffer.Length, _socketWriteTimeout); + _lastUsedAtUtc = DateTime.UtcNow; } - finally + catch (Exception ex) { - _sendLock.Release(); + var wrappedException = WrapExceptionIfRequired(operationContext, ex, "sending a message to the server"); + ConnectionFailed(wrappedException ?? ex); + if (wrappedException == null) { throw; } else { throw wrappedException; } } } - private async Task SendBufferAsync(IByteBuffer buffer, CancellationToken cancellationToken) + private async Task SendBufferAsync(OperationContext operationContext, IByteBuffer buffer) { - await _sendLock.WaitAsync(cancellationToken).ConfigureAwait(false); - try + if (_state.Value == State.Failed) { - if (_state.Value == State.Failed) - { - throw new MongoConnectionClosedException(_connectionId); - } + throw new MongoConnectionClosedException(_connectionId); + } - try - { - var writeTimeout = _stream.CanTimeout ? TimeSpan.FromMilliseconds(_stream.WriteTimeout) : Timeout.InfiniteTimeSpan; - await _stream.WriteBytesAsync(buffer, 0, buffer.Length, writeTimeout, cancellationToken).ConfigureAwait(false); - _lastUsedAtUtc = DateTime.UtcNow; - } - catch (Exception ex) - { - var wrappedException = WrapExceptionIfRequired(ex, "sending a message to the server"); - ConnectionFailed(wrappedException ?? 
ex); - if (wrappedException == null) { throw; } else { throw wrappedException; } - } + try + { + await _stream.WriteBytesAsync(operationContext, buffer, 0, buffer.Length, _socketWriteTimeout).ConfigureAwait(false); + _lastUsedAtUtc = DateTime.UtcNow; } - finally + catch (Exception ex) { - _sendLock.Release(); + var wrappedException = WrapExceptionIfRequired(operationContext, ex, "sending a message to the server"); + ConnectionFailed(wrappedException ?? ex); + if (wrappedException == null) { throw; } else { throw wrappedException; } } } - public void SendMessages(IEnumerable<RequestMessage> messages, MessageEncoderSettings messageEncoderSettings, CancellationToken cancellationToken) + public void SendMessage(OperationContext operationContext, RequestMessage message, MessageEncoderSettings messageEncoderSettings) { - Ensure.IsNotNull(messages, nameof(messages)); - ThrowIfCancelledOrDisposedOrNotOpen(cancellationToken); + Ensure.IsNotNull(message, nameof(message)); + ThrowIfCancelledOrDisposedOrNotOpen(operationContext); - var helper = new SendMessagesHelper(this, messages, messageEncoderSettings); + var helper = new SendMessageHelper(this, message, messageEncoderSettings); try { - helper.EncodingMessages(); - using (var uncompressedBuffer = helper.EncodeMessages(cancellationToken, out var sentMessages)) + helper.EncodingMessage(); + using (var uncompressedBuffer = helper.EncodeMessage(operationContext, out var sentMessage)) { - helper.SendingMessages(uncompressedBuffer); + helper.SendingMessage(uncompressedBuffer); int sentLength; - if (AnyMessageNeedsToBeCompressed(sentMessages)) + if (ShouldBeCompressed(sentMessage)) { - using (var compressedBuffer = CompressMessages(sentMessages, uncompressedBuffer, messageEncoderSettings)) + using (var compressedBuffer = CompressMessage(sentMessage, uncompressedBuffer, messageEncoderSettings)) { - SendBuffer(compressedBuffer, cancellationToken); + SendBuffer(operationContext, compressedBuffer); sentLength = 
compressedBuffer.Length; } } else { - SendBuffer(uncompressedBuffer, cancellationToken); + SendBuffer(operationContext, uncompressedBuffer); sentLength = uncompressedBuffer.Length; } - helper.SentMessages(sentLength); + helper.SentMessage(sentLength); } } catch (Exception ex) { - helper.FailedSendingMessages(ex); + helper.FailedSendingMessage(ex); ThrowOperationCanceledExceptionIfRequired(ex); throw; } } - public async Task SendMessagesAsync(IEnumerable<RequestMessage> messages, MessageEncoderSettings messageEncoderSettings, CancellationToken cancellationToken) + public async Task SendMessageAsync(OperationContext operationContext, RequestMessage message, MessageEncoderSettings messageEncoderSettings) { - Ensure.IsNotNull(messages, nameof(messages)); - ThrowIfCancelledOrDisposedOrNotOpen(cancellationToken); + Ensure.IsNotNull(message, nameof(message)); + ThrowIfCancelledOrDisposedOrNotOpen(operationContext); - var helper = new SendMessagesHelper(this, messages, messageEncoderSettings); + var helper = new SendMessageHelper(this, message, messageEncoderSettings); try { - helper.EncodingMessages(); - using (var uncompressedBuffer = helper.EncodeMessages(cancellationToken, out var sentMessages)) + helper.EncodingMessage(); + using (var uncompressedBuffer = helper.EncodeMessage(operationContext, out var sentMessage)) { - helper.SendingMessages(uncompressedBuffer); + helper.SendingMessage(uncompressedBuffer); int sentLength; - if (AnyMessageNeedsToBeCompressed(sentMessages)) + if (ShouldBeCompressed(sentMessage)) { - using (var compressedBuffer = CompressMessages(sentMessages, uncompressedBuffer, messageEncoderSettings)) + using (var compressedBuffer = CompressMessage(sentMessage, uncompressedBuffer, messageEncoderSettings)) { - await SendBufferAsync(compressedBuffer, cancellationToken).ConfigureAwait(false); + await SendBufferAsync(operationContext, compressedBuffer).ConfigureAwait(false); sentLength = compressedBuffer.Length; } } else { - await 
SendBufferAsync(uncompressedBuffer, cancellationToken).ConfigureAwait(false); + await SendBufferAsync(operationContext, uncompressedBuffer).ConfigureAwait(false); sentLength = uncompressedBuffer.Length; } - helper.SentMessages(sentLength); + helper.SentMessage(sentLength); } } catch (Exception ex) { - helper.FailedSendingMessages(ex); + helper.FailedSendingMessage(ex); ThrowOperationCanceledExceptionIfRequired(ex); throw; } } - public void SetReadTimeout(TimeSpan timeout) - { - ThrowIfDisposed(); - _stream.ReadTimeout = (int)timeout.TotalMilliseconds; - } - // private methods - private bool AnyMessageNeedsToBeCompressed(IEnumerable<RequestMessage> messages) + private bool ShouldBeCompressed(RequestMessage message) { - return _sendCompressorType.HasValue && messages.Any(m => m.MayBeCompressed); + return _sendCompressorType.HasValue && message.MayBeCompressed; } private CompressorType? ChooseSendCompressorTypeIfAny(ConnectionDescription connectionDescription) @@ -672,8 +594,8 @@ private bool AnyMessageNeedsToBeCompressed(IEnumerable<RequestMessage> messages) return availableCompressors.Count > 0 ? 
(CompressorType?)availableCompressors[0] : null; } - private IByteBuffer CompressMessages( - IEnumerable<RequestMessage> messages, + private IByteBuffer CompressMessage( + RequestMessage message, IByteBuffer uncompressedBuffer, MessageEncoderSettings messageEncoderSettings) { @@ -683,24 +605,22 @@ private IByteBuffer CompressMessages( using (var uncompressedStream = new ByteBufferStream(uncompressedBuffer, ownsBuffer: false)) using (var compressedStream = new ByteBufferStream(compressedBuffer, ownsBuffer: false)) { - foreach (var message in messages) - { - var uncompressedMessageLength = uncompressedStream.ReadInt32(); - uncompressedStream.Position -= 4; + var uncompressedMessageLength = uncompressedStream.ReadInt32(); + uncompressedStream.Position -= 4; - using (var uncompressedMessageSlice = uncompressedBuffer.GetSlice((int)uncompressedStream.Position, uncompressedMessageLength)) - using (var uncompressedMessageStream = new ByteBufferStream(uncompressedMessageSlice, ownsBuffer: false)) + using (var uncompressedMessageSlice = uncompressedBuffer.GetSlice((int)uncompressedStream.Position, uncompressedMessageLength)) + using (var uncompressedMessageStream = new ByteBufferStream(uncompressedMessageSlice, ownsBuffer: false)) + { + if (message.MayBeCompressed) { - if (message.MayBeCompressed) - { - CompressMessage(message, uncompressedMessageStream, compressedStream, messageEncoderSettings); - } - else - { - uncompressedMessageStream.EfficientCopyTo(compressedStream); - } + CompressMessage(message, uncompressedMessageStream, compressedStream, messageEncoderSettings); + } + else + { + uncompressedMessageStream.EfficientCopyTo(compressedStream); } } + compressedBuffer.Length = (int)compressedStream.Length; } @@ -719,15 +639,15 @@ private void CompressMessage( compressedMessageEncoder.WriteMessage(compressedMessage); } - private void ThrowIfCancelledOrDisposed(CancellationToken cancellationToken = default) + private void ThrowIfCancelledOrDisposed(OperationContext 
operationContext) { - cancellationToken.ThrowIfCancellationRequested(); + operationContext.ThrowIfTimedOutOrCanceled(); ThrowIfDisposed(); } - private void ThrowIfCancelledOrDisposedOrNotOpen(CancellationToken cancellationToken) + private void ThrowIfCancelledOrDisposedOrNotOpen(OperationContext operationContext) { - ThrowIfCancelledOrDisposed(cancellationToken); + ThrowIfCancelledOrDisposed(operationContext); if (_state.Value == State.Failed) { throw new MongoConnectionClosedException(_connectionId); @@ -746,10 +666,14 @@ private void ThrowIfDisposed() } } - private Exception WrapExceptionIfRequired(Exception ex, string action) + private Exception WrapExceptionIfRequired(OperationContext operationContext, Exception ex, string action) { - if ( - ex is ThreadAbortException || + if (ex is TimeoutException && operationContext.IsRootContextTimeoutConfigured()) + { + return null; + } + + if (ex is ThreadAbortException || ex is StackOverflowException || ex is MongoAuthenticationException || ex is OutOfMemoryException || @@ -758,11 +682,9 @@ ex is OperationCanceledException || { return null; } - else - { - var message = string.Format("An exception occurred while {0}.", action); - return new MongoConnectionException(_connectionId, message, ex); - } + + var message = string.Format("An exception occurred while {0}.", action); + return new MongoConnectionException(_connectionId, message, ex); } private void ThrowOperationCanceledExceptionIfRequired(Exception exception) @@ -779,47 +701,6 @@ private void ThrowOperationCanceledExceptionIfRequired(Exception exception) } // nested classes - private class Dropbox - { - private readonly ConcurrentDictionary<int, TaskCompletionSource<IByteBuffer>> _messages = new ConcurrentDictionary<int, TaskCompletionSource<IByteBuffer>>(); - - // public methods - public void AddException(Exception exception) - { - foreach (var taskCompletionSource in _messages.Values) - { - taskCompletionSource.TrySetException(exception); // has no effect on 
already completed tasks - } - } - - public void AddMessage(IByteBuffer message) - { - var responseTo = GetResponseTo(message); - var tcs = _messages.GetOrAdd(responseTo, x => new TaskCompletionSource<IByteBuffer>()); - tcs.TrySetResult(message); - } - - public Task<IByteBuffer> GetMessageAsync(int responseTo) - { - var tcs = _messages.GetOrAdd(responseTo, _ => new TaskCompletionSource<IByteBuffer>()); - return tcs.Task; - } - - public IByteBuffer RemoveMessage(int responseTo) - { - TaskCompletionSource<IByteBuffer> tcs; - _messages.TryRemove(responseTo, out tcs); - return tcs.Task.GetAwaiter().GetResult(); // RemoveMessage is only called when Task is complete - } - - // private methods - private int GetResponseTo(IByteBuffer message) - { - var backingBytes = message.AccessBackingBytes(8); - return BinaryPrimitives.ReadInt32LittleEndian(new ReadOnlySpan<byte>(backingBytes.Array, backingBytes.Offset, 4)); - } - } - private class OpenConnectionHelper { private readonly BinaryConnection _connection; @@ -907,9 +788,9 @@ public ReceiveMessageHelper(BinaryConnection connection, int responseTo, Message _messageEncoderSettings = messageEncoderSettings; } - public ResponseMessage DecodeMessage(IByteBuffer buffer, IMessageEncoderSelector encoderSelector, CancellationToken cancellationToken) + public ResponseMessage DecodeMessage(OperationContext operationContext, IByteBuffer buffer, IMessageEncoderSelector encoderSelector) { - cancellationToken.ThrowIfCancellationRequested(); + operationContext.ThrowIfTimedOutOrCanceled(); _stopwatch.Stop(); _networkDuration = _stopwatch.Elapsed; @@ -976,30 +857,28 @@ private Opcode PeekOpcode(BsonStream stream) } } - private class SendMessagesHelper + private class SendMessageHelper { private readonly Stopwatch _commandStopwatch; private readonly BinaryConnection _connection; private readonly MessageEncoderSettings _messageEncoderSettings; - private readonly List<RequestMessage> _messages; - private Lazy<List<int>> _requestIds; + private 
readonly RequestMessage _message; private TimeSpan _serializationDuration; private Stopwatch _networkStopwatch; - public SendMessagesHelper(BinaryConnection connection, IEnumerable<RequestMessage> messages, MessageEncoderSettings messageEncoderSettings) + public SendMessageHelper(BinaryConnection connection, RequestMessage message, MessageEncoderSettings messageEncoderSettings) { _connection = connection; - _messages = messages.ToList(); + _message = message; _messageEncoderSettings = messageEncoderSettings; _commandStopwatch = Stopwatch.StartNew(); - _requestIds = new Lazy<List<int>>(() => _messages.Select(m => m.RequestId).ToList()); } - public IByteBuffer EncodeMessages(CancellationToken cancellationToken, out List<RequestMessage> sentMessages) + public IByteBuffer EncodeMessage(OperationContext operationContext, out RequestMessage sentMessage) { - sentMessages = new List<RequestMessage>(); - cancellationToken.ThrowIfCancellationRequested(); + sentMessage = null; + operationContext.ThrowIfTimedOutOrCanceled(); var serializationStopwatch = Stopwatch.StartNew(); var outputBufferChunkSource = new OutputBufferChunkSource(BsonChunkPool.Default); @@ -1007,21 +886,17 @@ public IByteBuffer EncodeMessages(CancellationToken cancellationToken, out List< using (var stream = new ByteBufferStream(buffer, ownsBuffer: false)) { var encoderFactory = new BinaryMessageEncoderFactory(stream, _messageEncoderSettings, compressorSource: null); - foreach (var message in _messages) - { - if (message.ShouldBeSent == null || message.ShouldBeSent()) - { - var encoder = message.GetEncoder(encoderFactory); - encoder.WriteMessage(message); - message.WasSent = true; - sentMessages.Add(message); - } - // Encoding messages includes serializing the - // documents, so encoding message could be expensive - // and worthy of us honoring cancellation here. 
- cancellationToken.ThrowIfCancellationRequested(); - } + var encoder = _message.GetEncoder(encoderFactory); + encoder.WriteMessage(_message); + _message.WasSent = true; + sentMessage = _message; + + // Encoding messages includes serializing the + // documents, so encoding message could be expensive + // and worthy of us honoring cancellation here. + operationContext.ThrowIfTimedOutOrCanceled(); + buffer.Length = (int)stream.Length; buffer.MakeReadOnly(); } @@ -1031,42 +906,42 @@ public IByteBuffer EncodeMessages(CancellationToken cancellationToken, out List< return buffer; } - public void EncodingMessages() + public void EncodingMessage() { - _connection._eventLogger.LogAndPublish(new ConnectionSendingMessagesEvent(_connection.ConnectionId, _requestIds.Value, EventContext.OperationId)); + _connection._eventLogger.LogAndPublish(new ConnectionSendingMessagesEvent(_connection.ConnectionId, _message.RequestId, EventContext.OperationId)); } - public void FailedSendingMessages(Exception ex) + public void FailedSendingMessage(Exception ex) { if (_connection._commandEventHelper.ShouldCallErrorSending) { - _connection._commandEventHelper.ErrorSending(_messages, _connection._connectionId, _connection._description?.ServiceId, ex, _connection.IsInitializing); + _connection._commandEventHelper.ErrorSending(_message, _connection._connectionId, _connection._description?.ServiceId, ex, _connection.IsInitializing); } - _connection._eventLogger.LogAndPublish(new ConnectionSendingMessagesFailedEvent(_connection.ConnectionId, _requestIds.Value, ex, EventContext.OperationId)); + _connection._eventLogger.LogAndPublish(new ConnectionSendingMessagesFailedEvent(_connection.ConnectionId, _message.RequestId, ex, EventContext.OperationId)); } - public void SendingMessages(IByteBuffer buffer) + public void SendingMessage(IByteBuffer buffer) { if (_connection._commandEventHelper.ShouldCallBeforeSending) { - _connection._commandEventHelper.BeforeSending(_messages, _connection.ConnectionId, 
_connection.Description?.ServiceId, buffer, _messageEncoderSettings, _commandStopwatch, _connection.IsInitializing); + _connection._commandEventHelper.BeforeSending(_message, _connection.ConnectionId, _connection.Description?.ServiceId, buffer, _messageEncoderSettings, _commandStopwatch, _connection.IsInitializing); } _networkStopwatch = Stopwatch.StartNew(); } - public void SentMessages(int bufferLength) + public void SentMessage(int bufferLength) { _networkStopwatch.Stop(); var networkDuration = _networkStopwatch.Elapsed; if (_connection._commandEventHelper.ShouldCallAfterSending) { - _connection._commandEventHelper.AfterSending(_messages, _connection._connectionId, _connection.Description?.ServiceId, _connection.IsInitializing); + _connection._commandEventHelper.AfterSending(_message, _connection._connectionId, _connection.Description?.ServiceId, _connection.IsInitializing); } - _connection._eventLogger.LogAndPublish(new ConnectionSentMessagesEvent(_connection.ConnectionId, _requestIds.Value, bufferLength, networkDuration, _serializationDuration, EventContext.OperationId)); + _connection._eventLogger.LogAndPublish(new ConnectionSentMessagesEvent(_connection.ConnectionId, _message.RequestId, bufferLength, networkDuration, _serializationDuration, EventContext.OperationId)); } } diff --git a/src/MongoDB.Driver/Core/Connections/BinaryConnectionFactory.cs b/src/MongoDB.Driver/Core/Connections/BinaryConnectionFactory.cs index 54e98c9db21..591814e4422 100644 --- a/src/MongoDB.Driver/Core/Connections/BinaryConnectionFactory.cs +++ b/src/MongoDB.Driver/Core/Connections/BinaryConnectionFactory.cs @@ -13,7 +13,9 @@ * limitations under the License. 
*/ +using System; using System.Net; +using System.Threading; using Microsoft.Extensions.Logging; using MongoDB.Driver.Core.Configuration; using MongoDB.Driver.Core.Events; @@ -30,6 +32,9 @@ internal sealed class BinaryConnectionFactory : IConnectionFactory private readonly ILoggerFactory _loggerFactory; private readonly ConnectionSettings _settings; private readonly IStreamFactory _streamFactory; + // TODO: CSOT: temporary here, remove on the next major release, together with socketTimeout + private readonly TimeSpan _socketReadTimeout; + private readonly TimeSpan _socketWriteTimeout; // constructors public BinaryConnectionFactory( @@ -37,13 +42,17 @@ public BinaryConnectionFactory( IStreamFactory streamFactory, IEventSubscriber eventSubscriber, ServerApi serverApi, - ILoggerFactory loggerFactory) + ILoggerFactory loggerFactory, + TimeSpan? socketReadTimeout, + TimeSpan? socketWriteTimeout) { _settings = Ensure.IsNotNull(settings, nameof(settings)); _streamFactory = Ensure.IsNotNull(streamFactory, nameof(streamFactory)); _eventSubscriber = Ensure.IsNotNull(eventSubscriber, nameof(eventSubscriber)); _connectionInitializer = new ConnectionInitializer(settings.ApplicationName, settings.Compressors, serverApi, settings.LibraryInfo); _loggerFactory = loggerFactory; + _socketReadTimeout = socketReadTimeout.HasValue && socketReadTimeout > TimeSpan.Zero ? socketReadTimeout.Value : Timeout.InfiniteTimeSpan; + _socketWriteTimeout = socketWriteTimeout.HasValue && socketWriteTimeout > TimeSpan.Zero ? 
socketWriteTimeout.Value : Timeout.InfiniteTimeSpan; } // properties @@ -54,7 +63,15 @@ public IConnection CreateConnection(ServerId serverId, EndPoint endPoint) { Ensure.IsNotNull(serverId, nameof(serverId)); Ensure.IsNotNull(endPoint, nameof(endPoint)); - return new BinaryConnection(serverId, endPoint, _settings, _streamFactory, _connectionInitializer, _eventSubscriber, _loggerFactory); + return new BinaryConnection(serverId, + endPoint, + _settings, + _streamFactory, + _connectionInitializer, + _eventSubscriber, + _loggerFactory, + _socketReadTimeout, + _socketWriteTimeout); } } } diff --git a/src/MongoDB.Driver/Core/Connections/CommandEventHelper.cs b/src/MongoDB.Driver/Core/Connections/CommandEventHelper.cs index 5b64bf2a545..1678c55c273 100644 --- a/src/MongoDB.Driver/Core/Connections/CommandEventHelper.cs +++ b/src/MongoDB.Driver/Core/Connections/CommandEventHelper.cs @@ -1,4 +1,4 @@ -/* Copyright 2015-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,7 +15,6 @@ using System; using System.Collections.Concurrent; -using System.Collections.Generic; using System.Diagnostics; using System.IO; using System.Linq; @@ -83,7 +82,7 @@ public bool ShouldCallErrorReceiving } public void BeforeSending( - IEnumerable<RequestMessage> messages, + RequestMessage message, ConnectionId connectionId, ObjectId? 
serviceId, IByteBuffer buffer, @@ -93,28 +92,21 @@ public void BeforeSending( { using (var stream = new ByteBufferStream(buffer, ownsBuffer: false)) { - var messageQueue = new Queue<RequestMessage>(messages); - - while (messageQueue.Count > 0) - { - ProcessRequestMessages(messageQueue, connectionId, serviceId, stream, encoderSettings, stopwatch, skipLogging); - } + ProcessRequestMessage(message, connectionId, serviceId, stream, encoderSettings, stopwatch, skipLogging); } } - public void AfterSending(IEnumerable<RequestMessage> messages, ConnectionId connectionId, ObjectId? serviceId, bool skipLogging) + public void AfterSending(RequestMessage message, ConnectionId connectionId, ObjectId? serviceId, bool skipLogging) { - foreach (var message in messages) + CommandState state; + if (_state.TryGetValue(message.RequestId, out state) && + state.ExpectedResponseType == ExpectedResponseType.None) { - CommandState state; - if (_state.TryGetValue(message.RequestId, out state) && - state.ExpectedResponseType == ExpectedResponseType.None) - { - state.Stopwatch.Stop(); + state.Stopwatch.Stop(); - if (_shouldTrackSucceeded) - { - _eventLogger.LogAndPublish(new CommandSucceededEvent( + if (_shouldTrackSucceeded) + { + _eventLogger.LogAndPublish(new CommandSucceededEvent( state.CommandName, new BsonDocument("ok", 1), state.QueryNamespace.DatabaseNamespace, @@ -123,22 +115,20 @@ public void AfterSending(IEnumerable<RequestMessage> messages, ConnectionId conn connectionId, serviceId, state.Stopwatch.Elapsed), - skipLogging); - } - _state.TryRemove(message.RequestId, out state); + skipLogging); } + + _state.TryRemove(message.RequestId, out state); } } - public void ErrorSending(IEnumerable<RequestMessage> messages, ConnectionId connectionId, ObjectId? serviceId, Exception exception, bool skipLogging) + public void ErrorSending(RequestMessage message, ConnectionId connectionId, ObjectId? 
serviceId, Exception exception, bool skipLogging) { - foreach (var message in messages) + CommandState state; + if (_state.TryRemove(message.RequestId, out state)) { - CommandState state; - if (_state.TryRemove(message.RequestId, out state)) - { - state.Stopwatch.Stop(); - _eventLogger.LogAndPublish(new CommandFailedEvent( + state.Stopwatch.Stop(); + _eventLogger.LogAndPublish(new CommandFailedEvent( state.CommandName, state.QueryNamespace.DatabaseNamespace, exception, @@ -147,8 +137,7 @@ public void ErrorSending(IEnumerable<RequestMessage> messages, ConnectionId conn connectionId, serviceId, state.Stopwatch.Elapsed), - skipLogging); - } + skipLogging); } } @@ -222,13 +211,12 @@ public void ConnectionFailed(ConnectionId connectionId, ObjectId? serviceId, Exc } } - private void ProcessRequestMessages(Queue<RequestMessage> messageQueue, ConnectionId connectionId, ObjectId? serviceId, Stream stream, MessageEncoderSettings encoderSettings, Stopwatch stopwatch, bool skipLogging) + private void ProcessRequestMessage(RequestMessage message, ConnectionId connectionId, ObjectId? 
serviceId, Stream stream, MessageEncoderSettings encoderSettings, Stopwatch stopwatch, bool skipLogging) { - var message = messageQueue.Dequeue(); switch (message.MessageType) { case MongoDBMessageType.Command: - ProcessCommandRequestMessage((CommandRequestMessage)message, messageQueue, connectionId, serviceId, new CommandMessageBinaryEncoder(stream, encoderSettings), stopwatch, skipLogging); + ProcessCommandRequestMessage((CommandRequestMessage)message, connectionId, serviceId, new CommandMessageBinaryEncoder(stream, encoderSettings), stopwatch, skipLogging); break; case MongoDBMessageType.Query: ProcessQueryMessage((QueryMessage)message, connectionId, new QueryMessageBinaryEncoder(stream, encoderSettings), stopwatch, skipLogging); @@ -238,9 +226,9 @@ private void ProcessRequestMessages(Queue<RequestMessage> messageQueue, Connecti } } - private void ProcessCommandRequestMessage(CommandRequestMessage originalMessage, Queue<RequestMessage> messageQueue, ConnectionId connectionId, ObjectId? serviceId, CommandMessageBinaryEncoder encoder, Stopwatch stopwatch, bool skipLogging) + private void ProcessCommandRequestMessage(CommandRequestMessage message, ConnectionId connectionId, ObjectId? serviceId, CommandMessageBinaryEncoder encoder, Stopwatch stopwatch, bool skipLogging) { - var requestId = originalMessage.RequestId; + var requestId = message.RequestId; var operationId = EventContext.OperationId; var decodedMessage = encoder.ReadMessage(); diff --git a/src/MongoDB.Driver/Core/Connections/ConnectionExtensions.cs b/src/MongoDB.Driver/Core/Connections/ConnectionExtensions.cs deleted file mode 100644 index 3b6b0bcb086..00000000000 --- a/src/MongoDB.Driver/Core/Connections/ConnectionExtensions.cs +++ /dev/null @@ -1,39 +0,0 @@ -/* Copyright 2010-present MongoDB Inc. -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. 
-* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ - -using System.Threading; -using System.Threading.Tasks; -using MongoDB.Driver.Core.Misc; -using MongoDB.Driver.Core.WireProtocol.Messages; -using MongoDB.Driver.Core.WireProtocol.Messages.Encoders; - -namespace MongoDB.Driver.Core.Connections -{ - internal static class ConnectionExtensions - { - // static methods - public static void SendMessage(this IConnection connection, RequestMessage message, MessageEncoderSettings messageEncoderSettings, CancellationToken cancellationToken) - { - Ensure.IsNotNull(connection, nameof(connection)); - connection.SendMessages(new[] { message }, messageEncoderSettings, cancellationToken); - } - - public static Task SendMessageAsync(this IConnection connection, RequestMessage message, MessageEncoderSettings messageEncoderSettings, CancellationToken cancellationToken) - { - Ensure.IsNotNull(connection, nameof(connection)); - return connection.SendMessagesAsync(new[] { message }, messageEncoderSettings, cancellationToken); - } - } -} diff --git a/src/MongoDB.Driver/Core/Connections/ConnectionInitializer.cs b/src/MongoDB.Driver/Core/Connections/ConnectionInitializer.cs index 851fd96b82f..07616e028af 100644 --- a/src/MongoDB.Driver/Core/Connections/ConnectionInitializer.cs +++ b/src/MongoDB.Driver/Core/Connections/ConnectionInitializer.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -15,8 +15,6 @@ using System; using System.Collections.Generic; -using System.Linq; -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Bson.Serialization.Serializers; @@ -47,13 +45,14 @@ public ConnectionInitializer( _serverApi = serverApi; } - public ConnectionInitializerContext Authenticate(IConnection connection, ConnectionInitializerContext connectionInitializerContext, CancellationToken cancellationToken) + public ConnectionInitializerContext Authenticate(OperationContext operationContext, IConnection connection, ConnectionInitializerContext connectionInitializerContext) { + Ensure.IsNotNull(operationContext, nameof(operationContext)); Ensure.IsNotNull(connection, nameof(connection)); Ensure.IsNotNull(connectionInitializerContext, nameof(connectionInitializerContext)); var description = Ensure.IsNotNull(connectionInitializerContext.Description, nameof(connectionInitializerContext.Description)); - AuthenticationHelper.Authenticate(connection, description, connectionInitializerContext.Authenticator, cancellationToken); + AuthenticationHelper.Authenticate(operationContext, connection, description, connectionInitializerContext.Authenticator); // Connection description should be updated only on the initial handshake and not after reauthentication if (!description.IsInitialized()) @@ -68,7 +67,7 @@ public ConnectionInitializerContext Authenticate(IConnection connection, Connect try { var getLastErrorProtocol = CreateGetLastErrorProtocol(_serverApi); - var getLastErrorResult = getLastErrorProtocol.Execute(connection, cancellationToken); + var getLastErrorResult = getLastErrorProtocol.Execute(operationContext, connection); description = UpdateConnectionIdWithServerValue(description, getLastErrorResult); } @@ -82,13 +81,14 @@ public ConnectionInitializerContext Authenticate(IConnection connection, Connect return new ConnectionInitializerContext(description, connectionInitializerContext.Authenticator); } - public async 
Task<ConnectionInitializerContext> AuthenticateAsync(IConnection connection, ConnectionInitializerContext connectionInitializerContext, CancellationToken cancellationToken) + public async Task<ConnectionInitializerContext> AuthenticateAsync(OperationContext operationContext, IConnection connection, ConnectionInitializerContext connectionInitializerContext) { + Ensure.IsNotNull(operationContext, nameof(operationContext)); Ensure.IsNotNull(connection, nameof(connection)); Ensure.IsNotNull(connectionInitializerContext, nameof(connectionInitializerContext)); var description = Ensure.IsNotNull(connectionInitializerContext.Description, nameof(connectionInitializerContext.Description)); - await AuthenticationHelper.AuthenticateAsync(connection, description, connectionInitializerContext.Authenticator, cancellationToken).ConfigureAwait(false); + await AuthenticationHelper.AuthenticateAsync(operationContext, connection, description, connectionInitializerContext.Authenticator).ConfigureAwait(false); // Connection description should be updated only on the initial handshake and not while reauthentication if (!description.IsInitialized()) @@ -104,7 +104,7 @@ public async Task<ConnectionInitializerContext> AuthenticateAsync(IConnection co { var getLastErrorProtocol = CreateGetLastErrorProtocol(_serverApi); var getLastErrorResult = await getLastErrorProtocol - .ExecuteAsync(connection, cancellationToken) + .ExecuteAsync(operationContext, connection) .ConfigureAwait(false); description = UpdateConnectionIdWithServerValue(description, getLastErrorResult); @@ -119,13 +119,14 @@ public async Task<ConnectionInitializerContext> AuthenticateAsync(IConnection co return new ConnectionInitializerContext(description, connectionInitializerContext.Authenticator); } - public ConnectionInitializerContext SendHello(IConnection connection, CancellationToken cancellationToken) + public ConnectionInitializerContext SendHello(OperationContext operationContext, IConnection connection) { + 
Ensure.IsNotNull(operationContext, nameof(operationContext)); Ensure.IsNotNull(connection, nameof(connection)); var authenticator = CreateAuthenticator(connection); - var helloCommand = CreateInitialHelloCommand(authenticator, connection.Settings.LoadBalanced, cancellationToken); + var helloCommand = CreateInitialHelloCommand(operationContext, authenticator, connection.Settings.LoadBalanced); var helloProtocol = HelloHelper.CreateProtocol(helloCommand, _serverApi); - var helloResult = HelloHelper.GetResult(connection, helloProtocol, cancellationToken); + var helloResult = HelloHelper.GetResult(operationContext, connection, helloProtocol); if (connection.Settings.LoadBalanced && !helloResult.ServiceId.HasValue) { throw new InvalidOperationException("Driver attempted to initialize in load balancing mode, but the server does not support this mode."); @@ -134,13 +135,14 @@ public ConnectionInitializerContext SendHello(IConnection connection, Cancellati return new (new ConnectionDescription(connection.ConnectionId, helloResult), authenticator); } - public async Task<ConnectionInitializerContext> SendHelloAsync(IConnection connection, CancellationToken cancellationToken) + public async Task<ConnectionInitializerContext> SendHelloAsync(OperationContext operationContext, IConnection connection) { + Ensure.IsNotNull(operationContext, nameof(operationContext)); Ensure.IsNotNull(connection, nameof(connection)); var authenticator = CreateAuthenticator(connection); - var helloCommand = CreateInitialHelloCommand(authenticator, connection.Settings.LoadBalanced, cancellationToken); + var helloCommand = CreateInitialHelloCommand(operationContext, authenticator, connection.Settings.LoadBalanced); var helloProtocol = HelloHelper.CreateProtocol(helloCommand, _serverApi); - var helloResult = await HelloHelper.GetResultAsync(connection, helloProtocol, cancellationToken).ConfigureAwait(false); + var helloResult = await HelloHelper.GetResultAsync(operationContext, connection, 
helloProtocol).ConfigureAwait(false); if (connection.Settings.LoadBalanced && !helloResult.ServiceId.HasValue) { throw new InvalidOperationException("Driver attempted to initialize in load balancing mode, but the server does not support this mode."); @@ -163,12 +165,12 @@ private CommandWireProtocol<BsonDocument> CreateGetLastErrorProtocol(ServerApi s return getLastErrorProtocol; } - private BsonDocument CreateInitialHelloCommand(IAuthenticator authenticator, bool loadBalanced = false, CancellationToken cancellationToken = default) + private BsonDocument CreateInitialHelloCommand(OperationContext operationContext, IAuthenticator authenticator, bool loadBalanced = false) { var command = HelloHelper.CreateCommand(_serverApi, loadBalanced: loadBalanced); HelloHelper.AddClientDocumentToCommand(command, _clientDocument); HelloHelper.AddCompressorsToCommand(command, _compressors); - return HelloHelper.CustomizeCommand(command, authenticator, cancellationToken); + return HelloHelper.CustomizeCommand(operationContext, command, authenticator); } private IAuthenticator CreateAuthenticator(IConnection connection) diff --git a/src/MongoDB.Driver/Core/Connections/HelloHelper.cs b/src/MongoDB.Driver/Core/Connections/HelloHelper.cs index 70194498f5c..ee5a8caae9e 100644 --- a/src/MongoDB.Driver/Core/Connections/HelloHelper.cs +++ b/src/MongoDB.Driver/Core/Connections/HelloHelper.cs @@ -1,4 +1,4 @@ -/* Copyright 2018–present MongoDB Inc. +/* Copyright 2010–present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,7 +16,6 @@ using System; using System.Collections.Generic; using System.Linq; -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Bson.Serialization.Serializers; @@ -61,10 +60,10 @@ internal static BsonDocument CreateCommand(ServerApi serverApi, bool helloOk = f }; } - internal static BsonDocument CustomizeCommand(BsonDocument command, IAuthenticator authenticator, CancellationToken cancellationToken) + internal static BsonDocument CustomizeCommand(OperationContext operationContext, BsonDocument command, IAuthenticator authenticator) { return authenticator != null - ? authenticator.CustomizeInitialHelloCommand(command, cancellationToken) + ? authenticator.CustomizeInitialHelloCommand(operationContext, command) : command; } @@ -84,13 +83,13 @@ internal static CommandWireProtocol<BsonDocument> CreateProtocol( } internal static HelloResult GetResult( + OperationContext operationContext, IConnection connection, - CommandWireProtocol<BsonDocument> helloProtocol, - CancellationToken cancellationToken) + CommandWireProtocol<BsonDocument> helloProtocol) { try { - var helloResultDocument = helloProtocol.Execute(connection, cancellationToken); + var helloResultDocument = helloProtocol.Execute(operationContext, connection); return new HelloResult(helloResultDocument); } catch (MongoCommandException ex) when (ex.Code == 11) @@ -103,13 +102,13 @@ internal static HelloResult GetResult( } internal static async Task<HelloResult> GetResultAsync( + OperationContext operationContext, IConnection connection, - CommandWireProtocol<BsonDocument> helloProtocol, - CancellationToken cancellationToken) + CommandWireProtocol<BsonDocument> helloProtocol) { try { - var helloResultDocument = await helloProtocol.ExecuteAsync(connection, cancellationToken).ConfigureAwait(false); + var helloResultDocument = await helloProtocol.ExecuteAsync(operationContext, connection).ConfigureAwait(false); return new HelloResult(helloResultDocument); } catch 
(MongoCommandException ex) when (ex.Code == 11) diff --git a/src/MongoDB.Driver/Core/Connections/IConnection.cs b/src/MongoDB.Driver/Core/Connections/IConnection.cs index 961def30f29..b61f0c24075 100644 --- a/src/MongoDB.Driver/Core/Connections/IConnection.cs +++ b/src/MongoDB.Driver/Core/Connections/IConnection.cs @@ -14,9 +14,7 @@ */ using System; -using System.Collections.Generic; using System.Net; -using System.Threading; using System.Threading.Tasks; using MongoDB.Driver.Core.Configuration; using MongoDB.Driver.Core.WireProtocol.Messages; @@ -33,15 +31,14 @@ internal interface IConnection : IDisposable bool IsExpired { get; } ConnectionSettings Settings { get; } - void SetReadTimeout(TimeSpan timeout); - void Open(CancellationToken cancellationToken); - Task OpenAsync(CancellationToken cancellationToken); - void Reauthenticate(CancellationToken cancellationToken); - Task ReauthenticateAsync(CancellationToken cancellationToken); - ResponseMessage ReceiveMessage(int responseTo, IMessageEncoderSelector encoderSelector, MessageEncoderSettings messageEncoderSettings, CancellationToken cancellationToken); - Task<ResponseMessage> ReceiveMessageAsync(int responseTo, IMessageEncoderSelector encoderSelector, MessageEncoderSettings messageEncoderSettings, CancellationToken cancellationToken); - void SendMessages(IEnumerable<RequestMessage> messages, MessageEncoderSettings messageEncoderSettings, CancellationToken cancellationToken); - Task SendMessagesAsync(IEnumerable<RequestMessage> messages, MessageEncoderSettings messageEncoderSettings, CancellationToken cancellationToken); + void Open(OperationContext operationContext); + Task OpenAsync(OperationContext operationContext); + void Reauthenticate(OperationContext operationContext); + Task ReauthenticateAsync(OperationContext operationContext); + ResponseMessage ReceiveMessage(OperationContext operationContext, int responseTo, IMessageEncoderSelector encoderSelector, MessageEncoderSettings messageEncoderSettings); + 
Task<ResponseMessage> ReceiveMessageAsync(OperationContext operationContext, int responseTo, IMessageEncoderSelector encoderSelector, MessageEncoderSettings messageEncoderSettings); + void SendMessage(OperationContext operationContext, RequestMessage message, MessageEncoderSettings messageEncoderSettings); + Task SendMessageAsync(OperationContext operationContext, RequestMessage message, MessageEncoderSettings messageEncoderSettings); } internal interface IConnectionHandle : IConnection diff --git a/src/MongoDB.Driver/Core/Connections/IConnectionInitializer.cs b/src/MongoDB.Driver/Core/Connections/IConnectionInitializer.cs index 0cf12354a07..942094c80d3 100644 --- a/src/MongoDB.Driver/Core/Connections/IConnectionInitializer.cs +++ b/src/MongoDB.Driver/Core/Connections/IConnectionInitializer.cs @@ -13,7 +13,6 @@ * limitations under the License. */ -using System.Threading; using System.Threading.Tasks; using MongoDB.Driver.Authentication; using MongoDB.Driver.Core.Misc; @@ -34,9 +33,9 @@ public ConnectionInitializerContext(ConnectionDescription description, IAuthenti internal interface IConnectionInitializer { - ConnectionInitializerContext Authenticate(IConnection connection, ConnectionInitializerContext connectionInitializerContext, CancellationToken cancellationToken); - Task<ConnectionInitializerContext> AuthenticateAsync(IConnection connection, ConnectionInitializerContext connectionInitializerContext, CancellationToken cancellationToken); - ConnectionInitializerContext SendHello(IConnection connection, CancellationToken cancellationToken); - Task<ConnectionInitializerContext> SendHelloAsync(IConnection connection, CancellationToken cancellationToken); + ConnectionInitializerContext Authenticate(OperationContext operationContext, IConnection connection, ConnectionInitializerContext connectionInitializerContext); + Task<ConnectionInitializerContext> AuthenticateAsync(OperationContext operationContext, IConnection connection, ConnectionInitializerContext 
connectionInitializerContext); + ConnectionInitializerContext SendHello(OperationContext operationContext, IConnection connection); + Task<ConnectionInitializerContext> SendHelloAsync(OperationContext operationContext, IConnection connection); } } diff --git a/src/MongoDB.Driver/Core/Connections/Socks5AuthenticationSettings.cs b/src/MongoDB.Driver/Core/Connections/Socks5AuthenticationSettings.cs new file mode 100644 index 00000000000..2fbd46f58f3 --- /dev/null +++ b/src/MongoDB.Driver/Core/Connections/Socks5AuthenticationSettings.cs @@ -0,0 +1,94 @@ +/* Copyright 2010-present MongoDB Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +using System.Text; +using MongoDB.Driver.Core.Misc; +using MongoDB.Shared; + +namespace MongoDB.Driver.Core.Connections; + +/// <summary> +/// Represents the settings for SOCKS5 authentication. +/// </summary> +public abstract class Socks5AuthenticationSettings +{ + /// <summary> + /// Creates authentication settings that does not require any authentication. + /// </summary> + public static Socks5AuthenticationSettings None { get; } = new NoAuthenticationSettings(); + + /// <summary> + /// Creates authentication settings for username and password. 
+ /// </summary> + /// <param name="username">The username</param> + /// <param name="password">The password</param> + /// <returns></returns> + public static Socks5AuthenticationSettings UsernamePassword(string username, string password) + => new UsernamePasswordAuthenticationSettings(username, password); + + /// <summary> + /// Represents settings for no authentication in SOCKS5. + /// </summary> + internal sealed class NoAuthenticationSettings : Socks5AuthenticationSettings + { + /// <inheritdoc /> + public override bool Equals(object obj) => obj is NoAuthenticationSettings; + + /// <inheritdoc /> + public override int GetHashCode() => 0; + } + + /// <summary> + /// Represents settings for username and password authentication in SOCKS5. + /// </summary> + internal sealed class UsernamePasswordAuthenticationSettings : Socks5AuthenticationSettings + { + /// <summary> + /// Gets the username for authentication. + /// </summary> + public string Username { get; } + + /// <summary> + /// Gets the password for authentication. 
+ /// </summary> + public string Password { get; } + + internal UsernamePasswordAuthenticationSettings(string username, string password) + { + Username = Ensure.IsNotNullOrEmpty(username, nameof(username)); + Ensure.That(Encoding.UTF8.GetByteCount(username) <= byte.MaxValue, $"{nameof(username)} must be at most 255 bytes long when encoded as UTF-8", nameof(username)); + Password = Ensure.IsNotNullOrEmpty(password, nameof(password)); + Ensure.That(Encoding.UTF8.GetByteCount(password) <= byte.MaxValue, $"{nameof(password)} must be at most 255 bytes long when encoded as UTF-8", nameof(password)); + } + + /// <inheritdoc /> + public override bool Equals(object obj) + { + if (obj is UsernamePasswordAuthenticationSettings other) + { + return Username == other.Username && Password == other.Password; + } + + return false; + } + + /// <inheritdoc /> + public override int GetHashCode() => + new Hasher() + .Hash(Username) + .Hash(Password) + .GetHashCode(); + } +} \ No newline at end of file diff --git a/src/MongoDB.Driver/Core/Connections/Socks5Helper.cs b/src/MongoDB.Driver/Core/Connections/Socks5Helper.cs new file mode 100644 index 00000000000..edea1055c96 --- /dev/null +++ b/src/MongoDB.Driver/Core/Connections/Socks5Helper.cs @@ -0,0 +1,343 @@ +/* Copyright 2010-present MongoDB Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +using System; +using System.Buffers; +using System.Buffers.Binary; +using System.IO; +using System.Net; +using System.Net.Sockets; +using System.Text; +using System.Threading; +using System.Threading.Tasks; +using MongoDB.Driver.Core.Misc; + +namespace MongoDB.Driver.Core.Connections; + +internal static class Socks5Helper +{ + // Schemas for requests/responses are taken from the following RFCs: + // SOCKS Protocol Version 5 - https://datatracker.ietf.org/doc/html/rfc1928 + // Username/Password Authentication for SOCKS V5 - https://datatracker.ietf.org/doc/html/rfc1929 + + // Greeting request + // +----+----------+----------+ + // |VER | NMETHODS | METHODS | + // +----+----------+----------+ + // | 1 | 1 | 1 to 255 | + // +----+----------+----------+ + + // Greeting response + // +----+--------+ + // |VER | METHOD | + // +----+--------+ + // | 1 | 1 | + // +----+--------+ + + // Authentication request -- if using username/password authentication + // +----+------+----------+------+----------+ + // |VER | ULEN | UNAME | PLEN | PASSWD | + // +----+------+----------+------+----------+ + // | 1 | 1 | 1 to 255 | 1 | 1 to 255 | + // +----+------+----------+------+----------+ + + // Authentication response + // +----+--------+ + // |VER | STATUS | + // +----+--------+ + // | 1 | 1 | + // +----+--------+ + + // Connect request + // +----+-----+-------+------+----------+----------+ + // |VER | CMD | RSV | ATYP | DST.ADDR | DST.PORT | + // +----+-----+-------+------+----------+----------+ + // | 1 | 1 | X'00' | 1 | Variable | 2 | + // +----+-----+-------+------+----------+----------+ + + // Connect response + // +----+-----+-------+------+----------+----------+ + // |VER | REP | RSV | ATYP | DST.ADDR | DST.PORT | + // +----+-----+-------+------+----------+----------+ + // | 1 | 1 | X'00' | 1 | Variable | 2 | + // +----+-----+-------+------+----------+----------+ + + //General use constants + 
private const byte ProtocolVersion5 = 0x05; + private const byte Socks5Success = 0x00; + private const byte Reserved = 0x00; + private const byte CmdConnect = 0x01; + + //Auth constants + private const byte MethodNoAuth = 0x00; + private const byte MethodUsernamePassword = 0x02; + private const byte SubnegotiationVersion = 0x01; + + //Address type constants + private const byte AddressTypeIPv4 = 0x01; + private const byte AddressTypeIPv6 = 0x04; + private const byte AddressTypeDomain = 0x03; + + // Largest possible message size when using username and password auth. + private const int BufferSize = 513; + + public static void PerformSocks5Handshake(Stream stream, EndPoint endPoint, Socks5AuthenticationSettings authenticationSettings, CancellationToken cancellationToken) + { + var (targetHost, targetPort) = endPoint.GetHostAndPort(); + var buffer = ArrayPool<byte>.Shared.Rent(BufferSize); + try + { + var useAuth = authenticationSettings is Socks5AuthenticationSettings.UsernamePasswordAuthenticationSettings; + + var greetingRequestLength = CreateGreetingRequest(buffer, useAuth); + stream.Write(buffer, 0, greetingRequestLength); + + stream.ReadBytes(buffer, 0, 2, cancellationToken); + var requiresAuthenticationStep = ProcessGreetingResponse(buffer, useAuth); + + // If we have username and password, but the proxy doesn't need them, we skip the authentication step. 
+ if (requiresAuthenticationStep) + { + var authenticationRequestLength = CreateAuthenticationRequest(buffer, authenticationSettings); + stream.Write(buffer, 0, authenticationRequestLength); + + stream.ReadBytes(buffer, 0, 2, cancellationToken); + ProcessAuthenticationResponse(buffer); + } + + var connectRequestLength = CreateConnectRequest(buffer, targetHost, targetPort); + stream.Write(buffer, 0, connectRequestLength); + + stream.ReadBytes(buffer, 0, 5, cancellationToken); + var skip = ProcessConnectResponse(buffer); + stream.ReadBytes(buffer, 0, skip, cancellationToken); + } + finally + { + ArrayPool<byte>.Shared.Return(buffer); + } + } + + public static async Task PerformSocks5HandshakeAsync(Stream stream, EndPoint endPoint, Socks5AuthenticationSettings authenticationSettings, CancellationToken cancellationToken) + { + var (targetHost, targetPort) = endPoint.GetHostAndPort(); + var buffer = ArrayPool<byte>.Shared.Rent(BufferSize); + try + { + var useAuth = authenticationSettings is Socks5AuthenticationSettings.UsernamePasswordAuthenticationSettings; + + var greetingRequestLength = CreateGreetingRequest(buffer, useAuth); + await stream.WriteAsync(buffer, 0, greetingRequestLength, cancellationToken).ConfigureAwait(false); + + await stream.ReadBytesAsync(buffer, 0, 2, cancellationToken).ConfigureAwait(false); + var requiresAuthenticationStep = ProcessGreetingResponse(buffer, useAuth); + + // If we have username and password, but the proxy doesn't need them, we skip the authentication step. 
+            if (requiresAuthenticationStep)
+            {
+                var authenticationRequestLength = CreateAuthenticationRequest(buffer, authenticationSettings);
+                await stream.WriteAsync(buffer, 0, authenticationRequestLength, cancellationToken).ConfigureAwait(false);
+
+                await stream.ReadBytesAsync(buffer, 0, 2, cancellationToken).ConfigureAwait(false);
+                ProcessAuthenticationResponse(buffer);
+            }
+
+            var connectRequestLength = CreateConnectRequest(buffer, targetHost, targetPort);
+            await stream.WriteAsync(buffer, 0, connectRequestLength, cancellationToken).ConfigureAwait(false);
+
+            await stream.ReadBytesAsync(buffer, 0, 5, cancellationToken).ConfigureAwait(false);
+            var skip = ProcessConnectResponse(buffer);
+            await stream.ReadBytesAsync(buffer, 0, skip, cancellationToken).ConfigureAwait(false);
+        }
+        finally
+        {
+            ArrayPool<byte>.Shared.Return(buffer);
+        }
+    }
+
+    private static int CreateGreetingRequest(byte[] buffer, bool useAuth)
+    {
+        buffer[0] = ProtocolVersion5;
+
+        //buffer[1] is the number of methods supported by the client.
+        if (!useAuth)
+        {
+            buffer[1] = 1;
+            buffer[2] = MethodNoAuth;
+            return 3;
+        }
+
+        buffer[1] = 2;
+        buffer[2] = MethodNoAuth;
+        buffer[3] = MethodUsernamePassword;
+        return 4;
+    }
+
+    private static bool ProcessGreetingResponse(byte[] buffer, bool useAuth)
+    {
+        EnsureProtocolVersion(buffer[0]);
+        var acceptedMethod = buffer[1];
+        if (acceptedMethod == MethodUsernamePassword)
+        {
+            if (!useAuth)
+            {
+                // If the server is well-behaved this should never happen, we're just being defensive.
+                throw new IOException("Unexpected server response. SOCKS5 proxy replied with username and password authentication," +
+                                      " but no such request was made.");
+            }
+
+            return true;
+        }
+
+        if (acceptedMethod != MethodNoAuth)
+        {
+            throw new IOException($"SOCKS5 proxy requires unsupported authentication method. 
Unsupported method: {acceptedMethod}"); + } + + return false; + } + + private static int CreateAuthenticationRequest(byte[] buffer, Socks5AuthenticationSettings authenticationSettings) + { + if (authenticationSettings is not Socks5AuthenticationSettings.UsernamePasswordAuthenticationSettings usernamePasswordAuthenticationSettings) + { + // This should not happen, trying to be defensive here. + throw new ArgumentException($"{nameof(authenticationSettings)} must be of type {nameof(Socks5AuthenticationSettings.UsernamePasswordAuthenticationSettings)}."); + } + + var proxyUsername = usernamePasswordAuthenticationSettings.Username; + var proxyPassword = usernamePasswordAuthenticationSettings.Password; + + // We need to add version, username.length, username, password.length, password (in this order) + buffer[0] = SubnegotiationVersion; + var usernameLength = EncodeString(proxyUsername, buffer, 2, nameof(proxyUsername)); + buffer[1] = usernameLength; + var passwordLength = EncodeString(proxyPassword, buffer, 3 + usernameLength, nameof(proxyPassword)); + buffer[2 + usernameLength] = passwordLength; + + return 3 + usernameLength + passwordLength; + } + + private static void ProcessAuthenticationResponse(byte[] buffer) + { + EnsureSocksSuccess(buffer[1], "authentication"); + + if (buffer[0] != SubnegotiationVersion) + { + throw new IOException($"Invalid SOCKS5 subnegotiation version in authentication response. 
Expected version {SubnegotiationVersion}, but received {buffer[0]}."); + } + } + + private static int CreateConnectRequest(byte[] buffer, string targetHost, int targetPort) + { + buffer[0] = ProtocolVersion5; + buffer[1] = CmdConnect; + buffer[2] = Reserved; + int addressLength; + + if (IPAddress.TryParse(targetHost, out var ip)) + { + switch (ip.AddressFamily) + { + case AddressFamily.InterNetwork: + buffer[3] = AddressTypeIPv4; + Array.Copy(ip.GetAddressBytes(), 0, buffer, 4, 4); + addressLength = 4; + break; + case AddressFamily.InterNetworkV6: + buffer[3] = AddressTypeIPv6; + Array.Copy(ip.GetAddressBytes(), 0, buffer, 4, 16); + addressLength = 16; + break; + default: + throw new IOException($"Invalid target host address family: {ip.AddressFamily}."); + } + } + else + { + buffer[3] = AddressTypeDomain; + var hostLength = EncodeString(targetHost, buffer, 5, nameof(targetHost)); + buffer[4] = hostLength; + addressLength = hostLength + 1; + } + + BinaryPrimitives.WriteUInt16BigEndian(buffer.AsSpan(addressLength + 4), (ushort)targetPort); + + return addressLength + 6; + } + + // Reads the SOCKS5 connect response and returns the number of bytes to skip in the buffer. + private static int ProcessConnectResponse(byte[] buffer) + { + EnsureProtocolVersion(buffer[0]); + EnsureSocksSuccess(buffer[1], "connect"); + + // We skip the last bytes of the response as we do not need them. 
+        // We skip length(dst.address) + length(dst.port) - 1 --- length(dst.port) is always 2
+        // -1 because we already read the first byte of the address type
+        // (necessary for the variable length domain-type addresses)
+        return buffer[3] switch
+        {
+            AddressTypeIPv4 => 5,
+            AddressTypeIPv6 => 17,
+            AddressTypeDomain => buffer[4] + 2,
+            _ => throw new IOException($"Unknown address type in SOCKS5 reply: {buffer[3]}.")
+        };
+    }
+
+    private static byte EncodeString(string input, byte[] buffer, int offset, string parameterName)
+    {
+        try
+        {
+            var written = Encoding.UTF8.GetBytes(input, 0, input.Length, buffer, offset);
+            return checked((byte)written);
+        }
+        catch
+        {
+            throw new IOException($"The {parameterName} could not be encoded as UTF-8.");
+        }
+    }
+
+    private static void EnsureProtocolVersion(byte version)
+    {
+        if (version != ProtocolVersion5)
+        {
+            throw new IOException($"Invalid SOCKS version in response. Expected version {ProtocolVersion5}, but received {version}.");
+        }
+    }
+
+    private static void EnsureSocksSuccess(byte code, string operation)
+    {
+        if (code == Socks5Success)
+        {
+            return; // success
+        }
+
+        var message = code switch
+        {
+            0x01 => "General SOCKS server failure",
+            0x02 => "Connection not allowed by ruleset",
+            0x03 => "Network unreachable",
+            0x04 => "Host unreachable",
+            0x05 => "Connection refused",
+            0x06 => "TTL expired",
+            0x07 => "Command not supported",
+            0x08 => "Address type not supported",
+            _ => $"Unassigned error (0x{code:X2})"
+        };
+
+        throw new IOException($"SOCKS5 {operation} failed. {message}");
+    }
+}
\ No newline at end of file
diff --git a/src/MongoDB.Driver/Core/Connections/Socks5ProxySettings.cs b/src/MongoDB.Driver/Core/Connections/Socks5ProxySettings.cs
new file mode 100644
index 00000000000..67911164b08
--- /dev/null
+++ b/src/MongoDB.Driver/Core/Connections/Socks5ProxySettings.cs
@@ -0,0 +1,147 @@
+/* Copyright 2010-present MongoDB Inc.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +using System; +using System.Text; +using MongoDB.Driver.Core.Misc; +using MongoDB.Shared; + +namespace MongoDB.Driver.Core.Connections; + +/// <summary> +/// Represents the settings for a SOCKS5 proxy connection. +/// </summary> +public sealed class Socks5ProxySettings +{ + private const int DefaultPort = 1080; + + /// <summary> + /// Gets the host of the SOCKS5 proxy. + /// </summary> + public string Host { get; } + + /// <summary> + /// Gets the port of the SOCKS5 proxy. + /// </summary> + public int Port { get; } + + /// <summary> + /// Gets the authentication settings of the SOCKS5 proxy. + /// </summary> + public Socks5AuthenticationSettings Authentication { get; } + + /// <summary> + /// Initializes a new instance of the <see cref="Socks5ProxySettings"/> class with the specified host. + /// </summary> + /// <param name="host">The SOCKS5 proxy host.</param> + public Socks5ProxySettings(string host) + : this(host, DefaultPort, Socks5AuthenticationSettings.None) + { + } + + /// <summary> + /// Initializes a new instance of the <see cref="Socks5ProxySettings"/> class with the specified host and port. 
+ /// </summary> + /// <param name="host">The proxy host.</param> + /// <param name="port">The proxy port.</param> + public Socks5ProxySettings(string host, int port) + : this(host, port, Socks5AuthenticationSettings.None) + { + } + + /// <summary> + /// Initializes a new instance of the <see cref="Socks5ProxySettings"/> class with the specified host and authentication settings. + /// </summary> + /// <param name="host">The proxy host.</param> + /// <param name="authentication">The proxy authentication settings.</param> + public Socks5ProxySettings(string host, Socks5AuthenticationSettings authentication) + : this(host, DefaultPort, authentication) + { + } + + /// <summary> + /// Initializes a new instance of the <see cref="Socks5ProxySettings"/> class with the specified host, port, and authentication settings. + /// </summary> + /// <param name="host">The proxy host.</param> + /// <param name="port">The proxy port.</param> + /// <param name="authentication">The proxy authentication settings.</param> + public Socks5ProxySettings(string host, int port, Socks5AuthenticationSettings authentication) + { + Host = Ensure.IsNotNullOrEmpty(host, nameof(host)); + Ensure.That(Encoding.UTF8.GetByteCount(host) <= byte.MaxValue, $"{nameof(host)} must be at most 255 bytes long when encoded as UTF-8"); + Port = Ensure.IsBetween(port, 1, 65535, nameof(port)); + Authentication = Ensure.IsNotNull(authentication, nameof(authentication)); + } + + // This is a convenience method to create Socks5ProxySettings from the connection string parameters. + internal static Socks5ProxySettings Create(string host, int? port, string username, string password) + { + if (string.IsNullOrEmpty(username) != string.IsNullOrEmpty(password)) + { + throw new ArgumentException("Both username and password must be provided or neither should be provided."); + } + + var authentication = + !string.IsNullOrEmpty(username) + ? 
Socks5AuthenticationSettings.UsernamePassword(username, password) + : Socks5AuthenticationSettings.None; + + return new Socks5ProxySettings(host, port ?? DefaultPort, authentication); + } + + /// <inheritdoc /> + public override bool Equals(object obj) + { + if (obj is Socks5ProxySettings other) + { + return Host == other.Host && + Port == other.Port && + Equals(Authentication, other.Authentication); + } + + return false; + } + + /// <inheritdoc /> + public override int GetHashCode() + { + return new Hasher() + .Hash(Host) + .Hash(Port) + .Hash(Authentication) + .GetHashCode(); + } + + /// <inheritdoc/> + public override string ToString() + { + var sb = new StringBuilder(); + sb.Append("{ Host : "); + sb.Append(Host); + sb.Append(", Port : "); + sb.Append(Port); + sb.Append(", Authentication : "); + + sb.Append(Authentication switch + { + Socks5AuthenticationSettings.UsernamePasswordAuthenticationSettings => + "UsernamePassword", + _ => "None" + }); + + sb.Append(" }"); + return sb.ToString(); + } +} \ No newline at end of file diff --git a/src/MongoDB.Driver/Core/Connections/Socks5ProxyStreamFactory.cs b/src/MongoDB.Driver/Core/Connections/Socks5ProxyStreamFactory.cs new file mode 100644 index 00000000000..6b3fc8b467d --- /dev/null +++ b/src/MongoDB.Driver/Core/Connections/Socks5ProxyStreamFactory.cs @@ -0,0 +1,71 @@ +/* Copyright 2010-present MongoDB Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +using System.IO; +using System.Net; +using System.Threading; +using System.Threading.Tasks; +using MongoDB.Driver.Core.Configuration; +using MongoDB.Driver.Core.Misc; + +namespace MongoDB.Driver.Core.Connections; + +internal sealed class Socks5ProxyStreamFactory : IStreamFactory +{ + private readonly Socks5ProxyStreamSettings _settings; + private readonly IStreamFactory _wrapped; + + public Socks5ProxyStreamFactory(Socks5ProxyStreamSettings settings, IStreamFactory wrapped) + { + _settings = Ensure.IsNotNull(settings, nameof(settings)); + _wrapped = Ensure.IsNotNull(wrapped, nameof(wrapped)); + } + + public Stream CreateStream(EndPoint endPoint, CancellationToken cancellationToken) + { + Stream stream = null; + + try + { + var proxyEndpoint = new DnsEndPoint(_settings.Socks5ProxySettings.Host, _settings.Socks5ProxySettings.Port); + stream = _wrapped.CreateStream(proxyEndpoint, cancellationToken); + Socks5Helper.PerformSocks5Handshake(stream, endPoint, _settings.Socks5ProxySettings.Authentication, cancellationToken); + return stream; + } + catch + { + stream?.Dispose(); + throw; + } + } + + public async Task<Stream> CreateStreamAsync(EndPoint endPoint, CancellationToken cancellationToken) + { + Stream stream = null; + + try + { + var proxyEndpoint = new DnsEndPoint(_settings.Socks5ProxySettings.Host, _settings.Socks5ProxySettings.Port); + stream = await _wrapped.CreateStreamAsync(proxyEndpoint, cancellationToken).ConfigureAwait(false); + await Socks5Helper.PerformSocks5HandshakeAsync(stream, endPoint, _settings.Socks5ProxySettings.Authentication, cancellationToken).ConfigureAwait(false); + return stream; + } + catch + { + stream?.Dispose(); + throw; + } + } +} \ No newline at end of file diff --git a/src/MongoDB.Driver/Core/Connections/TcpStreamFactory.cs b/src/MongoDB.Driver/Core/Connections/TcpStreamFactory.cs index ae2c1fb69b4..be13ca03d17 100644 --- a/src/MongoDB.Driver/Core/Connections/TcpStreamFactory.cs +++ 
b/src/MongoDB.Driver/Core/Connections/TcpStreamFactory.cs @@ -49,21 +49,43 @@ public TcpStreamFactory(TcpStreamSettings settings) public Stream CreateStream(EndPoint endPoint, CancellationToken cancellationToken) { #if NET472 - var socket = CreateSocket(endPoint); - Connect(socket, endPoint, cancellationToken); - return CreateNetworkStream(socket); + Socket socket = null; + NetworkStream stream = null; + + try + { + socket = CreateSocket(endPoint); + Connect(socket, endPoint, cancellationToken); + stream = CreateNetworkStream(socket); + + return stream; + } + catch + { + socket?.Dispose(); + stream?.Dispose(); + + throw; + } #else var resolved = ResolveEndPoints(endPoint); - for (int i = 0; i < resolved.Length; i++) + for (var i = 0; i < resolved.Length; i++) { + Socket socket = null; + NetworkStream stream = null; + try { - var socket = CreateSocket(resolved[i]); + socket = CreateSocket(resolved[i]); Connect(socket, resolved[i], cancellationToken); - return CreateNetworkStream(socket); + stream = CreateNetworkStream(socket); + return stream; } catch { + socket?.Dispose(); + stream?.Dispose(); + // if we have tried all of them and still failed, // then blow up. if (i == resolved.Length - 1) @@ -74,28 +96,49 @@ public Stream CreateStream(EndPoint endPoint, CancellationToken cancellationToke } // we should never get here... 
- throw new InvalidOperationException("Unabled to resolve endpoint."); + throw new InvalidOperationException("Unable to resolve endpoint."); #endif } public async Task<Stream> CreateStreamAsync(EndPoint endPoint, CancellationToken cancellationToken) { #if NET472 - var socket = CreateSocket(endPoint); - await ConnectAsync(socket, endPoint, cancellationToken).ConfigureAwait(false); - return CreateNetworkStream(socket); + Socket socket = null; + NetworkStream stream = null; + + try + { + socket = CreateSocket(endPoint); + await ConnectAsync(socket, endPoint, cancellationToken).ConfigureAwait(false); + stream = CreateNetworkStream(socket); + return stream; + } + catch + { + socket?.Dispose(); + stream?.Dispose(); + + throw; + } #else var resolved = await ResolveEndPointsAsync(endPoint).ConfigureAwait(false); for (int i = 0; i < resolved.Length; i++) { + Socket socket = null; + NetworkStream stream = null; + try { - var socket = CreateSocket(resolved[i]); + socket = CreateSocket(resolved[i]); await ConnectAsync(socket, resolved[i], cancellationToken).ConfigureAwait(false); - return CreateNetworkStream(socket); + stream = CreateNetworkStream(socket); + return stream; } catch { + socket?.Dispose(); + stream?.Dispose(); + // if we have tried all of them and still failed, // then blow up. if (i == resolved.Length - 1) @@ -138,7 +181,10 @@ private void Connect(Socket socket, EndPoint endPoint, CancellationToken cancell if (!connectOperation.IsCompleted) { - try { socket.Dispose(); } catch { } + try + { + socket.Dispose(); + } catch { } cancellationToken.ThrowIfCancellationRequested(); throw new TimeoutException($"Timed out connecting to {endPoint}. 
Timeout was {_settings.ConnectTimeout}."); @@ -164,7 +210,12 @@ private async Task ConnectAsync(Socket socket, EndPoint endPoint, CancellationTo if (!connectTask.IsCompleted) { - try { socket.Dispose(); } catch { } + try + { + socket.Dispose(); + // should await on the read task to avoid UnobservedTaskException + await connectTask.ConfigureAwait(false); + } catch { } cancellationToken.ThrowIfCancellationRequested(); throw new TimeoutException($"Timed out connecting to {endPoint}. Timeout was {_settings.ConnectTimeout}."); @@ -184,28 +235,7 @@ private async Task ConnectAsync(Socket socket, EndPoint endPoint, CancellationTo private NetworkStream CreateNetworkStream(Socket socket) { ConfigureConnectedSocket(socket); - - var stream = new NetworkStream(socket, true); - - if (_settings.ReadTimeout.HasValue) - { - var readTimeout = (int)_settings.ReadTimeout.Value.TotalMilliseconds; - if (readTimeout != 0) - { - stream.ReadTimeout = readTimeout; - } - } - - if (_settings.WriteTimeout.HasValue) - { - var writeTimeout = (int)_settings.WriteTimeout.Value.TotalMilliseconds; - if (writeTimeout != 0) - { - stream.WriteTimeout = writeTimeout; - } - } - - return stream; + return new NetworkStream(socket, true); } private Socket CreateSocket(EndPoint endPoint) @@ -250,20 +280,18 @@ private Socket CreateSocket(EndPoint endPoint) private EndPoint[] ResolveEndPoints(EndPoint initial) { - var dnsInitial = initial as DnsEndPoint; - if (dnsInitial == null) + if (initial is not DnsEndPoint dnsInitial) { - return new[] { initial }; + return [initial]; } - IPAddress address; - if (IPAddress.TryParse(dnsInitial.Host, out address)) + if (IPAddress.TryParse(dnsInitial.Host, out var address)) { - return new[] { new IPEndPoint(address, dnsInitial.Port) }; + return [new IPEndPoint(address, dnsInitial.Port)]; } var preferred = initial.AddressFamily; - if (preferred == AddressFamily.Unspecified || preferred == AddressFamily.Unknown) + if (preferred is AddressFamily.Unspecified or 
AddressFamily.Unknown) { preferred = _settings.AddressFamily; } @@ -277,20 +305,18 @@ private EndPoint[] ResolveEndPoints(EndPoint initial) private async Task<EndPoint[]> ResolveEndPointsAsync(EndPoint initial) { - var dnsInitial = initial as DnsEndPoint; - if (dnsInitial == null) + if (initial is not DnsEndPoint dnsInitial) { - return new[] { initial }; + return [initial]; } - IPAddress address; - if (IPAddress.TryParse(dnsInitial.Host, out address)) + if (IPAddress.TryParse(dnsInitial.Host, out var address)) { - return new[] { new IPEndPoint(address, dnsInitial.Port) }; + return [new IPEndPoint(address, dnsInitial.Port)]; } var preferred = initial.AddressFamily; - if (preferred == AddressFamily.Unspecified || preferred == AddressFamily.Unknown) + if (preferred is AddressFamily.Unspecified or AddressFamily.Unknown) { preferred = _settings.AddressFamily; } diff --git a/src/MongoDB.Driver/Core/DatabaseNamespace.cs b/src/MongoDB.Driver/Core/DatabaseNamespace.cs index 2fa91d4065e..360a527065b 100644 --- a/src/MongoDB.Driver/Core/DatabaseNamespace.cs +++ b/src/MongoDB.Driver/Core/DatabaseNamespace.cs @@ -86,11 +86,13 @@ public string DatabaseName get { return _databaseName; } } + [Obsolete("This collection namespace was removed in server version 4.2. As such, this property will be removed in a later release.")] internal CollectionNamespace SystemIndexesCollection { get { return new CollectionNamespace(this, "system.indexes"); } } + [Obsolete("This collection namespace was removed in server version 4.2. 
As such, this property will be removed in a later release.")] internal CollectionNamespace SystemNamespacesCollection { get { return new CollectionNamespace(this, "system.namespaces"); } diff --git a/src/MongoDB.Driver/Core/Events/ConnectionSendingMessagesEvent.cs b/src/MongoDB.Driver/Core/Events/ConnectionSendingMessagesEvent.cs index 1e80c9a8e66..f61acad279d 100644 --- a/src/MongoDB.Driver/Core/Events/ConnectionSendingMessagesEvent.cs +++ b/src/MongoDB.Driver/Core/Events/ConnectionSendingMessagesEvent.cs @@ -15,6 +15,7 @@ using System; using System.Collections.Generic; +using System.Linq; using MongoDB.Driver.Core.Clusters; using MongoDB.Driver.Core.Connections; using MongoDB.Driver.Core.Servers; @@ -28,7 +29,7 @@ public struct ConnectionSendingMessagesEvent : IEvent { private readonly ConnectionId _connectionId; private readonly long? _operationId; - private readonly IReadOnlyList<int> _requestIds; + private readonly int _requestId; private readonly DateTime _timestamp; /// <summary> @@ -37,10 +38,25 @@ public struct ConnectionSendingMessagesEvent : IEvent /// <param name="connectionId">The connection identifier.</param> /// <param name="requestIds">The request ids.</param> /// <param name="operationId">The operation identifier.</param> + [Obsolete("Support for sending multiple messages has been removed, use the constructor with single requestId instead.")] public ConnectionSendingMessagesEvent(ConnectionId connectionId, IReadOnlyList<int> requestIds, long? operationId) { _connectionId = connectionId; - _requestIds = requestIds; + _requestId = requestIds.Single(); + _operationId = operationId; + _timestamp = DateTime.UtcNow; + } + + /// <summary> + /// Initializes a new instance of the <see cref="ConnectionSendingMessagesEvent" /> struct. 
+ /// </summary> + /// <param name="connectionId">The connection identifier.</param> + /// <param name="requestId">The request id.</param> + /// <param name="operationId">The operation identifier.</param> + public ConnectionSendingMessagesEvent(ConnectionId connectionId, int requestId, long? operationId) + { + _connectionId = connectionId; + _requestId = requestId; _operationId = operationId; _timestamp = DateTime.UtcNow; } @@ -61,12 +77,21 @@ public ConnectionId ConnectionId get { return _connectionId; } } + /// <summary> + /// Gets the request id. + /// </summary> + public int RequestId + { + get { return _requestId; } + } + /// <summary> /// Gets the request ids. /// </summary> + [Obsolete($"Support for sending multiple messages has been removed, use {nameof(RequestId)} instead.")] public IReadOnlyList<int> RequestIds { - get { return _requestIds; } + get { return [_requestId]; } } /// <summary> diff --git a/src/MongoDB.Driver/Core/Events/ConnectionSendingMessagesFailedEvent.cs b/src/MongoDB.Driver/Core/Events/ConnectionSendingMessagesFailedEvent.cs index 2fb5aab05df..fd9f9624d01 100644 --- a/src/MongoDB.Driver/Core/Events/ConnectionSendingMessagesFailedEvent.cs +++ b/src/MongoDB.Driver/Core/Events/ConnectionSendingMessagesFailedEvent.cs @@ -15,6 +15,7 @@ using System; using System.Collections.Generic; +using System.Linq; using MongoDB.Driver.Core.Clusters; using MongoDB.Driver.Core.Connections; using MongoDB.Driver.Core.Servers; @@ -29,7 +30,7 @@ public struct ConnectionSendingMessagesFailedEvent : IEvent private readonly ConnectionId _connectionId; private readonly Exception _exception; private readonly long? 
_operationId; - private readonly IReadOnlyList<int> _requestIds; + private readonly int _requestId; private readonly DateTime _timestamp; /// <summary> @@ -39,10 +40,27 @@ public struct ConnectionSendingMessagesFailedEvent : IEvent /// <param name="requestIds">The request ids.</param> /// <param name="exception">The exception.</param> /// <param name="operationId">The operation identifier.</param> + [Obsolete("Support for sending multiple messages has been removed, use the constructor with single requestId instead.")] public ConnectionSendingMessagesFailedEvent(ConnectionId connectionId, IReadOnlyList<int> requestIds, Exception exception, long? operationId) { _connectionId = connectionId; - _requestIds = requestIds; + _requestId = requestIds.Single(); + _exception = exception; + _operationId = operationId; + _timestamp = DateTime.UtcNow; + } + + /// <summary> + /// Initializes a new instance of the <see cref="ConnectionSendingMessagesFailedEvent" /> struct. + /// </summary> + /// <param name="connectionId">The connection identifier.</param> + /// <param name="requestId">The request id.</param> + /// <param name="exception">The exception.</param> + /// <param name="operationId">The operation identifier.</param> + public ConnectionSendingMessagesFailedEvent(ConnectionId connectionId, int requestId, Exception exception, long? operationId) + { + _connectionId = connectionId; + _requestId = requestId; _exception = exception; _operationId = operationId; _timestamp = DateTime.UtcNow; @@ -80,12 +98,21 @@ public long? OperationId get { return _operationId; } } + /// <summary> + /// Gets the request id. + /// </summary> + public int RequestId + { + get { return _requestId; } + } + /// <summary> /// Gets the request ids. 
/// </summary> + [Obsolete($"Support for sending multiple messages has been removed, use {nameof(RequestId)} instead.")] public IReadOnlyList<int> RequestIds { - get { return _requestIds; } + get { return [_requestId]; } } /// <summary> diff --git a/src/MongoDB.Driver/Core/Events/ConnectionSentMessagesEvent.cs b/src/MongoDB.Driver/Core/Events/ConnectionSentMessagesEvent.cs index a5e3608b9c9..6e66e122339 100644 --- a/src/MongoDB.Driver/Core/Events/ConnectionSentMessagesEvent.cs +++ b/src/MongoDB.Driver/Core/Events/ConnectionSentMessagesEvent.cs @@ -15,6 +15,7 @@ using System; using System.Collections.Generic; +using System.Linq; using MongoDB.Driver.Core.Clusters; using MongoDB.Driver.Core.Connections; using MongoDB.Driver.Core.Servers; @@ -31,7 +32,7 @@ public struct ConnectionSentMessagesEvent : IEvent private readonly TimeSpan _serializationDuration; private readonly int _length; private readonly long? _operationId; - private readonly IReadOnlyList<int> _requestIds; + private readonly int _requestId; private readonly DateTime _timestamp; /// <summary> @@ -43,10 +44,31 @@ public struct ConnectionSentMessagesEvent : IEvent /// <param name="networkDuration">The duration of time spent on the network.</param> /// <param name="serializationDuration">The duration of time spent serializing the messages.</param> /// <param name="operationId">The operation identifier.</param> + [Obsolete("Support for sending multiple messages has been removed, use the constructor with single requestId instead.")] public ConnectionSentMessagesEvent(ConnectionId connectionId, IReadOnlyList<int> requestIds, int length, TimeSpan networkDuration, TimeSpan serializationDuration, long? 
operationId) { _connectionId = connectionId; - _requestIds = requestIds; + _requestId = requestIds.Single(); + _length = length; + _networkDuration = networkDuration; + _serializationDuration = serializationDuration; + _operationId = operationId; + _timestamp = DateTime.UtcNow; + } + + /// <summary> + /// Initializes a new instance of the <see cref="ConnectionSentMessagesEvent" /> struct. + /// </summary> + /// <param name="connectionId">The connection identifier.</param> + /// <param name="requestId">The request id.</param> + /// <param name="length">The length.</param> + /// <param name="networkDuration">The duration of time spent on the network.</param> + /// <param name="serializationDuration">The duration of time spent serializing the messages.</param> + /// <param name="operationId">The operation identifier.</param> + public ConnectionSentMessagesEvent(ConnectionId connectionId, int requestId, int length, TimeSpan networkDuration, TimeSpan serializationDuration, long? operationId) + { + _connectionId = connectionId; + _requestId = requestId; _length = length; _networkDuration = networkDuration; _serializationDuration = serializationDuration; @@ -110,12 +132,21 @@ public int Length get { return _length; } } + /// <summary> + /// Gets the request id. + /// </summary> + public int RequestId + { + get { return _requestId; } + } + /// <summary> /// Gets the request ids. 
/// </summary> + [Obsolete($"Support for sending multiple messages has been removed, use {nameof(RequestId)} instead.")] public IReadOnlyList<int> RequestIds { - get { return _requestIds; } + get { return [_requestId]; } } /// <summary> diff --git a/src/MongoDB.Driver/Core/Events/Diagnostics/PerformanceCounterEventSubscriber.cs b/src/MongoDB.Driver/Core/Events/Diagnostics/PerformanceCounterEventSubscriber.cs index c7f92351285..762e8120c43 100644 --- a/src/MongoDB.Driver/Core/Events/Diagnostics/PerformanceCounterEventSubscriber.cs +++ b/src/MongoDB.Driver/Core/Events/Diagnostics/PerformanceCounterEventSubscriber.cs @@ -16,14 +16,11 @@ #if NET472 using System; using System.Collections.Concurrent; -using System.Collections.Generic; using System.Net; using System.Reflection; -using MongoDB.Driver.Core.Configuration; using MongoDB.Driver.Core.Connections; using MongoDB.Driver.Core.Events.Diagnostics.PerformanceCounters; using MongoDB.Driver.Core.Servers; -using MongoDB.Driver.Core.WireProtocol.Messages; namespace MongoDB.Driver.Core.Events.Diagnostics { @@ -32,7 +29,7 @@ namespace MongoDB.Driver.Core.Events.Diagnostics /// </summary> public sealed class PerformanceCounterEventSubscriber : IEventSubscriber { - //static + //static /// <summary> /// Installs the performance counters. 
/// </summary> @@ -169,7 +166,7 @@ private void Handle(ConnectionSentMessagesEvent @event) ConnectionPerformanceRecorder recorder; if (_connectionRecorders.TryGetValue(@event.ConnectionId, out recorder)) { - recorder.PacketSent(@event.RequestIds.Count, @event.Length); + recorder.PacketSent(1, @event.Length); } } diff --git a/src/MongoDB.Driver/Core/Events/Diagnostics/TraceSourceEventSubscriber.cs b/src/MongoDB.Driver/Core/Events/Diagnostics/TraceSourceEventSubscriber.cs index fbc1ac25c16..a362daef900 100644 --- a/src/MongoDB.Driver/Core/Events/Diagnostics/TraceSourceEventSubscriber.cs +++ b/src/MongoDB.Driver/Core/Events/Diagnostics/TraceSourceEventSubscriber.cs @@ -250,17 +250,17 @@ private void Handle(ConnectionReceivingMessageFailedEvent @event) private void Handle(ConnectionSendingMessagesEvent @event) { - Debug(TraceSourceEventHelper.ConnectionIdBase + 9, "{0}: sending messages [{1}].", TraceSourceEventHelper.Label(@event.ConnectionId), string.Join(",", @event.RequestIds)); + Debug(TraceSourceEventHelper.ConnectionIdBase + 9, "{0}: sending messages [{1}].", TraceSourceEventHelper.Label(@event.ConnectionId), @event.RequestId); } private void Handle(ConnectionSentMessagesEvent @event) { - Debug(TraceSourceEventHelper.ConnectionIdBase + 10, "{0}: sent messages [{1}] of length {2} bytes in {3}ms.", TraceSourceEventHelper.Label(@event.ConnectionId), string.Join(",", @event.RequestIds), @event.Length, @event.Duration.TotalMilliseconds); + Debug(TraceSourceEventHelper.ConnectionIdBase + 10, "{0}: sent messages [{1}] of length {2} bytes in {3}ms.", TraceSourceEventHelper.Label(@event.ConnectionId), @event.RequestId, @event.Length, @event.Duration.TotalMilliseconds); } private void Handle(ConnectionSendingMessagesFailedEvent @event) { - Error(TraceSourceEventHelper.ConnectionIdBase + 11, @event.Exception, "{0}: error sending messages [{1}].", TraceSourceEventHelper.Label(@event.ConnectionId), string.Join(",", @event.RequestIds)); + 
Error(TraceSourceEventHelper.ConnectionIdBase + 11, @event.Exception, "{0}: error sending messages [{1}].", TraceSourceEventHelper.Label(@event.ConnectionId), @event.RequestId); } private void Debug(int id, string message, params object[] args) diff --git a/src/MongoDB.Driver/Core/Events/ReflectionEventSubscriber.cs b/src/MongoDB.Driver/Core/Events/ReflectionEventSubscriber.cs index cb6ed20bd2d..638b3d1e898 100644 --- a/src/MongoDB.Driver/Core/Events/ReflectionEventSubscriber.cs +++ b/src/MongoDB.Driver/Core/Events/ReflectionEventSubscriber.cs @@ -22,7 +22,7 @@ namespace MongoDB.Driver.Core.Events { /// <summary> - /// Subscribes methods with a single argument to events + /// Subscribes methods with a single argument to events /// of that single argument's type. /// </summary> public sealed class ReflectionEventSubscriber : IEventSubscriber diff --git a/src/MongoDB.Driver/Core/IAsyncCursor.cs b/src/MongoDB.Driver/Core/IAsyncCursor.cs index 55a655f5b2a..cfd9963e3ba 100644 --- a/src/MongoDB.Driver/Core/IAsyncCursor.cs +++ b/src/MongoDB.Driver/Core/IAsyncCursor.cs @@ -360,6 +360,17 @@ public static class IAsyncCursorExtensions return new AsyncCursorEnumerableOneTimeAdapter<TDocument>(cursor, cancellationToken); } + /// <summary> + /// Wraps a cursor in an IAsyncEnumerable that can be enumerated one time. + /// </summary> + /// <typeparam name="TDocument">The type of the document.</typeparam> + /// <param name="cursor">The cursor.</param> + /// <returns>An IAsyncEnumerable.</returns> + public static IAsyncEnumerable<TDocument> ToAsyncEnumerable<TDocument>(this IAsyncCursor<TDocument> cursor) + { + return new AsyncCursorEnumerableOneTimeAdapter<TDocument>(cursor, CancellationToken.None); + } + /// <summary> /// Returns a list containing all the documents returned by a cursor. 
/// </summary> diff --git a/src/MongoDB.Driver/Core/IAsyncCursorSource.cs b/src/MongoDB.Driver/Core/IAsyncCursorSource.cs index e6710c5ad49..25676130f32 100644 --- a/src/MongoDB.Driver/Core/IAsyncCursorSource.cs +++ b/src/MongoDB.Driver/Core/IAsyncCursorSource.cs @@ -336,6 +336,18 @@ public static class IAsyncCursorSourceExtensions return new AsyncCursorSourceEnumerableAdapter<TDocument>(source, cancellationToken); } + /// <summary> + /// Wraps a cursor source in an IAsyncEnumerable. Each time GetAsyncEnumerator is called a new enumerator is returned and a new cursor + /// is fetched from the cursor source on the first call to MoveNextAsync. + /// </summary> + /// <typeparam name="TDocument">The type of the document.</typeparam> + /// <param name="source">The source.</param> + /// <returns>An IAsyncEnumerable.</returns> + public static IAsyncEnumerable<TDocument> ToAsyncEnumerable<TDocument>(this IAsyncCursorSource<TDocument> source) + { + return new AsyncCursorSourceEnumerableAdapter<TDocument>(source, CancellationToken.None); + } + /// <summary> /// Returns a list containing all the documents returned by the cursor returned by a cursor source. 
/// </summary> diff --git a/src/MongoDB.Driver/Core/Logging/StructuredLogTemplateProvidersSdam.cs b/src/MongoDB.Driver/Core/Logging/StructuredLogTemplateProvidersSdam.cs index 5447eded8e7..63a63025d7b 100644 --- a/src/MongoDB.Driver/Core/Logging/StructuredLogTemplateProvidersSdam.cs +++ b/src/MongoDB.Driver/Core/Logging/StructuredLogTemplateProvidersSdam.cs @@ -78,6 +78,6 @@ private static void AddSdamTemplates() LogLevel.Trace, CmapCommonParams(Description), (e, _) => GetParams(e.ServerId, "Server description changed", e.NewDescription)); - } + } } } diff --git a/src/MongoDB.Driver/Core/Misc/BatchableSource.cs b/src/MongoDB.Driver/Core/Misc/BatchableSource.cs index f987aa1dc90..4452dac3a99 100644 --- a/src/MongoDB.Driver/Core/Misc/BatchableSource.cs +++ b/src/MongoDB.Driver/Core/Misc/BatchableSource.cs @@ -50,7 +50,7 @@ private static IReadOnlyList<T> EnumeratorToList(IEnumerator<T> enumerator) /// Initializes a new instance of the <see cref="BatchableSource{T}"/> class. /// </summary> /// <remarks> - /// Use this overload when you know the batch is small and won't have to be broken up into sub-batches. + /// Use this overload when you know the batch is small and won't have to be broken up into sub-batches. /// In that case using this overload is simpler than using an enumerator and using the other constructor. /// </remarks> /// <param name="batch">The single batch.</param> diff --git a/src/MongoDB.Driver/Core/Misc/Ensure.cs b/src/MongoDB.Driver/Core/Misc/Ensure.cs index 7fbab39b9b7..aff914d07fe 100644 --- a/src/MongoDB.Driver/Core/Misc/Ensure.cs +++ b/src/MongoDB.Driver/Core/Misc/Ensure.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -461,8 +461,7 @@ public static TimeSpan IsValidTimeout(TimeSpan value, string paramName) { if (value < TimeSpan.Zero && value != Timeout.InfiniteTimeSpan) { - var message = string.Format("Invalid timeout: {0}.", value); - throw new ArgumentException(message, paramName); + throw new ArgumentOutOfRangeException($"Invalid timeout: {value}.", paramName); } return value; } diff --git a/src/MongoDB.Driver/Core/Misc/Feature.cs b/src/MongoDB.Driver/Core/Misc/Feature.cs index 506e28de10c..2136f31736d 100644 --- a/src/MongoDB.Driver/Core/Misc/Feature.cs +++ b/src/MongoDB.Driver/Core/Misc/Feature.cs @@ -51,6 +51,7 @@ public class Feature private static readonly Feature __csfleRangeAlgorithm = new Feature("CsfleRangeAlgorithm", WireVersion.Server62); private static readonly Feature __csfle2Qev2Lookup = new Feature("csfle2Qev2Lookup", WireVersion.Server81); private static readonly Feature __csfle2Qev2RangeAlgorithm = new Feature("csfle2Qev2RangeAlgorithm", WireVersion.Server80); + private static readonly Feature __csfle2Qev2TextPreviewAlgorithm = new Feature("csfle2Qev2TextPreviewAlgorithm", WireVersion.Server82); private static readonly Feature __csfle2 = new Feature("Csfle2", WireVersion.Server60); private static readonly Feature __csfle2Qev2 = new Feature("Csfle2Qev2", WireVersion.Server70, notSupportedMessage: "Driver support of Queryable Encryption is incompatible with server. 
Upgrade server to use Queryable Encryption."); private static readonly Feature __dateFromStringFormatArgument = new Feature("DateFromStringFormatArgument", WireVersion.Server40); @@ -83,6 +84,8 @@ public class Feature private static readonly Feature __loookupConciseSyntax = new Feature("LoookupConciseSyntax", WireVersion.Server50); private static readonly Feature __loookupDocuments= new Feature("LoookupDocuments", WireVersion.Server60); private static readonly Feature __mmapV1StorageEngine = new Feature("MmapV1StorageEngine", WireVersion.Zero, WireVersion.Server42); + private static readonly Feature __medianOperator = new Feature("MedianOperator", WireVersion.Server70); + private static readonly Feature __percentileOperator = new Feature("PercentileOperator", WireVersion.Server70); private static readonly Feature __pickAccumulatorsNewIn52 = new Feature("PickAccumulatorsNewIn52", WireVersion.Server52); private static readonly Feature __rankFusionStage = new Feature("RankFusionStage", WireVersion.Server81); private static readonly Feature __regexMatch = new Feature("RegexMatch", WireVersion.Server42); @@ -215,6 +218,7 @@ public class Feature /// <summary> /// Gets the create indexes using insert operations feature. /// </summary> + [Obsolete("This feature was removed in server version 4.2. As such, this property will be removed in a later release.")] public static Feature CreateIndexesUsingInsertOperations => __createIndexesUsingInsertOperations; /// <summary> @@ -242,6 +246,11 @@ public class Feature /// </summary> public static Feature Csfle2QEv2RangeAlgorithm => __csfle2Qev2RangeAlgorithm; + /// <summary> + /// Gets the csfle2 textPreview algorithm feature. + /// </summary> + public static Feature Csfle2QEv2TextPreviewAlgorithm => __csfle2Qev2TextPreviewAlgorithm; + /// <summary> /// Gets the $dateFromString format argument feature. /// </summary> @@ -275,6 +284,7 @@ public class Feature /// <summary> /// Gets the eval feature. 
/// </summary> + [Obsolete("This feature was removed in server version 4.2. As such, this property will be removed in a later release.")] public static Feature Eval => __eval; /// <summary> @@ -328,6 +338,7 @@ public class Feature /// <summary> /// Gets the group command feature. /// </summary> + [Obsolete("This feature was removed in server version 4.2. As such, this property will be removed in a later release.")] public static Feature GroupCommand => __groupCommand; /// <summary> @@ -395,8 +406,19 @@ public class Feature /// <summary> /// Gets the mmapv1 storage engine feature. /// </summary> + [Obsolete("This feature was removed in server version 4.2. As such, this property will be removed in a later release.")] public static Feature MmapV1StorageEngine => __mmapV1StorageEngine; + /// <summary> + /// Gets the $median operator added in 7.0 + /// </summary> + public static Feature MedianOperator => __medianOperator; + + /// <summary> + /// Gets the $percentile operator added in 7.0 + /// </summary> + public static Feature PercentileOperator => __percentileOperator; + /// <summary> /// Gets the pick accumulators new in 5.2 feature. 
/// </summary> @@ -565,9 +587,11 @@ internal int LastNotSupportedWireVersion public void ThrowIfNotSupported(IMongoClient client, CancellationToken cancellationToken = default) { var cluster = client.GetClusterInternal(); + // TODO: CSOT implement proper way to obtain the operationContext + var operationContext = new OperationContext(null, cancellationToken); using (var binding = new ReadWriteBindingHandle(new WritableServerBinding(cluster, NoCoreSession.NewHandle()))) - using (var channelSource = binding.GetWriteChannelSource(cancellationToken)) - using (var channel = channelSource.GetChannel(cancellationToken)) + using (var channelSource = binding.GetWriteChannelSource(operationContext)) + using (var channel = channelSource.GetChannel(operationContext)) { // Use WireVersion from a connection since server level value may be null ThrowIfNotSupported(channel.ConnectionDescription.MaxWireVersion); @@ -582,9 +606,11 @@ public void ThrowIfNotSupported(IMongoClient client, CancellationToken cancellat public async Task ThrowIfNotSupportedAsync(IMongoClient client, CancellationToken cancellationToken = default) { var cluster = client.GetClusterInternal(); + // TODO: CSOT implement proper way to obtain the operationContext + var operationContext = new OperationContext(null, cancellationToken); using (var binding = new ReadWriteBindingHandle(new WritableServerBinding(cluster, NoCoreSession.NewHandle()))) - using (var channelSource = await binding.GetWriteChannelSourceAsync(cancellationToken).ConfigureAwait(false)) - using (var channel = await channelSource.GetChannelAsync(cancellationToken).ConfigureAwait(false)) + using (var channelSource = await binding.GetWriteChannelSourceAsync(operationContext).ConfigureAwait(false)) + using (var channel = await channelSource.GetChannelAsync(operationContext).ConfigureAwait(false)) { // Use WireVersion from a connection since server level value may be null ThrowIfNotSupported(channel.ConnectionDescription.MaxWireVersion); diff --git 
a/src/MongoDB.Driver/Core/Misc/IClock.cs b/src/MongoDB.Driver/Core/Misc/IClock.cs index d409bb604ee..de818c3cbdc 100644 --- a/src/MongoDB.Driver/Core/Misc/IClock.cs +++ b/src/MongoDB.Driver/Core/Misc/IClock.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,6 +19,10 @@ namespace MongoDB.Driver.Core.Misc { internal interface IClock { + long Frequency { get; } + DateTime UtcNow { get; } + + long GetTimestamp(); } } diff --git a/src/MongoDB.Driver/Core/Misc/SemaphoreSlimRequest.cs b/src/MongoDB.Driver/Core/Misc/SemaphoreSlimRequest.cs index eed0ee4a21e..8c583fa72d8 100644 --- a/src/MongoDB.Driver/Core/Misc/SemaphoreSlimRequest.cs +++ b/src/MongoDB.Driver/Core/Misc/SemaphoreSlimRequest.cs @@ -22,6 +22,7 @@ namespace MongoDB.Driver.Core.Misc /// <summary> /// Represents a tentative request to acquire a SemaphoreSlim. /// </summary> + [Obsolete("SemaphoreSlimRequest is deprecated and will be removed in future release")] public sealed class SemaphoreSlimRequest : IDisposable { // private fields @@ -39,12 +40,23 @@ public sealed class SemaphoreSlimRequest : IDisposable /// <param name="semaphore">The semaphore.</param> /// <param name="cancellationToken">The cancellation token.</param> public SemaphoreSlimRequest(SemaphoreSlim semaphore, CancellationToken cancellationToken) + : this(semaphore, Timeout.InfiniteTimeSpan, cancellationToken) + { + } + + /// <summary> + /// Initializes a new instance of the <see cref="SemaphoreSlimRequest"/> class. 
+ /// </summary> + /// <param name="semaphore">The semaphore.</param> + /// <param name="timeout">The timeout.</param> + /// <param name="cancellationToken">The cancellation token.</param> + public SemaphoreSlimRequest(SemaphoreSlim semaphore, TimeSpan timeout, CancellationToken cancellationToken) { _semaphore = Ensure.IsNotNull(semaphore, nameof(semaphore)); _disposeCancellationTokenSource = new CancellationTokenSource(); _linkedCancellationTokenSource = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken, _disposeCancellationTokenSource.Token); - _task = semaphore.WaitAsync(_linkedCancellationTokenSource.Token); + _task = semaphore.WaitAsync(timeout, _linkedCancellationTokenSource.Token); } // public properties @@ -56,7 +68,7 @@ public SemaphoreSlimRequest(SemaphoreSlim semaphore, CancellationToken cancellat /// </value> public Task Task => _task; - // public methods + // public methods /// <inheritdoc/> public void Dispose() { diff --git a/src/MongoDB.Driver/Core/Misc/StreamExtensionMethods.cs b/src/MongoDB.Driver/Core/Misc/StreamExtensionMethods.cs index 248aa756272..93e2da1a1ba 100644 --- a/src/MongoDB.Driver/Core/Misc/StreamExtensionMethods.cs +++ b/src/MongoDB.Driver/Core/Misc/StreamExtensionMethods.cs @@ -36,46 +36,80 @@ public static void EfficientCopyTo(this Stream input, Stream output) } } - public static async Task<int> ReadAsync(this Stream stream, byte[] buffer, int offset, int count, TimeSpan timeout, CancellationToken cancellationToken) + public static int Read(this Stream stream, byte[] buffer, int offset, int count, TimeSpan timeout, CancellationToken cancellationToken) { - var state = 1; // 1 == reading, 2 == done reading, 3 == timedout, 4 == cancelled - - var bytesRead = 0; - using (new Timer(_ => ChangeState(3), null, timeout, Timeout.InfiniteTimeSpan)) - using (cancellationToken.Register(() => ChangeState(4))) + try { - try - { - bytesRead = await stream.ReadAsync(buffer, offset, count, cancellationToken).ConfigureAwait(false); - 
ChangeState(2); // note: might not actually go to state 2 if already in state 3 or 4 - } - catch when (state == 1) - { - try { stream.Dispose(); } catch { } - throw; - } - catch when (state >= 3) + using var manualResetEvent = new ManualResetEventSlim(); + var readOperation = stream.BeginRead( + buffer, + offset, + count, + state => ((ManualResetEventSlim)state.AsyncState).Set(), + manualResetEvent); + + if (readOperation.IsCompleted || manualResetEvent.Wait(timeout, cancellationToken)) { - // a timeout or operation cancelled exception will be thrown instead + return stream.EndRead(readOperation); } + } + catch (OperationCanceledException) + { + // Have to suppress OperationCanceledException here, it will be thrown after the stream will be disposed. + } + catch (ObjectDisposedException) + { + throw new IOException(); + } - if (state == 3) { throw new TimeoutException(); } - if (state == 4) { throw new OperationCanceledException(); } + try + { + stream.Dispose(); + } + catch + { + // Ignore any exceptions } - return bytesRead; + cancellationToken.ThrowIfCancellationRequested(); + throw new TimeoutException(); + } - void ChangeState(int to) + public static async Task<int> ReadAsync(this Stream stream, byte[] buffer, int offset, int count, TimeSpan timeout, CancellationToken cancellationToken) + { + Task<int> readTask = null; + try { - var from = Interlocked.CompareExchange(ref state, to, 1); - if (from == 1 && to >= 3) + readTask = stream.ReadAsync(buffer, offset, count); + return await readTask.WaitAsync(timeout, cancellationToken).ConfigureAwait(false); + } + catch (ObjectDisposedException) + { + // It's possible to get ObjectDisposedException when the connection pool was closed with interruptInUseConnections set to true. 
+ throw new IOException(); + } + catch (Exception ex) when (ex is OperationCanceledException or TimeoutException) + { + // await Task.WaitAsync() throws OperationCanceledException in case of cancellation and TimeoutException in case of timeout + try { - try { stream.Dispose(); } catch { } // disposing the stream aborts the read attempt + stream.Dispose(); + if (readTask != null) + { + // Should await on the task to avoid UnobservedTaskException + await readTask.ConfigureAwait(false); + } } + catch + { + // Ignore any exceptions + } + + throw; } } - public static void ReadBytes(this Stream stream, byte[] buffer, int offset, int count, CancellationToken cancellationToken) + public static void ReadBytes(this Stream stream, OperationContext operationContext, byte[] buffer, int offset, int count, TimeSpan socketTimeout) { Ensure.IsNotNull(stream, nameof(stream)); Ensure.IsNotNull(buffer, nameof(buffer)); @@ -84,7 +118,7 @@ public static void ReadBytes(this Stream stream, byte[] buffer, int offset, int while (count > 0) { - var bytesRead = stream.Read(buffer, offset, count); // TODO: honor cancellationToken? 
+ var bytesRead = stream.Read(buffer, offset, count, operationContext.RemainingTimeoutOrDefault(socketTimeout), operationContext.CancellationToken); if (bytesRead == 0) { throw new EndOfStreamException(); @@ -94,7 +128,7 @@ public static void ReadBytes(this Stream stream, byte[] buffer, int offset, int } } - public static void ReadBytes(this Stream stream, IByteBuffer buffer, int offset, int count, CancellationToken cancellationToken) + public static void ReadBytes(this Stream stream, OperationContext operationContext, IByteBuffer buffer, int offset, int count, TimeSpan socketTimeout) { Ensure.IsNotNull(stream, nameof(stream)); Ensure.IsNotNull(buffer, nameof(buffer)); @@ -105,7 +139,21 @@ public static void ReadBytes(this Stream stream, IByteBuffer buffer, int offset, { var backingBytes = buffer.AccessBackingBytes(offset); var bytesToRead = Math.Min(count, backingBytes.Count); - var bytesRead = stream.Read(backingBytes.Array, backingBytes.Offset, bytesToRead); // TODO: honor cancellationToken? + var bytesRead = stream.Read(backingBytes.Array, backingBytes.Offset, bytesToRead, operationContext.RemainingTimeoutOrDefault(socketTimeout), operationContext.CancellationToken); + if (bytesRead == 0) + { + throw new EndOfStreamException(); + } + offset += bytesRead; + count -= bytesRead; + } + } + + public static void ReadBytes(this Stream stream, byte[] destination, int offset, int count, CancellationToken cancellationToken) + { + while (count > 0) + { + var bytesRead = stream.Read(destination, offset, count); // TODO: honor cancellationToken? 
if (bytesRead == 0) { throw new EndOfStreamException(); @@ -115,7 +163,7 @@ public static void ReadBytes(this Stream stream, IByteBuffer buffer, int offset, } } - public static async Task ReadBytesAsync(this Stream stream, byte[] buffer, int offset, int count, TimeSpan timeout, CancellationToken cancellationToken) + public static async Task ReadBytesAsync(this Stream stream, OperationContext operationContext, byte[] buffer, int offset, int count, TimeSpan socketTimeout) { Ensure.IsNotNull(stream, nameof(stream)); Ensure.IsNotNull(buffer, nameof(buffer)); @@ -124,7 +172,7 @@ public static async Task ReadBytesAsync(this Stream stream, byte[] buffer, int o while (count > 0) { - var bytesRead = await stream.ReadAsync(buffer, offset, count, timeout, cancellationToken).ConfigureAwait(false); + var bytesRead = await stream.ReadAsync(buffer, offset, count, operationContext.RemainingTimeoutOrDefault(socketTimeout), operationContext.CancellationToken).ConfigureAwait(false); if (bytesRead == 0) { throw new EndOfStreamException(); @@ -134,7 +182,7 @@ public static async Task ReadBytesAsync(this Stream stream, byte[] buffer, int o } } - public static async Task ReadBytesAsync(this Stream stream, IByteBuffer buffer, int offset, int count, TimeSpan timeout, CancellationToken cancellationToken) + public static async Task ReadBytesAsync(this Stream stream, OperationContext operationContext, IByteBuffer buffer, int offset, int count, TimeSpan socketTimeout) { Ensure.IsNotNull(stream, nameof(stream)); Ensure.IsNotNull(buffer, nameof(buffer)); @@ -145,7 +193,7 @@ public static async Task ReadBytesAsync(this Stream stream, IByteBuffer buffer, { var backingBytes = buffer.AccessBackingBytes(offset); var bytesToRead = Math.Min(count, backingBytes.Count); - var bytesRead = await stream.ReadAsync(backingBytes.Array, backingBytes.Offset, bytesToRead, timeout, cancellationToken).ConfigureAwait(false); + var bytesRead = await stream.ReadAsync(backingBytes.Array, backingBytes.Offset, 
bytesToRead, operationContext.RemainingTimeoutOrDefault(socketTimeout), operationContext.CancellationToken).ConfigureAwait(false); if (bytesRead == 0) { throw new EndOfStreamException(); @@ -155,44 +203,96 @@ public static async Task ReadBytesAsync(this Stream stream, IByteBuffer buffer, } } - - public static async Task WriteAsync(this Stream stream, byte[] buffer, int offset, int count, TimeSpan timeout, CancellationToken cancellationToken) + public static async Task ReadBytesAsync(this Stream stream, byte[] destination, int offset, int count, CancellationToken cancellationToken) { - var state = 1; // 1 == writing, 2 == done writing, 3 == timedout, 4 == cancelled - - using (new Timer(_ => ChangeState(3), null, timeout, Timeout.InfiniteTimeSpan)) - using (cancellationToken.Register(() => ChangeState(4))) + while (count > 0) { - try - { - await stream.WriteAsync(buffer, offset, count, cancellationToken).ConfigureAwait(false); - ChangeState(2); // note: might not actually go to state 2 if already in state 3 or 4 - } - catch when (state == 1) + var bytesRead = await stream.ReadAsync(destination, offset, count, cancellationToken).ConfigureAwait(false); + if (bytesRead == 0) { - try { stream.Dispose(); } catch { } - throw; + throw new EndOfStreamException(); } - catch when (state >= 3) + offset += bytesRead; + count -= bytesRead; + } + } + + public static void Write(this Stream stream, byte[] buffer, int offset, int count, TimeSpan timeout, CancellationToken cancellationToken) + { + try + { + using var manualResetEvent = new ManualResetEventSlim(); + var writeOperation = stream.BeginWrite( + buffer, + offset, + count, + state => ((ManualResetEventSlim)state.AsyncState).Set(), + manualResetEvent); + + if (writeOperation.IsCompleted || manualResetEvent.Wait(timeout, cancellationToken)) { - // a timeout or operation cancelled exception will be thrown instead + stream.EndWrite(writeOperation); + return; } + } + catch (OperationCanceledException) + { + // Have to suppress 
OperationCanceledException here, it will be thrown after the stream will be disposed. + } + catch (ObjectDisposedException) + { + // It's possible to get ObjectDisposedException when the connection pool was closed with interruptInUseConnections set to true. + throw new IOException(); + } - if (state == 3) { throw new TimeoutException(); } - if (state == 4) { throw new OperationCanceledException(); } + try + { + stream.Dispose(); + } + catch + { + // Ignore any exceptions } - void ChangeState(int to) + cancellationToken.ThrowIfCancellationRequested(); + throw new TimeoutException(); + } + + public static async Task WriteAsync(this Stream stream, byte[] buffer, int offset, int count, TimeSpan timeout, CancellationToken cancellationToken) + { + Task writeTask = null; + try + { + writeTask = stream.WriteAsync(buffer, offset, count); + await writeTask.WaitAsync(timeout, cancellationToken).ConfigureAwait(false); + } + catch (ObjectDisposedException) + { + // It's possible to get ObjectDisposedException when the connection pool was closed with interruptInUseConnections set to true. 
+ throw new IOException(); + } + catch (Exception ex) when (ex is OperationCanceledException or TimeoutException) { - var from = Interlocked.CompareExchange(ref state, to, 1); - if (from == 1 && to >= 3) + // await Task.WaitAsync() throws OperationCanceledException in case of cancellation and TimeoutException in case of timeout + try + { + stream.Dispose(); + // Should await on the task to avoid UnobservedTaskException + if (writeTask != null) + { + await writeTask.ConfigureAwait(false); + } + } + catch { - try { stream.Dispose(); } catch { } // disposing the stream aborts the write attempt + // Ignore any exceptions } + + throw; } } - public static void WriteBytes(this Stream stream, IByteBuffer buffer, int offset, int count, CancellationToken cancellationToken) + public static void WriteBytes(this Stream stream, OperationContext operationContext, IByteBuffer buffer, int offset, int count, TimeSpan socketTimeout) { Ensure.IsNotNull(stream, nameof(stream)); Ensure.IsNotNull(buffer, nameof(buffer)); @@ -201,16 +301,15 @@ public static void WriteBytes(this Stream stream, IByteBuffer buffer, int offset while (count > 0) { - cancellationToken.ThrowIfCancellationRequested(); var backingBytes = buffer.AccessBackingBytes(offset); var bytesToWrite = Math.Min(count, backingBytes.Count); - stream.Write(backingBytes.Array, backingBytes.Offset, bytesToWrite); // TODO: honor cancellationToken? 
+ stream.Write(backingBytes.Array, backingBytes.Offset, bytesToWrite, operationContext.RemainingTimeoutOrDefault(socketTimeout), operationContext.CancellationToken); offset += bytesToWrite; count -= bytesToWrite; } } - public static async Task WriteBytesAsync(this Stream stream, IByteBuffer buffer, int offset, int count, TimeSpan timeout, CancellationToken cancellationToken) + public static async Task WriteBytesAsync(this Stream stream, OperationContext operationContext, IByteBuffer buffer, int offset, int count, TimeSpan socketTimeout) { Ensure.IsNotNull(stream, nameof(stream)); Ensure.IsNotNull(buffer, nameof(buffer)); @@ -221,7 +320,7 @@ public static async Task WriteBytesAsync(this Stream stream, IByteBuffer buffer, { var backingBytes = buffer.AccessBackingBytes(offset); var bytesToWrite = Math.Min(count, backingBytes.Count); - await stream.WriteAsync(backingBytes.Array, backingBytes.Offset, bytesToWrite, timeout, cancellationToken).ConfigureAwait(false); + await stream.WriteAsync(backingBytes.Array, backingBytes.Offset, bytesToWrite, operationContext.RemainingTimeoutOrDefault(socketTimeout), operationContext.CancellationToken).ConfigureAwait(false); offset += bytesToWrite; count -= bytesToWrite; } diff --git a/src/MongoDB.Driver/Core/Misc/SystemClock.cs b/src/MongoDB.Driver/Core/Misc/SystemClock.cs index f972f9aed62..b01565b87e5 100644 --- a/src/MongoDB.Driver/Core/Misc/SystemClock.cs +++ b/src/MongoDB.Driver/Core/Misc/SystemClock.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -14,6 +14,7 @@ */ using System; +using System.Diagnostics; namespace MongoDB.Driver.Core.Misc { @@ -28,9 +29,13 @@ private SystemClock() } // public properties + public long Frequency => Stopwatch.Frequency; + public DateTime UtcNow { get { return DateTime.UtcNow; } } + + public long GetTimestamp() => Stopwatch.GetTimestamp(); } } diff --git a/src/MongoDB.Driver/Core/Misc/TaskExtensions.cs b/src/MongoDB.Driver/Core/Misc/TaskExtensions.cs index b6702bce182..0de05faa0e5 100644 --- a/src/MongoDB.Driver/Core/Misc/TaskExtensions.cs +++ b/src/MongoDB.Driver/Core/Misc/TaskExtensions.cs @@ -15,12 +15,84 @@ using System; using System.Runtime.CompilerServices; +using System.Threading; using System.Threading.Tasks; namespace MongoDB.Driver.Core.Misc { internal static class TaskExtensions { +#if !NET6_0_OR_GREATER + public static Task WaitAsync(this Task task, TimeSpan timeout, CancellationToken cancellationToken) + { + EnsureTimeoutIsValid(timeout); + return WaitAsyncCore(task, timeout, cancellationToken); + + static async Task WaitAsyncCore(Task task, TimeSpan timeout, CancellationToken cancellationToken) + { + if (!task.IsCompleted) + { + var timeoutTask = Task.Delay(timeout, cancellationToken); + await Task.WhenAny(task, timeoutTask).ConfigureAwait(false); + } + + if (task.IsCompleted) + { + // will re-throw the exception if any + await task.ConfigureAwait(false); + return; + } + + if (cancellationToken.IsCancellationRequested) + { + throw new TaskCanceledException(); + } + throw new TimeoutException(); + } + } + + public static Task<TResult> WaitAsync<TResult>(this Task<TResult> task, TimeSpan timeout, CancellationToken cancellationToken) + { + EnsureTimeoutIsValid(timeout); + return WaitAsyncCore(task, timeout, cancellationToken); + + static async Task<TResult> WaitAsyncCore(Task<TResult> task, TimeSpan timeout, CancellationToken cancellationToken) + { + if (!task.IsCompleted) + { + var timeoutTask = Task.Delay(timeout, cancellationToken); + await Task.WhenAny(task, 
timeoutTask).ConfigureAwait(false); + } + + if (task.IsCompleted) + { + // will return the result or re-throw the exception if any + return await task.ConfigureAwait(false); + } + + if (cancellationToken.IsCancellationRequested) + { + throw new TaskCanceledException(); + } + + throw new TimeoutException(); + } + } + + private static void EnsureTimeoutIsValid(TimeSpan timeout) + { + if (timeout == Timeout.InfiniteTimeSpan) + { + return; + } + + if (timeout < TimeSpan.Zero) + { + throw new ArgumentOutOfRangeException(nameof(timeout)); + } + } +#endif + internal struct YieldNoContextAwaitable { public YieldNoContextAwaiter GetAwaiter() { return new YieldNoContextAwaiter(); } diff --git a/src/MongoDB.Driver/Core/Misc/WireVersion.cs b/src/MongoDB.Driver/Core/Misc/WireVersion.cs index bacf5406e74..29ccc23179a 100644 --- a/src/MongoDB.Driver/Core/Misc/WireVersion.cs +++ b/src/MongoDB.Driver/Core/Misc/WireVersion.cs @@ -128,6 +128,10 @@ internal static class WireVersion /// Wire version 27. /// </summary> public const int Server82 = 27; + /// <summary> + /// Wire version 28. 
+ /// </summary> + public const int Server83 = 28; // note: keep WireVersion.cs and ServerVersion.cs in sync @@ -167,9 +171,10 @@ internal static class WireVersion new WireVersionInfo(wireVersion: 25, major: 8, minor: 0), new WireVersionInfo(wireVersion: 26, major: 8, minor: 1), new WireVersionInfo(wireVersion: 27, major: 8, minor: 2), + new WireVersionInfo(wireVersion: 28, major: 8, minor: 3) }; - private static Range<int> __supportedWireVersionRange = CreateSupportedWireVersionRange(minWireVersion: Server40, maxWireVersion: Server82); + private static Range<int> __supportedWireVersionRange = CreateSupportedWireVersionRange(minWireVersion: Server42, maxWireVersion: Server83); private static Range<int> CreateSupportedWireVersionRange(int minWireVersion, int maxWireVersion) { diff --git a/src/MongoDB.Driver/Core/Operations/AggregateOperation.cs b/src/MongoDB.Driver/Core/Operations/AggregateOperation.cs index a66161543a9..8f1947cc546 100644 --- a/src/MongoDB.Driver/Core/Operations/AggregateOperation.cs +++ b/src/MongoDB.Driver/Core/Operations/AggregateOperation.cs @@ -16,7 +16,6 @@ using System; using System.Collections.Generic; using System.Linq; -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Bson.IO; @@ -270,27 +269,27 @@ public bool? 
UseCursor // methods /// <inheritdoc/> - public IAsyncCursor<TResult> Execute(IReadBinding binding, CancellationToken cancellationToken) + public IAsyncCursor<TResult> Execute(OperationContext operationContext, IReadBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); using (BeginOperation()) - using (var context = RetryableReadContext.Create(binding, _retryRequested, cancellationToken)) + using (var context = RetryableReadContext.Create(operationContext, binding, _retryRequested)) { - return Execute(context, cancellationToken); + return Execute(operationContext, context); } } /// <inheritdoc/> - public IAsyncCursor<TResult> Execute(RetryableReadContext context, CancellationToken cancellationToken) + public IAsyncCursor<TResult> Execute(OperationContext operationContext, RetryableReadContext context) { Ensure.IsNotNull(context, nameof(context)); EnsureIsReadOnlyPipeline(); using (EventContext.BeginOperation()) { - var operation = CreateOperation(context); - var result = operation.Execute(context, cancellationToken); + var operation = CreateOperation(operationContext, context); + var result = operation.Execute(operationContext, context); context.ChannelSource.Session.SetSnapshotTimeIfNeeded(result.AtClusterTime); @@ -299,27 +298,27 @@ public IAsyncCursor<TResult> Execute(RetryableReadContext context, CancellationT } /// <inheritdoc/> - public async Task<IAsyncCursor<TResult>> ExecuteAsync(IReadBinding binding, CancellationToken cancellationToken) + public async Task<IAsyncCursor<TResult>> ExecuteAsync(OperationContext operationContext, IReadBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); using (BeginOperation()) - using (var context = await RetryableReadContext.CreateAsync(binding, _retryRequested, cancellationToken).ConfigureAwait(false)) + using (var context = await RetryableReadContext.CreateAsync(operationContext, binding, _retryRequested).ConfigureAwait(false)) { - return await ExecuteAsync(context, 
cancellationToken).ConfigureAwait(false); + return await ExecuteAsync(operationContext, context).ConfigureAwait(false); } } /// <inheritdoc/> - public async Task<IAsyncCursor<TResult>> ExecuteAsync(RetryableReadContext context, CancellationToken cancellationToken) + public async Task<IAsyncCursor<TResult>> ExecuteAsync(OperationContext operationContext, RetryableReadContext context) { Ensure.IsNotNull(context, nameof(context)); EnsureIsReadOnlyPipeline(); using (EventContext.BeginOperation()) { - var operation = CreateOperation(context); - var result = await operation.ExecuteAsync(context, cancellationToken).ConfigureAwait(false); + var operation = CreateOperation(operationContext, context); + var result = await operation.ExecuteAsync(operationContext, context).ConfigureAwait(false); context.ChannelSource.Session.SetSnapshotTimeIfNeeded(result.AtClusterTime); @@ -327,7 +326,7 @@ public async Task<IAsyncCursor<TResult>> ExecuteAsync(RetryableReadContext conte } } - internal BsonDocument CreateCommand(ConnectionDescription connectionDescription, ICoreSession session) + internal BsonDocument CreateCommand(OperationContext operationContext, ICoreSession session, ConnectionDescription connectionDescription) { var readConcern = ReadConcernHelper.GetReadConcernForCommand(session, connectionDescription, _readConcern); var command = new BsonDocument @@ -335,7 +334,7 @@ internal BsonDocument CreateCommand(ConnectionDescription connectionDescription, { "aggregate", _collectionNamespace == null ? 
(BsonValue)1 : _collectionNamespace.CollectionName }, { "pipeline", new BsonArray(_pipeline) }, { "allowDiskUse", () => _allowDiskUse.Value, _allowDiskUse.HasValue }, - { "maxTimeMS", () => MaxTimeHelper.ToMaxTimeMS(_maxTime.Value), _maxTime.HasValue }, + { "maxTimeMS", () => MaxTimeHelper.ToMaxTimeMS(_maxTime.Value), _maxTime.HasValue && !operationContext.IsRootContextTimeoutConfigured() }, { "collation", () => _collation.ToBsonDocument(), _collation != null }, { "hint", _hint, _hint != null }, { "let", _let, _let != null }, @@ -355,10 +354,10 @@ internal BsonDocument CreateCommand(ConnectionDescription connectionDescription, private IDisposable BeginOperation() => EventContext.BeginOperation(null, "aggregate"); - private ReadCommandOperation<AggregateResult> CreateOperation(RetryableReadContext context) + private ReadCommandOperation<AggregateResult> CreateOperation(OperationContext operationContext, RetryableReadContext context) { var databaseNamespace = _collectionNamespace == null ? _databaseNamespace : _collectionNamespace.DatabaseNamespace; - var command = CreateCommand(context.Channel.ConnectionDescription, context.Binding.Session); + var command = CreateCommand(operationContext, context.Binding.Session, context.Channel.ConnectionDescription); var serializer = new AggregateResultDeserializer(_resultSerializer); return new ReadCommandOperation<AggregateResult>(databaseNamespace, command, serializer, MessageEncoderSettings) { diff --git a/src/MongoDB.Driver/Core/Operations/AggregateToCollectionOperation.cs b/src/MongoDB.Driver/Core/Operations/AggregateToCollectionOperation.cs index 88bc86f854c..0003d2cb3ae 100644 --- a/src/MongoDB.Driver/Core/Operations/AggregateToCollectionOperation.cs +++ b/src/MongoDB.Driver/Core/Operations/AggregateToCollectionOperation.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,7 +16,6 @@ using System; using System.Collections.Generic; using System.Linq; -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Bson.Serialization.Serializers; @@ -148,49 +147,49 @@ public WriteConcern WriteConcern set { _writeConcern = value; } } - public BsonDocument Execute(IWriteBinding binding, CancellationToken cancellationToken) + public BsonDocument Execute(OperationContext operationContext, IWriteBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); var mayUseSecondary = new MayUseSecondary(_readPreference); using (BeginOperation()) - using (var channelSource = binding.GetWriteChannelSource(mayUseSecondary, cancellationToken)) - using (var channel = channelSource.GetChannel(cancellationToken)) + using (var channelSource = binding.GetWriteChannelSource(operationContext, mayUseSecondary)) + using (var channel = channelSource.GetChannel(operationContext)) using (var channelBinding = new ChannelReadWriteBinding(channelSource.Server, channel, binding.Session.Fork())) { - var operation = CreateOperation(channelBinding.Session, channel.ConnectionDescription, mayUseSecondary.EffectiveReadPreference); - return operation.Execute(channelBinding, cancellationToken); + var operation = CreateOperation(operationContext, channelBinding.Session, channel.ConnectionDescription, mayUseSecondary.EffectiveReadPreference); + return operation.Execute(operationContext, channelBinding); } } - public async Task<BsonDocument> ExecuteAsync(IWriteBinding binding, CancellationToken cancellationToken) + public async Task<BsonDocument> ExecuteAsync(OperationContext operationContext, IWriteBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); var mayUseSecondary = new MayUseSecondary(_readPreference); using (BeginOperation()) - using (var channelSource = await 
binding.GetWriteChannelSourceAsync(mayUseSecondary, cancellationToken).ConfigureAwait(false)) - using (var channel = await channelSource.GetChannelAsync(cancellationToken).ConfigureAwait(false)) + using (var channelSource = await binding.GetWriteChannelSourceAsync(operationContext, mayUseSecondary).ConfigureAwait(false)) + using (var channel = await channelSource.GetChannelAsync(operationContext).ConfigureAwait(false)) using (var channelBinding = new ChannelReadWriteBinding(channelSource.Server, channel, binding.Session.Fork())) { - var operation = CreateOperation(channelBinding.Session, channel.ConnectionDescription, mayUseSecondary.EffectiveReadPreference); - return await operation.ExecuteAsync(channelBinding, cancellationToken).ConfigureAwait(false); + var operation = CreateOperation(operationContext, channelBinding.Session, channel.ConnectionDescription, mayUseSecondary.EffectiveReadPreference); + return await operation.ExecuteAsync(operationContext, channelBinding).ConfigureAwait(false); } } - public BsonDocument CreateCommand(ICoreSessionHandle session, ConnectionDescription connectionDescription) + public BsonDocument CreateCommand(OperationContext operationContext, ICoreSessionHandle session, ConnectionDescription connectionDescription) { var readConcern = _readConcern != null ? ReadConcernHelper.GetReadConcernForCommand(session, connectionDescription, _readConcern) : null; - var writeConcern = WriteConcernHelper.GetEffectiveWriteConcern(session, _writeConcern); + var writeConcern = WriteConcernHelper.GetEffectiveWriteConcern(operationContext, session, _writeConcern); return new BsonDocument { { "aggregate", _collectionNamespace == null ? 
(BsonValue)1 : _collectionNamespace.CollectionName }, { "pipeline", new BsonArray(_pipeline) }, { "allowDiskUse", () => _allowDiskUse.Value, _allowDiskUse.HasValue }, { "bypassDocumentValidation", () => _bypassDocumentValidation.Value, _bypassDocumentValidation.HasValue }, - { "maxTimeMS", () => MaxTimeHelper.ToMaxTimeMS(_maxTime.Value), _maxTime.HasValue }, + { "maxTimeMS", () => MaxTimeHelper.ToMaxTimeMS(_maxTime.Value), _maxTime.HasValue && !operationContext.IsRootContextTimeoutConfigured() }, { "collation", () => _collation.ToBsonDocument(), _collation != null }, { "readConcern", readConcern, readConcern != null }, { "writeConcern", writeConcern, writeConcern != null }, @@ -203,9 +202,9 @@ public BsonDocument CreateCommand(ICoreSessionHandle session, ConnectionDescript private IDisposable BeginOperation() => EventContext.BeginOperation("aggregate"); - private WriteCommandOperation<BsonDocument> CreateOperation(ICoreSessionHandle session, ConnectionDescription connectionDescription, ReadPreference effectiveReadPreference) + private WriteCommandOperation<BsonDocument> CreateOperation(OperationContext operationContext, ICoreSessionHandle session, ConnectionDescription connectionDescription, ReadPreference effectiveReadPreference) { - var command = CreateCommand(session, connectionDescription); + var command = CreateCommand(operationContext, session, connectionDescription); var operation = new WriteCommandOperation<BsonDocument>(_databaseNamespace, command, BsonDocumentSerializer.Instance, MessageEncoderSettings); if (effectiveReadPreference != null) { diff --git a/src/MongoDB.Driver/Core/Operations/AsyncCursor.cs b/src/MongoDB.Driver/Core/Operations/AsyncCursor.cs index 1f0e4e46138..6791ae3a44a 100644 --- a/src/MongoDB.Driver/Core/Operations/AsyncCursor.cs +++ b/src/MongoDB.Driver/Core/Operations/AsyncCursor.cs @@ -219,7 +219,10 @@ private CursorBatch<TDocument> ExecuteGetMoreCommand(IChannelHandle channel, Can BsonDocument result; try { + // TODO: CSOT: Implement 
operation context support for Cursors + var operationContext = new OperationContext(null, cancellationToken); result = channel.Command<BsonDocument>( + operationContext, _channelSource.Session, null, // readPreference _collectionNamespace.DatabaseNamespace, @@ -230,8 +233,7 @@ private CursorBatch<TDocument> ExecuteGetMoreCommand(IChannelHandle channel, Can null, // postWriteAction CommandResponseHandling.Return, __getMoreCommandResultSerializer, - _messageEncoderSettings, - cancellationToken); + _messageEncoderSettings); } catch (MongoCommandException ex) when (IsMongoCursorNotFoundException(ex)) { @@ -247,7 +249,10 @@ private async Task<CursorBatch<TDocument>> ExecuteGetMoreCommandAsync(IChannelHa BsonDocument result; try { + // TODO: CSOT: Implement operation context support for Cursors + var operationContext = new OperationContext(null, cancellationToken); result = await channel.CommandAsync<BsonDocument>( + operationContext, _channelSource.Session, null, // readPreference _collectionNamespace.DatabaseNamespace, @@ -258,8 +263,7 @@ private async Task<CursorBatch<TDocument>> ExecuteGetMoreCommandAsync(IChannelHa null, // postWriteAction CommandResponseHandling.Return, __getMoreCommandResultSerializer, - _messageEncoderSettings, - cancellationToken).ConfigureAwait(false); + _messageEncoderSettings).ConfigureAwait(false); } catch (MongoCommandException ex) when (IsMongoCursorNotFoundException(ex)) { @@ -271,8 +275,11 @@ private async Task<CursorBatch<TDocument>> ExecuteGetMoreCommandAsync(IChannelHa private void ExecuteKillCursorsCommand(IChannelHandle channel, CancellationToken cancellationToken) { + // TODO: CSOT: Implement operation context support for Cursors + var operationContext = new OperationContext(null, cancellationToken); var command = CreateKillCursorsCommand(); var result = channel.Command( + operationContext, _channelSource.Session, null, // readPreference _collectionNamespace.DatabaseNamespace, @@ -283,16 +290,18 @@ private void 
ExecuteKillCursorsCommand(IChannelHandle channel, CancellationToken null, // postWriteAction CommandResponseHandling.Return, BsonDocumentSerializer.Instance, - _messageEncoderSettings, - cancellationToken); + _messageEncoderSettings); ThrowIfKillCursorsCommandFailed(result, channel.ConnectionDescription.ConnectionId); } private async Task ExecuteKillCursorsCommandAsync(IChannelHandle channel, CancellationToken cancellationToken) { + // TODO: CSOT: Implement operation context support for Cursors + var operationContext = new OperationContext(null, cancellationToken); var command = CreateKillCursorsCommand(); var result = await channel.CommandAsync( + operationContext, _channelSource.Session, null, // readPreference _collectionNamespace.DatabaseNamespace, @@ -303,8 +312,7 @@ private async Task ExecuteKillCursorsCommandAsync(IChannelHandle channel, Cancel null, // postWriteAction CommandResponseHandling.Return, BsonDocumentSerializer.Instance, - _messageEncoderSettings, - cancellationToken) + _messageEncoderSettings) .ConfigureAwait(false); ThrowIfKillCursorsCommandFailed(result, channel.ConnectionDescription.ConnectionId); @@ -409,8 +417,10 @@ private void DisposeChannelSourceIfNoLongerNeeded() private CursorBatch<TDocument> GetNextBatch(CancellationToken cancellationToken) { + // TODO: CSOT implement proper way to obtain the operationContext + var operationContext = new OperationContext(null, cancellationToken); using (EventContext.BeginOperation(_operationId)) - using (var channel = _channelSource.GetChannel(cancellationToken)) + using (var channel = _channelSource.GetChannel(operationContext)) { return ExecuteGetMoreCommand(channel, cancellationToken); } @@ -418,8 +428,10 @@ private CursorBatch<TDocument> GetNextBatch(CancellationToken cancellationToken) private async Task<CursorBatch<TDocument>> GetNextBatchAsync(CancellationToken cancellationToken) { + // TODO: CSOT implement proper way to obtain the operationContext + var operationContext = new 
OperationContext(null, cancellationToken); using (EventContext.BeginOperation(_operationId)) - using (var channel = await _channelSource.GetChannelAsync(cancellationToken).ConfigureAwait(false)) + using (var channel = await _channelSource.GetChannelAsync(operationContext).ConfigureAwait(false)) { return await ExecuteGetMoreCommandAsync(channel, cancellationToken).ConfigureAwait(false); } @@ -432,10 +444,11 @@ private bool IsMongoCursorNotFoundException(MongoCommandException exception) private void KillCursors(CancellationToken cancellationToken) { + // TODO: CSOT implement proper way to obtain the operationContext + var operationContext = new OperationContext(null, cancellationToken); using (EventContext.BeginOperation(_operationId)) using (EventContext.BeginKillCursors(_collectionNamespace)) - using (var cancellationTokenSource = new CancellationTokenSource(TimeSpan.FromSeconds(10))) - using (var channel = _channelSource.GetChannel(cancellationTokenSource.Token)) + using (var channel = _channelSource.GetChannel(operationContext.WithTimeout(TimeSpan.FromSeconds(10)))) { if (!channel.Connection.IsExpired) { @@ -446,10 +459,11 @@ private void KillCursors(CancellationToken cancellationToken) private async Task KillCursorsAsync(CancellationToken cancellationToken) { + // TODO: CSOT implement proper way to obtain the operationContext + var operationContext = new OperationContext(null, cancellationToken); using (EventContext.BeginOperation(_operationId)) using (EventContext.BeginKillCursors(_collectionNamespace)) - using (var cancellationTokenSource = new CancellationTokenSource(TimeSpan.FromSeconds(10))) - using (var channel = await _channelSource.GetChannelAsync(cancellationTokenSource.Token).ConfigureAwait(false)) + using (var channel = await _channelSource.GetChannelAsync(operationContext.WithTimeout(TimeSpan.FromSeconds(10))).ConfigureAwait(false)) { if (!channel.Connection.IsExpired) { diff --git 
a/src/MongoDB.Driver/Core/Operations/AsyncCursorEnumerableOneTimeAdapter.cs b/src/MongoDB.Driver/Core/Operations/AsyncCursorEnumerableOneTimeAdapter.cs index 3303143eaf9..c5fe0cd8c25 100644 --- a/src/MongoDB.Driver/Core/Operations/AsyncCursorEnumerableOneTimeAdapter.cs +++ b/src/MongoDB.Driver/Core/Operations/AsyncCursorEnumerableOneTimeAdapter.cs @@ -21,7 +21,7 @@ namespace MongoDB.Driver.Core.Operations { - internal sealed class AsyncCursorEnumerableOneTimeAdapter<TDocument> : IEnumerable<TDocument> + internal sealed class AsyncCursorEnumerableOneTimeAdapter<TDocument> : IEnumerable<TDocument>, IAsyncEnumerable<TDocument> { private readonly CancellationToken _cancellationToken; private readonly IAsyncCursor<TDocument> _cursor; @@ -33,6 +33,16 @@ public AsyncCursorEnumerableOneTimeAdapter(IAsyncCursor<TDocument> cursor, Cance _cancellationToken = cancellationToken; } + public IAsyncEnumerator<TDocument> GetAsyncEnumerator(CancellationToken cancellationToken = default) + { + if (_hasBeenEnumerated) + { + throw new InvalidOperationException("An IAsyncCursor can only be enumerated once."); + } + _hasBeenEnumerated = true; + return new AsyncCursorEnumerator<TDocument>(_cursor, cancellationToken); + } + public IEnumerator<TDocument> GetEnumerator() { if (_hasBeenEnumerated) diff --git a/src/MongoDB.Driver/Core/Operations/AsyncCursorEnumerator.cs b/src/MongoDB.Driver/Core/Operations/AsyncCursorEnumerator.cs index 6778b38146a..3f164423bfa 100644 --- a/src/MongoDB.Driver/Core/Operations/AsyncCursorEnumerator.cs +++ b/src/MongoDB.Driver/Core/Operations/AsyncCursorEnumerator.cs @@ -17,11 +17,12 @@ using System.Collections; using System.Collections.Generic; using System.Threading; +using System.Threading.Tasks; using MongoDB.Driver.Core.Misc; namespace MongoDB.Driver.Core.Operations { - internal class AsyncCursorEnumerator<TDocument> : IEnumerator<TDocument> + internal sealed class AsyncCursorEnumerator<TDocument> : IEnumerator<TDocument>, IAsyncEnumerator<TDocument> { // 
private fields private IEnumerator<TDocument> _batchEnumerator; @@ -72,6 +73,15 @@ public void Dispose() } } + public ValueTask DisposeAsync() + { + // TODO: implement true async disposal (CSHARP-5630) + Dispose(); + + // TODO: convert to ValueTask.CompletedTask once we stop supporting older target frameworks + return default; // Equivalent to ValueTask.CompletedTask which is not available on older target frameworks. + } + public bool MoveNext() { ThrowIfDisposed(); @@ -82,24 +92,46 @@ public bool MoveNext() return true; } - while (true) + while (_cursor.MoveNext(_cancellationToken)) { - if (_cursor.MoveNext(_cancellationToken)) + _batchEnumerator?.Dispose(); + _batchEnumerator = _cursor.Current.GetEnumerator(); + if (_batchEnumerator.MoveNext()) { - _batchEnumerator?.Dispose(); - _batchEnumerator = _cursor.Current.GetEnumerator(); - if (_batchEnumerator.MoveNext()) - { - return true; - } + return true; } - else + } + + _batchEnumerator?.Dispose(); + _batchEnumerator = null; + _finished = true; + return false; + } + + public async ValueTask<bool> MoveNextAsync() + { + ThrowIfDisposed(); + _started = true; + + if (_batchEnumerator != null && _batchEnumerator.MoveNext()) + { + return true; + } + + while (await _cursor.MoveNextAsync(_cancellationToken).ConfigureAwait(false)) + { + _batchEnumerator?.Dispose(); + _batchEnumerator = _cursor.Current.GetEnumerator(); + if (_batchEnumerator.MoveNext()) { - _batchEnumerator = null; - _finished = true; - return false; + return true; } } + + _batchEnumerator?.Dispose(); + _batchEnumerator = null; + _finished = true; + return false; } public void Reset() diff --git a/src/MongoDB.Driver/Core/Operations/AsyncCursorSourceEnumerableAdapter.cs b/src/MongoDB.Driver/Core/Operations/AsyncCursorSourceEnumerableAdapter.cs index 9d175b1fde8..113eedcb896 100644 --- a/src/MongoDB.Driver/Core/Operations/AsyncCursorSourceEnumerableAdapter.cs +++ b/src/MongoDB.Driver/Core/Operations/AsyncCursorSourceEnumerableAdapter.cs @@ -13,7 +13,6 @@ * 
limitations under the License. */ -using System; using System.Collections; using System.Collections.Generic; using System.Threading; @@ -21,7 +20,7 @@ namespace MongoDB.Driver.Core.Operations { - internal class AsyncCursorSourceEnumerableAdapter<TDocument> : IEnumerable<TDocument> + internal sealed class AsyncCursorSourceEnumerableAdapter<TDocument> : IEnumerable<TDocument>, IAsyncEnumerable<TDocument> { // private fields private readonly CancellationToken _cancellationToken; @@ -34,6 +33,11 @@ public AsyncCursorSourceEnumerableAdapter(IAsyncCursorSource<TDocument> source, _cancellationToken = cancellationToken; } + public IAsyncEnumerator<TDocument> GetAsyncEnumerator(CancellationToken cancellationToken = default) + { + return new AsyncCursorSourceEnumerator<TDocument>(_source, cancellationToken); + } + // public methods public IEnumerator<TDocument> GetEnumerator() { diff --git a/src/MongoDB.Driver/Core/Operations/AsyncCursorSourceEnumerator.cs b/src/MongoDB.Driver/Core/Operations/AsyncCursorSourceEnumerator.cs new file mode 100644 index 00000000000..8cfd97b2a30 --- /dev/null +++ b/src/MongoDB.Driver/Core/Operations/AsyncCursorSourceEnumerator.cs @@ -0,0 +1,95 @@ +/* Copyright 2010-present MongoDB Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +using System; +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; +using MongoDB.Driver.Core.Misc; + +namespace MongoDB.Driver.Core.Operations +{ +#pragma warning disable CA1001 + // we are suppressing this warning as we currently use the old Microsoft.CodeAnalysis.FxCopAnalyzers which doesn't + // have a concept of IAsyncDisposable. + // TODO: remove this suppression once we update our analyzers to use Microsoft.CodeAnalysis.NetAnalyzers + internal sealed class AsyncCursorSourceEnumerator<TDocument> : IAsyncEnumerator<TDocument> +#pragma warning restore CA1001 + { + private readonly CancellationToken _cancellationToken; + private AsyncCursorEnumerator<TDocument> _cursorEnumerator; + private readonly IAsyncCursorSource<TDocument> _cursorSource; + private bool _disposed; + + public AsyncCursorSourceEnumerator(IAsyncCursorSource<TDocument> cursorSource, CancellationToken cancellationToken) + { + _cursorSource = Ensure.IsNotNull(cursorSource, nameof(cursorSource)); + _cancellationToken = cancellationToken; + } + + public TDocument Current + { + get + { + if (_cursorEnumerator == null) + { + throw new InvalidOperationException("Enumeration has not started. 
Call MoveNextAsync."); + } + return _cursorEnumerator.Current; + } + } + + public async ValueTask DisposeAsync() + { + if (!_disposed) + { + _disposed = true; + + if (_cursorEnumerator != null) + { + await _cursorEnumerator.DisposeAsync().ConfigureAwait(false); + } + } + } + + public async ValueTask<bool> MoveNextAsync() + { + ThrowIfDisposed(); + + if (_cursorEnumerator == null) + { + var cursor = await _cursorSource.ToCursorAsync(_cancellationToken).ConfigureAwait(false); + _cursorEnumerator = new AsyncCursorEnumerator<TDocument>(cursor, _cancellationToken); + } + + return await _cursorEnumerator.MoveNextAsync().ConfigureAwait(false); + } + + public void Reset() + { + ThrowIfDisposed(); + throw new NotSupportedException(); + } + + // private methods + private void ThrowIfDisposed() + { + if (_disposed) + { + throw new ObjectDisposedException(GetType().Name); + } + } + } +} \ No newline at end of file diff --git a/src/MongoDB.Driver/Core/Operations/BulkMixedWriteOperation.cs b/src/MongoDB.Driver/Core/Operations/BulkMixedWriteOperation.cs index c529c2ebc56..d18e1339069 100644 --- a/src/MongoDB.Driver/Core/Operations/BulkMixedWriteOperation.cs +++ b/src/MongoDB.Driver/Core/Operations/BulkMixedWriteOperation.cs @@ -16,7 +16,6 @@ using System; using System.Collections.Generic; using System.Linq; -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Driver.Core.Bindings; @@ -137,38 +136,39 @@ public WriteConcern WriteConcern set { _writeConcern = Ensure.IsNotNull(value, nameof(value)); } } - public BulkWriteOperationResult Execute(IWriteBinding binding, CancellationToken cancellationToken) + public BulkWriteOperationResult Execute(OperationContext operationContext, IWriteBinding binding) { using (BeginOperation()) - using (var context = RetryableWriteContext.Create(binding, _retryRequested, cancellationToken)) + using (var context = RetryableWriteContext.Create(operationContext, binding, IsOperationRetryable())) { 
EnsureHintIsSupportedIfAnyRequestHasHint(); - context.DisableRetriesIfAnyWriteRequestIsNotRetryable(_requests); var helper = new BatchHelper(_requests, _isOrdered, _writeConcern); foreach (var batch in helper.GetBatches()) { - batch.Result = ExecuteBatch(context, batch, cancellationToken); + batch.Result = ExecuteBatch(operationContext, context, batch); } return helper.GetFinalResultOrThrow(context.Channel.ConnectionDescription.ConnectionId); } } - public async Task<BulkWriteOperationResult> ExecuteAsync(IWriteBinding binding, CancellationToken cancellationToken) + public async Task<BulkWriteOperationResult> ExecuteAsync(OperationContext operationContext, IWriteBinding binding) { using (BeginOperation()) - using (var context = await RetryableWriteContext.CreateAsync(binding, _retryRequested, cancellationToken).ConfigureAwait(false)) + using (var context = await RetryableWriteContext.CreateAsync(operationContext, binding, IsOperationRetryable()).ConfigureAwait(false)) { EnsureHintIsSupportedIfAnyRequestHasHint(); - context.DisableRetriesIfAnyWriteRequestIsNotRetryable(_requests); var helper = new BatchHelper(_requests, _isOrdered, _writeConcern); foreach (var batch in helper.GetBatches()) { - batch.Result = await ExecuteBatchAsync(context, batch, cancellationToken).ConfigureAwait(false); + batch.Result = await ExecuteBatchAsync(operationContext, context, batch).ConfigureAwait(false); } return helper.GetFinalResultOrThrow(context.Channel.ConnectionDescription.ConnectionId); } } + private bool IsOperationRetryable() + => _retryRequested && _requests.All(r => r.IsRetryable()); + private IDisposable BeginOperation() => // Execution starts with the first request EventContext.BeginOperation(null, _requests.FirstOrDefault()?.RequestType.ToString().ToLower()); @@ -242,14 +242,14 @@ private void EnsureHintIsSupportedIfAnyRequestHasHint() } } - private BulkWriteBatchResult ExecuteBatch(RetryableWriteContext context, Batch batch, CancellationToken cancellationToken) + private 
BulkWriteBatchResult ExecuteBatch(OperationContext operationContext, RetryableWriteContext context, Batch batch) { BulkWriteOperationResult result; MongoBulkWriteOperationException exception = null; try { var operation = CreateUnmixedBatchOperation(batch); - result = operation.Execute(context, cancellationToken); + result = operation.Execute(operationContext, context); } catch (MongoBulkWriteOperationException ex) { @@ -260,14 +260,14 @@ private BulkWriteBatchResult ExecuteBatch(RetryableWriteContext context, Batch b return BulkWriteBatchResult.Create(result, exception, batch.IndexMap); } - private async Task<BulkWriteBatchResult> ExecuteBatchAsync(RetryableWriteContext context, Batch batch, CancellationToken cancellationToken) + private async Task<BulkWriteBatchResult> ExecuteBatchAsync(OperationContext operationContext, RetryableWriteContext context, Batch batch) { BulkWriteOperationResult result; MongoBulkWriteOperationException exception = null; try { var operation = CreateUnmixedBatchOperation(batch); - result = await operation.ExecuteAsync(context, cancellationToken).ConfigureAwait(false); + result = await operation.ExecuteAsync(operationContext, context).ConfigureAwait(false); } catch (MongoBulkWriteOperationException ex) { diff --git a/src/MongoDB.Driver/Core/Operations/BulkUnmixedWriteOperationBase.cs b/src/MongoDB.Driver/Core/Operations/BulkUnmixedWriteOperationBase.cs index 616b150b155..0ec4cb33f69 100644 --- a/src/MongoDB.Driver/Core/Operations/BulkUnmixedWriteOperationBase.cs +++ b/src/MongoDB.Driver/Core/Operations/BulkUnmixedWriteOperationBase.cs @@ -16,7 +16,6 @@ using System; using System.Collections.Generic; using System.Linq; -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Driver.Core.Bindings; @@ -120,37 +119,35 @@ public WriteConcern WriteConcern } // public methods - public BulkWriteOperationResult Execute(RetryableWriteContext context, CancellationToken cancellationToken) + public 
BulkWriteOperationResult Execute(OperationContext operationContext, RetryableWriteContext context) { EnsureHintIsSupportedIfAnyRequestHasHint(); - return ExecuteBatches(context, cancellationToken); + return ExecuteBatches(operationContext, context); } - public BulkWriteOperationResult Execute(IWriteBinding binding, CancellationToken cancellationToken) + public BulkWriteOperationResult Execute(OperationContext operationContext, IWriteBinding binding) { using (BeginOperation()) - using (var context = RetryableWriteContext.Create(binding, _retryRequested, cancellationToken)) + using (var context = RetryableWriteContext.Create(operationContext, binding, IsOperationRetryable())) { - context.DisableRetriesIfAnyWriteRequestIsNotRetryable(_requests); - return Execute(context, cancellationToken); + return Execute(operationContext, context); } } - public Task<BulkWriteOperationResult> ExecuteAsync(RetryableWriteContext context, CancellationToken cancellationToken) + public Task<BulkWriteOperationResult> ExecuteAsync(OperationContext operationContext, RetryableWriteContext context) { EnsureHintIsSupportedIfAnyRequestHasHint(); - return ExecuteBatchesAsync(context, cancellationToken); + return ExecuteBatchesAsync(operationContext, context); } - public async Task<BulkWriteOperationResult> ExecuteAsync(IWriteBinding binding, CancellationToken cancellationToken) + public async Task<BulkWriteOperationResult> ExecuteAsync(OperationContext operationContext, IWriteBinding binding) { using (BeginOperation()) - using (var context = await RetryableWriteContext.CreateAsync(binding, _retryRequested, cancellationToken).ConfigureAwait(false)) + using (var context = await RetryableWriteContext.CreateAsync(operationContext, binding, IsOperationRetryable()).ConfigureAwait(false)) { - context.DisableRetriesIfAnyWriteRequestIsNotRetryable(_requests); - return await ExecuteAsync(context, cancellationToken).ConfigureAwait(false); + return await ExecuteAsync(operationContext, 
context).ConfigureAwait(false); } } @@ -160,6 +157,9 @@ public async Task<BulkWriteOperationResult> ExecuteAsync(IWriteBinding binding, protected abstract bool RequestHasHint(TWriteRequest request); // private methods + private bool IsOperationRetryable() + => _retryRequested && _requests.All(r => r.IsRetryable()); + private IDisposable BeginOperation() => EventContext.BeginOperation(null, _requests.FirstOrDefault()?.RequestType.ToString().ToLower()); @@ -190,14 +190,14 @@ private void EnsureHintIsSupportedIfAnyRequestHasHint() } } - private BulkWriteBatchResult ExecuteBatch(RetryableWriteContext context, Batch batch, CancellationToken cancellationToken) + private BulkWriteBatchResult ExecuteBatch(OperationContext operationContext, RetryableWriteContext context, Batch batch) { var operation = CreateBatchOperation(batch); BsonDocument operationResult; MongoWriteConcernException writeConcernException = null; try { - operationResult = RetryableWriteOperationExecutor.Execute(operation, context, cancellationToken); + operationResult = RetryableWriteOperationExecutor.Execute(operationContext, operation, context); } catch (MongoWriteConcernException exception) when (exception.IsWriteConcernErrorOnly()) { @@ -208,14 +208,14 @@ private BulkWriteBatchResult ExecuteBatch(RetryableWriteContext context, Batch b return CreateBatchResult(batch, operationResult, writeConcernException); } - private async Task<BulkWriteBatchResult> ExecuteBatchAsync(RetryableWriteContext context, Batch batch, CancellationToken cancellationToken) + private async Task<BulkWriteBatchResult> ExecuteBatchAsync(OperationContext operationContext, RetryableWriteContext context, Batch batch) { var operation = CreateBatchOperation(batch); BsonDocument operationResult; MongoWriteConcernException writeConcernException = null; try { - operationResult = await RetryableWriteOperationExecutor.ExecuteAsync(operation, context, cancellationToken).ConfigureAwait(false); + operationResult = await 
RetryableWriteOperationExecutor.ExecuteAsync(operationContext, operation, context).ConfigureAwait(false); } catch (MongoWriteConcernException exception) when (exception.IsWriteConcernErrorOnly()) { @@ -226,22 +226,22 @@ private async Task<BulkWriteBatchResult> ExecuteBatchAsync(RetryableWriteContext return CreateBatchResult(batch, operationResult, writeConcernException); } - private BulkWriteOperationResult ExecuteBatches(RetryableWriteContext context, CancellationToken cancellationToken) + private BulkWriteOperationResult ExecuteBatches(OperationContext operationContext, RetryableWriteContext context) { var helper = new BatchHelper(_requests, _writeConcern, _isOrdered); foreach (var batch in helper.GetBatches()) { - batch.Result = ExecuteBatch(context, batch, cancellationToken); + batch.Result = ExecuteBatch(operationContext, context, batch); } return helper.CreateFinalResultOrThrow(context.Channel); } - private async Task<BulkWriteOperationResult> ExecuteBatchesAsync(RetryableWriteContext context, CancellationToken cancellationToken) + private async Task<BulkWriteOperationResult> ExecuteBatchesAsync(OperationContext operationContext, RetryableWriteContext context) { var helper = new BatchHelper(_requests, _writeConcern, _isOrdered); foreach (var batch in helper.GetBatches()) { - batch.Result = await ExecuteBatchAsync(context, batch, cancellationToken).ConfigureAwait(false); + batch.Result = await ExecuteBatchAsync(operationContext, context, batch).ConfigureAwait(false); } return helper.CreateFinalResultOrThrow(context.Channel); } diff --git a/src/MongoDB.Driver/Core/Operations/ChangeStreamCursor.cs b/src/MongoDB.Driver/Core/Operations/ChangeStreamCursor.cs index 4dfabd9a026..f43cb30a168 100644 --- a/src/MongoDB.Driver/Core/Operations/ChangeStreamCursor.cs +++ b/src/MongoDB.Driver/Core/Operations/ChangeStreamCursor.cs @@ -261,13 +261,17 @@ private void ReconfigureOperationResumeValues() private IAsyncCursor<RawBsonDocument> Resume(CancellationToken 
cancellationToken) { ReconfigureOperationResumeValues(); - return _changeStreamOperation.Resume(_binding, cancellationToken); + // TODO: CSOT implement proper way to obtain the operationContext + var operationContext = new OperationContext(null, cancellationToken); + return _changeStreamOperation.Resume(operationContext, _binding); } private async Task<IAsyncCursor<RawBsonDocument>> ResumeAsync(CancellationToken cancellationToken) { ReconfigureOperationResumeValues(); - return await _changeStreamOperation.ResumeAsync(_binding, cancellationToken).ConfigureAwait(false); + // TODO: CSOT implement proper way to obtain the operationContext + var operationContext = new OperationContext(null, cancellationToken); + return await _changeStreamOperation.ResumeAsync(operationContext, _binding).ConfigureAwait(false); } internal struct ResumeValues diff --git a/src/MongoDB.Driver/Core/Operations/ChangeStreamOperation.cs b/src/MongoDB.Driver/Core/Operations/ChangeStreamOperation.cs index b9ed21931b0..ed08f1c85e5 100644 --- a/src/MongoDB.Driver/Core/Operations/ChangeStreamOperation.cs +++ b/src/MongoDB.Driver/Core/Operations/ChangeStreamOperation.cs @@ -16,7 +16,6 @@ using System; using System.Collections.Generic; using System.Linq; -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Bson.Serialization; @@ -34,8 +33,8 @@ internal interface IChangeStreamOperation<TResult> : IReadOperation<IChangeStrea BsonDocument StartAfter { get; set; } BsonTimestamp StartAtOperationTime { get; set; } - IAsyncCursor<RawBsonDocument> Resume(IReadBinding binding, CancellationToken cancellationToken); - Task<IAsyncCursor<RawBsonDocument>> ResumeAsync(IReadBinding binding, CancellationToken cancellationToken); + IAsyncCursor<RawBsonDocument> Resume(OperationContext operationContext, IReadBinding binding); + Task<IAsyncCursor<RawBsonDocument>> ResumeAsync(OperationContext operationContext, IReadBinding binding); } internal sealed class 
ChangeStreamOperation<TResult> : IChangeStreamOperation<TResult> @@ -250,7 +249,7 @@ public BsonTimestamp StartAtOperationTime // public methods /// <inheritdoc /> - public IChangeStreamCursor<TResult> Execute(IReadBinding binding, CancellationToken cancellationToken) + public IChangeStreamCursor<TResult> Execute(OperationContext operationContext, IReadBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); var bindingHandle = binding as IReadBindingHandle; @@ -262,9 +261,9 @@ public IChangeStreamCursor<TResult> Execute(IReadBinding binding, CancellationTo IAsyncCursor<RawBsonDocument> cursor; ICursorBatchInfo cursorBatchInfo; BsonTimestamp initialOperationTime; - using (var context = RetryableReadContext.Create(binding, _retryRequested, cancellationToken)) + using (var context = RetryableReadContext.Create(operationContext, binding, _retryRequested)) { - cursor = ExecuteAggregateOperation(context, cancellationToken); + cursor = ExecuteAggregateOperation(operationContext, context); cursorBatchInfo = (ICursorBatchInfo)cursor; initialOperationTime = GetInitialOperationTimeIfRequired(context, cursorBatchInfo); @@ -285,7 +284,7 @@ public IChangeStreamCursor<TResult> Execute(IReadBinding binding, CancellationTo } /// <inheritdoc /> - public async Task<IChangeStreamCursor<TResult>> ExecuteAsync(IReadBinding binding, CancellationToken cancellationToken) + public async Task<IChangeStreamCursor<TResult>> ExecuteAsync(OperationContext operationContext, IReadBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); var bindingHandle = binding as IReadBindingHandle; @@ -297,9 +296,9 @@ public async Task<IChangeStreamCursor<TResult>> ExecuteAsync(IReadBinding bindin IAsyncCursor<RawBsonDocument> cursor; ICursorBatchInfo cursorBatchInfo; BsonTimestamp initialOperationTime; - using (var context = await RetryableReadContext.CreateAsync(binding, _retryRequested, cancellationToken).ConfigureAwait(false)) + using (var context = await 
RetryableReadContext.CreateAsync(operationContext, binding, _retryRequested).ConfigureAwait(false)) { - cursor = await ExecuteAggregateOperationAsync(context, cancellationToken).ConfigureAwait(false); + cursor = await ExecuteAggregateOperationAsync(operationContext, context).ConfigureAwait(false); cursorBatchInfo = (ICursorBatchInfo)cursor; initialOperationTime = GetInitialOperationTimeIfRequired(context, cursorBatchInfo); @@ -320,20 +319,20 @@ public async Task<IChangeStreamCursor<TResult>> ExecuteAsync(IReadBinding bindin } /// <inheritdoc /> - public IAsyncCursor<RawBsonDocument> Resume(IReadBinding binding, CancellationToken cancellationToken) + public IAsyncCursor<RawBsonDocument> Resume(OperationContext operationContext, IReadBinding binding) { - using (var context = RetryableReadContext.Create(binding, retryRequested: false, cancellationToken)) + using (var context = RetryableReadContext.Create(operationContext, binding, retryRequested: false)) { - return ExecuteAggregateOperation(context, cancellationToken); + return ExecuteAggregateOperation(operationContext, context); } } /// <inheritdoc /> - public async Task<IAsyncCursor<RawBsonDocument>> ResumeAsync(IReadBinding binding, CancellationToken cancellationToken) + public async Task<IAsyncCursor<RawBsonDocument>> ResumeAsync(OperationContext operationContext, IReadBinding binding) { - using (var context = await RetryableReadContext.CreateAsync(binding, retryRequested: false, cancellationToken).ConfigureAwait(false)) + using (var context = await RetryableReadContext.CreateAsync(operationContext, binding, retryRequested: false).ConfigureAwait(false)) { - return await ExecuteAggregateOperationAsync(context, cancellationToken).ConfigureAwait(false); + return await ExecuteAggregateOperationAsync(operationContext, context).ConfigureAwait(false); } } @@ -392,16 +391,16 @@ private List<BsonDocument> CreateCombinedPipeline(BsonDocument changeStreamStage return combinedPipeline; } - private 
IAsyncCursor<RawBsonDocument> ExecuteAggregateOperation(RetryableReadContext context, CancellationToken cancellationToken) + private IAsyncCursor<RawBsonDocument> ExecuteAggregateOperation(OperationContext operationContext, RetryableReadContext context) { var aggregateOperation = CreateAggregateOperation(); - return aggregateOperation.Execute(context, cancellationToken); + return aggregateOperation.Execute(operationContext, context); } - private Task<IAsyncCursor<RawBsonDocument>> ExecuteAggregateOperationAsync(RetryableReadContext context, CancellationToken cancellationToken) + private Task<IAsyncCursor<RawBsonDocument>> ExecuteAggregateOperationAsync(OperationContext operationContext, RetryableReadContext context) { var aggregateOperation = CreateAggregateOperation(); - return aggregateOperation.ExecuteAsync(context, cancellationToken); + return aggregateOperation.ExecuteAsync(operationContext, context); } private BsonDocument GetInitialPostBatchResumeTokenIfRequired(ICursorBatchInfo cursorBatchInfo) diff --git a/src/MongoDB.Driver/Core/Operations/ClientBulkWriteOperation.cs b/src/MongoDB.Driver/Core/Operations/ClientBulkWriteOperation.cs index b994a95388d..f872d83bf61 100644 --- a/src/MongoDB.Driver/Core/Operations/ClientBulkWriteOperation.cs +++ b/src/MongoDB.Driver/Core/Operations/ClientBulkWriteOperation.cs @@ -16,7 +16,6 @@ using System; using System.Collections.Generic; using System.Linq; -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Bson.Serialization.Serializers; @@ -33,7 +32,7 @@ internal sealed class ClientBulkWriteOperation : RetryableWriteCommandOperationB { private readonly bool? 
_bypassDocumentValidation; private readonly bool _errorsOnly; - private readonly Dictionary<int, BsonValue> _idsMap = new(); + private readonly Dictionary<int, object> _idsMap = new(); private readonly BsonDocument _let; private readonly RenderArgs<BsonDocument> _renderArgs; private readonly IBatchableSource<BulkWriteModel> _writeModels; @@ -56,9 +55,9 @@ public ClientBulkWriteOperation( WriteConcern = options?.WriteConcern; } - protected override BsonDocument CreateCommand(ICoreSessionHandle session, int attempt, long? transactionNumber) + protected override BsonDocument CreateCommand(OperationContext operationContext, ICoreSessionHandle session, int attempt, long? transactionNumber) { - var writeConcern = WriteConcernHelper.GetEffectiveWriteConcern(session, WriteConcern); + var writeConcern = WriteConcernHelper.GetEffectiveWriteConcern(operationContext, session, WriteConcern); return new BsonDocument { { "bulkWrite", 1 }, @@ -89,17 +88,17 @@ protected override IEnumerable<BatchableCommandMessageSection> CreateCommandPayl return new[] { payload }; } - public new ClientBulkWriteResult Execute(IWriteBinding binding, CancellationToken cancellationToken) + public new ClientBulkWriteResult Execute(OperationContext operationContext, IWriteBinding binding) { using var operation = BeginOperation(); var bulkWriteResults = new BulkWriteRawResult(); while (true) { - using var context = RetryableWriteContext.Create(binding, GetEffectiveRetryRequested(), cancellationToken); + using var context = RetryableWriteContext.Create(operationContext, binding, GetEffectiveRetryRequested()); BsonDocument serverResponse = null; try { - serverResponse = base.Execute(context, cancellationToken); + serverResponse = base.Execute(operationContext, context); } catch (MongoWriteConcernException concernException) { @@ -124,7 +123,8 @@ protected override IEnumerable<BatchableCommandMessageSection> CreateCommandPayl { try { - while (individualResults.MoveNext(cancellationToken)) + // TODO: CSOT 
implement a way to support timeout in cursor methods + while (individualResults.MoveNext(operationContext.CancellationToken)) { PopulateIndividualResponses(individualResults.Current, bulkWriteResults); } @@ -146,17 +146,17 @@ protected override IEnumerable<BatchableCommandMessageSection> CreateCommandPayl } } - public new async Task<ClientBulkWriteResult> ExecuteAsync(IWriteBinding binding, CancellationToken cancellationToken) + public new async Task<ClientBulkWriteResult> ExecuteAsync(OperationContext operationContext, IWriteBinding binding) { using var operation = BeginOperation(); var bulkWriteResults = new BulkWriteRawResult(); while (true) { - using var context = RetryableWriteContext.Create(binding, GetEffectiveRetryRequested(), cancellationToken); + using var context = RetryableWriteContext.Create(operationContext, binding, GetEffectiveRetryRequested()); BsonDocument serverResponse = null; try { - serverResponse = await base.ExecuteAsync(context, cancellationToken).ConfigureAwait(false); + serverResponse = await base.ExecuteAsync(operationContext, context).ConfigureAwait(false); } catch (MongoWriteConcernException concernException) { @@ -181,7 +181,8 @@ protected override IEnumerable<BatchableCommandMessageSection> CreateCommandPayl { try { - while (await individualResults.MoveNextAsync(cancellationToken).ConfigureAwait(false)) + // TODO: CSOT implement a way to support timeout in cursor methods + while (await individualResults.MoveNextAsync(operationContext.CancellationToken).ConfigureAwait(false)) { PopulateIndividualResponses(individualResults.Current, bulkWriteResults); } @@ -209,7 +210,12 @@ private void EnsureCanProceedNextBatch(ConnectionId connectionId, BulkWriteRawRe { if (bulkWriteResult.TopLevelException != null) { - var partialResult = ToClientBulkWriteResult(bulkWriteResult); + ClientBulkWriteResult partialResult = null; + if (_writeModels.Offset != 0) + { + partialResult = ToClientBulkWriteResult(bulkWriteResult); + } + throw new 
ClientBulkWriteException( connectionId, "An error occurred during bulkWrite operation. See InnerException for more details.", @@ -331,7 +337,7 @@ private void PopulateIndividualResponses(IEnumerable<BsonDocument> individualRes _idsMap.TryGetValue(operationIndex, out var insertedId); bulkWriteResult.InsertResults.Add(operationIndex, new() { - InsertedId = insertedId + DocumentId = insertedId }); } else if (writeModelType == typeof(BulkWriteUpdateOneModel<>) || writeModelType == typeof(BulkWriteUpdateManyModel<>) || writeModelType == typeof(BulkWriteReplaceOneModel<>)) diff --git a/src/MongoDB.Driver/Core/Operations/CommandOperationBase.cs b/src/MongoDB.Driver/Core/Operations/CommandOperationBase.cs index 18f18b14b6d..e5f94ebe200 100644 --- a/src/MongoDB.Driver/Core/Operations/CommandOperationBase.cs +++ b/src/MongoDB.Driver/Core/Operations/CommandOperationBase.cs @@ -13,7 +13,6 @@ * limitations under the License. */ -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Bson.IO; @@ -85,11 +84,12 @@ public IBsonSerializer<TCommandResult> ResultSerializer get { return _resultSerializer; } } - protected TCommandResult ExecuteProtocol(IChannelHandle channel, ICoreSessionHandle session, ReadPreference readPreference, CancellationToken cancellationToken) + protected TCommandResult ExecuteProtocol(OperationContext operationContext, IChannelHandle channel, ICoreSessionHandle session, ReadPreference readPreference) { var additionalOptions = GetEffectiveAdditionalOptions(); return channel.Command( + operationContext, session, readPreference, _databaseNamespace, @@ -100,27 +100,27 @@ protected TCommandResult ExecuteProtocol(IChannelHandle channel, ICoreSessionHan null, // postWriteAction, CommandResponseHandling.Return, _resultSerializer, - _messageEncoderSettings, - cancellationToken); + _messageEncoderSettings); } protected TCommandResult ExecuteProtocol( + OperationContext operationContext, IChannelSource channelSource, ICoreSessionHandle 
session, - ReadPreference readPreference, - CancellationToken cancellationToken) + ReadPreference readPreference) { - using (var channel = channelSource.GetChannel(cancellationToken)) + using (var channel = channelSource.GetChannel(operationContext)) { - return ExecuteProtocol(channel, session, readPreference, cancellationToken); + return ExecuteProtocol(operationContext, channel, session, readPreference); } } - protected Task<TCommandResult> ExecuteProtocolAsync(IChannelHandle channel, ICoreSessionHandle session, ReadPreference readPreference, CancellationToken cancellationToken) + protected Task<TCommandResult> ExecuteProtocolAsync(OperationContext operationContext, IChannelHandle channel, ICoreSessionHandle session, ReadPreference readPreference) { var additionalOptions = GetEffectiveAdditionalOptions(); return channel.CommandAsync( + operationContext, session, readPreference, _databaseNamespace, @@ -131,19 +131,18 @@ protected Task<TCommandResult> ExecuteProtocolAsync(IChannelHandle channel, ICor null, // postWriteAction, CommandResponseHandling.Return, _resultSerializer, - _messageEncoderSettings, - cancellationToken); + _messageEncoderSettings); } protected async Task<TCommandResult> ExecuteProtocolAsync( + OperationContext operationContext, IChannelSource channelSource, ICoreSessionHandle session, - ReadPreference readPreference, - CancellationToken cancellationToken) + ReadPreference readPreference) { - using (var channel = await channelSource.GetChannelAsync(cancellationToken).ConfigureAwait(false)) + using (var channel = await channelSource.GetChannelAsync(operationContext).ConfigureAwait(false)) { - return await ExecuteProtocolAsync(channel, session, readPreference, cancellationToken).ConfigureAwait(false); + return await ExecuteProtocolAsync(operationContext, channel, session, readPreference).ConfigureAwait(false); } } diff --git a/src/MongoDB.Driver/Core/Operations/CompositeWriteOperation.cs 
b/src/MongoDB.Driver/Core/Operations/CompositeWriteOperation.cs index 64a270c4486..68e6964d994 100644 --- a/src/MongoDB.Driver/Core/Operations/CompositeWriteOperation.cs +++ b/src/MongoDB.Driver/Core/Operations/CompositeWriteOperation.cs @@ -14,7 +14,6 @@ */ using System.Linq; -using System.Threading; using System.Threading.Tasks; using MongoDB.Driver.Core.Bindings; using MongoDB.Driver.Core.Misc; @@ -32,12 +31,12 @@ public CompositeWriteOperation(params (IWriteOperation<TResult>, bool IsMainOper Ensure.That(operations.Count(o => o.IsMainOperation) == 1, message: $"{nameof(CompositeWriteOperation<TResult>)} must have a single main operation."); } - public TResult Execute(IWriteBinding binding, CancellationToken cancellationToken) + public TResult Execute(OperationContext operationContext, IWriteBinding binding) { TResult result = default; foreach (var operationInfo in _operations) { - var itemResult = operationInfo.Operation.Execute(binding, cancellationToken); + var itemResult = operationInfo.Operation.Execute(operationContext, binding); if (operationInfo.IsMainOperation) { result = itemResult; @@ -47,12 +46,12 @@ public TResult Execute(IWriteBinding binding, CancellationToken cancellationToke return result; } - public async Task<TResult> ExecuteAsync(IWriteBinding binding, CancellationToken cancellationToken) + public async Task<TResult> ExecuteAsync(OperationContext operationContext, IWriteBinding binding) { TResult result = default; foreach (var operationInfo in _operations) { - var itemResult = await operationInfo.Operation.ExecuteAsync(binding, cancellationToken).ConfigureAwait(false); + var itemResult = await operationInfo.Operation.ExecuteAsync(operationContext, binding).ConfigureAwait(false); if (operationInfo.IsMainOperation) { result = itemResult; diff --git a/src/MongoDB.Driver/Core/Operations/CountDocumentsOperation.cs b/src/MongoDB.Driver/Core/Operations/CountDocumentsOperation.cs index 2871029e2a1..6bcc00d33db 100644 --- 
a/src/MongoDB.Driver/Core/Operations/CountDocumentsOperation.cs +++ b/src/MongoDB.Driver/Core/Operations/CountDocumentsOperation.cs @@ -15,7 +15,6 @@ using System; using System.Collections.Generic; -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Bson.Serialization.Serializers; @@ -110,28 +109,28 @@ public long? Skip set { _skip = value; } } - public long Execute(IReadBinding binding, CancellationToken cancellationToken) + public long Execute(OperationContext operationContext, IReadBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); using (BeginOperation()) { var operation = CreateOperation(); - var cursor = operation.Execute(binding, cancellationToken); - var result = cursor.ToList(cancellationToken); + var cursor = operation.Execute(operationContext, binding); + var result = cursor.ToList(operationContext.CancellationToken); return ExtractCountFromResult(result); } } - public async Task<long> ExecuteAsync(IReadBinding binding, CancellationToken cancellationToken) + public async Task<long> ExecuteAsync(OperationContext operationContext, IReadBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); using (BeginOperation()) { var operation = CreateOperation(); - var cursor = await operation.ExecuteAsync(binding, cancellationToken).ConfigureAwait(false); - var result = await cursor.ToListAsync(cancellationToken).ConfigureAwait(false); + var cursor = await operation.ExecuteAsync(operationContext, binding).ConfigureAwait(false); + var result = await cursor.ToListAsync(operationContext.CancellationToken).ConfigureAwait(false); return ExtractCountFromResult(result); } } diff --git a/src/MongoDB.Driver/Core/Operations/CountOperation.cs b/src/MongoDB.Driver/Core/Operations/CountOperation.cs index dc33440c61f..1f19a2063ac 100644 --- a/src/MongoDB.Driver/Core/Operations/CountOperation.cs +++ b/src/MongoDB.Driver/Core/Operations/CountOperation.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. 
+/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,7 +14,6 @@ */ using System; -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Bson.Serialization.Serializers; @@ -110,7 +109,7 @@ public long? Skip set { _skip = value; } } - public BsonDocument CreateCommand(ConnectionDescription connectionDescription, ICoreSession session) + public BsonDocument CreateCommand(OperationContext operationContext, ICoreSession session, ConnectionDescription connectionDescription) { var readConcern = ReadConcernHelper.GetReadConcernForCommand(session, connectionDescription, _readConcern); return new BsonDocument @@ -120,54 +119,54 @@ public BsonDocument CreateCommand(ConnectionDescription connectionDescription, I { "limit", () => _limit.Value, _limit.HasValue }, { "skip", () => _skip.Value, _skip.HasValue }, { "hint", _hint, _hint != null }, - { "maxTimeMS", () => MaxTimeHelper.ToMaxTimeMS(_maxTime.Value), _maxTime.HasValue }, + { "maxTimeMS", () => MaxTimeHelper.ToMaxTimeMS(_maxTime.Value), _maxTime.HasValue && !operationContext.IsRootContextTimeoutConfigured() }, { "collation", () => _collation.ToBsonDocument(), _collation != null }, { "comment", _comment, _comment != null }, { "readConcern", readConcern, readConcern != null } }; } - public long Execute(IReadBinding binding, CancellationToken cancellationToken) + public long Execute(OperationContext operationContext, IReadBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); using (BeginOperation()) - using (var context = RetryableReadContext.Create(binding, _retryRequested, cancellationToken)) + using (var context = RetryableReadContext.Create(operationContext, binding, _retryRequested)) { - return Execute(context, cancellationToken); + return Execute(operationContext, context); } } - public long Execute(RetryableReadContext context, CancellationToken 
cancellationToken) + public long Execute(OperationContext operationContext, RetryableReadContext context) { - var operation = CreateOperation(context); - var document = operation.Execute(context, cancellationToken); + var operation = CreateOperation(operationContext, context); + var document = operation.Execute(operationContext, context); return document["n"].ToInt64(); } - public async Task<long> ExecuteAsync(IReadBinding binding, CancellationToken cancellationToken) + public async Task<long> ExecuteAsync(OperationContext operationContext, IReadBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); using (BeginOperation()) - using (var context = await RetryableReadContext.CreateAsync(binding, _retryRequested, cancellationToken).ConfigureAwait(false)) + using (var context = await RetryableReadContext.CreateAsync(operationContext, binding, _retryRequested).ConfigureAwait(false)) { - return await ExecuteAsync(context, cancellationToken).ConfigureAwait(false); + return await ExecuteAsync(operationContext, context).ConfigureAwait(false); } } - public async Task<long> ExecuteAsync(RetryableReadContext context, CancellationToken cancellationToken) + public async Task<long> ExecuteAsync(OperationContext operationContext, RetryableReadContext context) { - var operation = CreateOperation(context); - var document = await operation.ExecuteAsync(context, cancellationToken).ConfigureAwait(false); + var operation = CreateOperation(operationContext, context); + var document = await operation.ExecuteAsync(operationContext, context).ConfigureAwait(false); return document["n"].ToInt64(); } private IDisposable BeginOperation() => EventContext.BeginOperation("count"); - private ReadCommandOperation<BsonDocument> CreateOperation(RetryableReadContext context) + private ReadCommandOperation<BsonDocument> CreateOperation(OperationContext operationContext, RetryableReadContext context) { - var command = CreateCommand(context.Channel.ConnectionDescription, context.Binding.Session); 
+ var command = CreateCommand(operationContext, context.Binding.Session, context.Channel.ConnectionDescription); return new ReadCommandOperation<BsonDocument>(_collectionNamespace.DatabaseNamespace, command, BsonDocumentSerializer.Instance, _messageEncoderSettings) { RetryRequested = _retryRequested // might be overridden by retryable read context diff --git a/src/MongoDB.Driver/Core/Operations/CreateCollectionOperation.cs b/src/MongoDB.Driver/Core/Operations/CreateCollectionOperation.cs index 9f9923a5c75..a5149725bfa 100644 --- a/src/MongoDB.Driver/Core/Operations/CreateCollectionOperation.cs +++ b/src/MongoDB.Driver/Core/Operations/CreateCollectionOperation.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,7 +14,6 @@ */ using System; -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Bson.Serialization.Serializers; @@ -80,10 +79,8 @@ CreateCollectionOperation CreateInnerCollectionOperation(string collectionName) private long? _maxDocuments; private long? _maxSize; private readonly MessageEncoderSettings _messageEncoderSettings; - private bool? _noPadding; private BsonDocument _storageEngine; private TimeSeriesOptions _timeSeriesOptions; - private bool? _usePowerOf2Sizes; private DocumentValidationAction? _validationAction; private DocumentValidationLevel? _validationLevel; private BsonDocument _validator; @@ -172,12 +169,6 @@ public MessageEncoderSettings MessageEncoderSettings get { return _messageEncoderSettings; } } - public bool? NoPadding - { - get { return _noPadding; } - set { _noPadding = value; } - } - public BsonDocument StorageEngine { get { return _storageEngine; } @@ -190,12 +181,6 @@ public TimeSeriesOptions TimeSeriesOptions set { _timeSeriesOptions = value; } } - public bool? 
UsePowerOf2Sizes - { - get { return _usePowerOf2Sizes; } - set { _usePowerOf2Sizes = value; } - } - public DocumentValidationAction? ValidationAction { get { return _validationAction; } @@ -226,10 +211,9 @@ public BsonDocument ClusteredIndex set => _clusteredIndex = value; } - internal BsonDocument CreateCommand(ICoreSessionHandle session) + internal BsonDocument CreateCommand(OperationContext operationContext, ICoreSessionHandle session) { - var flags = GetFlags(); - var writeConcern = WriteConcernHelper.GetEffectiveWriteConcern(session, _writeConcern); + var writeConcern = WriteConcernHelper.GetEffectiveWriteConcern(operationContext, session, _writeConcern); return new BsonDocument { { "create", _collectionNamespace.CollectionName }, @@ -237,7 +221,6 @@ internal BsonDocument CreateCommand(ICoreSessionHandle session) { "capped", () => _capped.Value, _capped.HasValue }, { "size", () => _maxSize.Value, _maxSize.HasValue }, { "max", () => _maxDocuments.Value, _maxDocuments.HasValue }, - { "flags", () => (int)flags.Value, flags.HasValue }, { "storageEngine", _storageEngine, _storageEngine != null }, { "indexOptionDefaults", _indexOptionDefaults, _indexOptionDefaults != null }, { "validator", _validator, _validator != null }, @@ -253,66 +236,45 @@ internal BsonDocument CreateCommand(ICoreSessionHandle session) }; } - private CreateCollectionFlags? 
GetFlags() - { - if (_usePowerOf2Sizes.HasValue || _noPadding.HasValue) - { - var flags = CreateCollectionFlags.None; - if (_usePowerOf2Sizes.HasValue && _usePowerOf2Sizes.Value) - { - flags |= CreateCollectionFlags.UsePowerOf2Sizes; - } - if (_noPadding.HasValue && _noPadding.Value) - { - flags |= CreateCollectionFlags.NoPadding; - } - return flags; - } - else - { - return null; - } - } - - public BsonDocument Execute(IWriteBinding binding, CancellationToken cancellationToken) + public BsonDocument Execute(OperationContext operationContext, IWriteBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); using (BeginOperation()) - using (var channelSource = binding.GetWriteChannelSource(cancellationToken)) - using (var channel = channelSource.GetChannel(cancellationToken)) + using (var channelSource = binding.GetWriteChannelSource(operationContext)) + using (var channel = channelSource.GetChannel(operationContext)) { EnsureServerIsValid(channel.ConnectionDescription.MaxWireVersion); using (var channelBinding = new ChannelReadWriteBinding(channelSource.Server, channel, binding.Session.Fork())) { - var operation = CreateOperation(channelBinding.Session); - return operation.Execute(channelBinding, cancellationToken); + var operation = CreateOperation(operationContext, channelBinding.Session); + return operation.Execute(operationContext, channelBinding); } } } - public async Task<BsonDocument> ExecuteAsync(IWriteBinding binding, CancellationToken cancellationToken) + public async Task<BsonDocument> ExecuteAsync(OperationContext operationContext, IWriteBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); using (BeginOperation()) - using (var channelSource = await binding.GetWriteChannelSourceAsync(cancellationToken).ConfigureAwait(false)) - using (var channel = await channelSource.GetChannelAsync(cancellationToken).ConfigureAwait(false)) + using (var channelSource = await binding.GetWriteChannelSourceAsync(operationContext).ConfigureAwait(false)) + using 
(var channel = await channelSource.GetChannelAsync(operationContext).ConfigureAwait(false)) { EnsureServerIsValid(channel.ConnectionDescription.MaxWireVersion); using (var channelBinding = new ChannelReadWriteBinding(channelSource.Server, channel, binding.Session.Fork())) { - var operation = CreateOperation(channelBinding.Session); - return await operation.ExecuteAsync(channelBinding, cancellationToken).ConfigureAwait(false); + var operation = CreateOperation(operationContext, channelBinding.Session); + return await operation.ExecuteAsync(operationContext, channelBinding).ConfigureAwait(false); } } } private IDisposable BeginOperation() => EventContext.BeginOperation("create"); - private WriteCommandOperation<BsonDocument> CreateOperation(ICoreSessionHandle session) + private WriteCommandOperation<BsonDocument> CreateOperation(OperationContext operationContext, ICoreSessionHandle session) { - var command = CreateCommand(session); + var command = CreateCommand(operationContext, session); return new WriteCommandOperation<BsonDocument>(_collectionNamespace.DatabaseNamespace, command, BsonDocumentSerializer.Instance, _messageEncoderSettings); } @@ -320,13 +282,5 @@ private void EnsureServerIsValid(int maxWireVersion) { _supportedFeature?.ThrowIfNotSupported(maxWireVersion); } - - [Flags] - private enum CreateCollectionFlags - { - None = 0, - UsePowerOf2Sizes = 1, - NoPadding = 2 - } } } diff --git a/src/MongoDB.Driver/Core/Operations/CreateIndexesOperation.cs b/src/MongoDB.Driver/Core/Operations/CreateIndexesOperation.cs index 1a19dc5787a..42632d712af 100644 --- a/src/MongoDB.Driver/Core/Operations/CreateIndexesOperation.cs +++ b/src/MongoDB.Driver/Core/Operations/CreateIndexesOperation.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,7 +16,6 @@ using System; using System.Collections.Generic; using System.Linq; -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Bson.Serialization.Serializers; @@ -87,34 +86,34 @@ public TimeSpan? MaxTime set { _maxTime = Ensure.IsNullOrInfiniteOrGreaterThanOrEqualToZero(value, nameof(value)); } } - public BsonDocument Execute(IWriteBinding binding, CancellationToken cancellationToken) + public BsonDocument Execute(OperationContext operationContext, IWriteBinding binding) { using (BeginOperation()) - using (var channelSource = binding.GetWriteChannelSource(cancellationToken)) - using (var channel = channelSource.GetChannel(cancellationToken)) + using (var channelSource = binding.GetWriteChannelSource(operationContext)) + using (var channel = channelSource.GetChannel(operationContext)) using (var channelBinding = new ChannelReadWriteBinding(channelSource.Server, channel, binding.Session.Fork())) { - var operation = CreateOperation(channelBinding.Session, channel.ConnectionDescription); - return operation.Execute(channelBinding, cancellationToken); + var operation = CreateOperation(operationContext, channelBinding.Session, channel.ConnectionDescription); + return operation.Execute(operationContext, channelBinding); } } - public async Task<BsonDocument> ExecuteAsync(IWriteBinding binding, CancellationToken cancellationToken) + public async Task<BsonDocument> ExecuteAsync(OperationContext operationContext, IWriteBinding binding) { using (BeginOperation()) - using (var channelSource = await binding.GetWriteChannelSourceAsync(cancellationToken).ConfigureAwait(false)) - using (var channel = await channelSource.GetChannelAsync(cancellationToken).ConfigureAwait(false)) + using (var channelSource = await binding.GetWriteChannelSourceAsync(operationContext).ConfigureAwait(false)) + using (var channel = await channelSource.GetChannelAsync(operationContext).ConfigureAwait(false)) using (var channelBinding = new 
ChannelReadWriteBinding(channelSource.Server, channel, binding.Session.Fork())) { - var operation = CreateOperation(channelBinding.Session, channel.ConnectionDescription); - return await operation.ExecuteAsync(channelBinding, cancellationToken).ConfigureAwait(false); + var operation = CreateOperation(operationContext, channelBinding.Session, channel.ConnectionDescription); + return await operation.ExecuteAsync(operationContext, channelBinding).ConfigureAwait(false); } } - internal BsonDocument CreateCommand(ICoreSessionHandle session, ConnectionDescription connectionDescription) + internal BsonDocument CreateCommand(OperationContext operationContext, ICoreSessionHandle session, ConnectionDescription connectionDescription) { var maxWireVersion = connectionDescription.MaxWireVersion; - var writeConcern = WriteConcernHelper.GetEffectiveWriteConcern(session, _writeConcern); + var writeConcern = WriteConcernHelper.GetEffectiveWriteConcern(operationContext, session, _writeConcern); if (_commitQuorum != null) { Feature.CreateIndexCommitQuorum.ThrowIfNotSupported(maxWireVersion); @@ -124,7 +123,7 @@ internal BsonDocument CreateCommand(ICoreSessionHandle session, ConnectionDescri { { "createIndexes", _collectionNamespace.CollectionName }, { "indexes", new BsonArray(_requests.Select(request => request.CreateIndexDocument())) }, - { "maxTimeMS", () => MaxTimeHelper.ToMaxTimeMS(_maxTime.Value), _maxTime.HasValue }, + { "maxTimeMS", () => MaxTimeHelper.ToMaxTimeMS(_maxTime.Value), _maxTime.HasValue && !operationContext.IsRootContextTimeoutConfigured() }, { "writeConcern", writeConcern, writeConcern != null }, { "comment", _comment, _comment != null }, { "commitQuorum", () => _commitQuorum.ToBsonValue(), _commitQuorum != null } @@ -133,10 +132,10 @@ internal BsonDocument CreateCommand(ICoreSessionHandle session, ConnectionDescri private IDisposable BeginOperation() => EventContext.BeginOperation(null, "createIndexes"); - private WriteCommandOperation<BsonDocument> 
CreateOperation(ICoreSessionHandle session, ConnectionDescription connectionDescription) + private WriteCommandOperation<BsonDocument> CreateOperation(OperationContext operationContext, ICoreSessionHandle session, ConnectionDescription connectionDescription) { var databaseNamespace = _collectionNamespace.DatabaseNamespace; - var command = CreateCommand(session, connectionDescription); + var command = CreateCommand(operationContext, session, connectionDescription); var resultSerializer = BsonDocumentSerializer.Instance; return new WriteCommandOperation<BsonDocument>(databaseNamespace, command, resultSerializer, _messageEncoderSettings); } diff --git a/src/MongoDB.Driver/Core/Operations/CreateSearchIndexesOperation.cs b/src/MongoDB.Driver/Core/Operations/CreateSearchIndexesOperation.cs index bb96493a076..edc8ae04f77 100644 --- a/src/MongoDB.Driver/Core/Operations/CreateSearchIndexesOperation.cs +++ b/src/MongoDB.Driver/Core/Operations/CreateSearchIndexesOperation.cs @@ -15,7 +15,6 @@ using System.Collections.Generic; using System.Linq; -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Bson.Serialization.Serializers; @@ -55,28 +54,28 @@ public CreateSearchIndexesOperation( // public methods /// <inheritdoc/> - public BsonDocument Execute(IWriteBinding binding, CancellationToken cancellationToken) + public BsonDocument Execute(OperationContext operationContext, IWriteBinding binding) { using (EventContext.BeginOperation("createSearchIndexes")) - using (var channelSource = binding.GetWriteChannelSource(cancellationToken)) - using (var channel = channelSource.GetChannel(cancellationToken)) + using (var channelSource = binding.GetWriteChannelSource(operationContext)) + using (var channel = channelSource.GetChannel(operationContext)) using (var channelBinding = new ChannelReadWriteBinding(channelSource.Server, channel, binding.Session.Fork())) { var operation = CreateOperation(); - return operation.Execute(channelBinding, 
cancellationToken); + return operation.Execute(operationContext, channelBinding); } } /// <inheritdoc/> - public async Task<BsonDocument> ExecuteAsync(IWriteBinding binding, CancellationToken cancellationToken) + public async Task<BsonDocument> ExecuteAsync(OperationContext operationContext, IWriteBinding binding) { using (EventContext.BeginOperation("createSearchIndexes")) - using (var channelSource = await binding.GetWriteChannelSourceAsync(cancellationToken).ConfigureAwait(false)) - using (var channel = await channelSource.GetChannelAsync(cancellationToken).ConfigureAwait(false)) + using (var channelSource = await binding.GetWriteChannelSourceAsync(operationContext).ConfigureAwait(false)) + using (var channel = await channelSource.GetChannelAsync(operationContext).ConfigureAwait(false)) using (var channelBinding = new ChannelReadWriteBinding(channelSource.Server, channel, binding.Session.Fork())) { var operation = CreateOperation(); - return await operation.ExecuteAsync(channelBinding, cancellationToken).ConfigureAwait(false); + return await operation.ExecuteAsync(operationContext, channelBinding).ConfigureAwait(false); } } diff --git a/src/MongoDB.Driver/Core/Operations/CreateViewOperation.cs b/src/MongoDB.Driver/Core/Operations/CreateViewOperation.cs index cd291bfbf19..148d86a2536 100644 --- a/src/MongoDB.Driver/Core/Operations/CreateViewOperation.cs +++ b/src/MongoDB.Driver/Core/Operations/CreateViewOperation.cs @@ -1,4 +1,4 @@ -/* Copyright 2016-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -15,7 +15,6 @@ using System.Collections.Generic; using System.Linq; -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Bson.Serialization.Serializers; @@ -87,35 +86,35 @@ public WriteConcern WriteConcern set { _writeConcern = value; } } - public BsonDocument Execute(IWriteBinding binding, CancellationToken cancellationToken) + public BsonDocument Execute(OperationContext operationContext, IWriteBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); - using (var channelSource = binding.GetWriteChannelSource(cancellationToken)) - using (var channel = channelSource.GetChannel(cancellationToken)) + using (var channelSource = binding.GetWriteChannelSource(operationContext)) + using (var channel = channelSource.GetChannel(operationContext)) using (var channelBinding = new ChannelReadWriteBinding(channelSource.Server, channel, binding.Session.Fork())) { - var operation = CreateOperation(channelBinding.Session, channel.ConnectionDescription); - return operation.Execute(channelBinding, cancellationToken); + var operation = CreateOperation(operationContext, channelBinding.Session, channel.ConnectionDescription); + return operation.Execute(operationContext, channelBinding); } } - public async Task<BsonDocument> ExecuteAsync(IWriteBinding binding, CancellationToken cancellationToken) + public async Task<BsonDocument> ExecuteAsync(OperationContext operationContext, IWriteBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); - using (var channelSource = await binding.GetWriteChannelSourceAsync(cancellationToken).ConfigureAwait(false)) - using (var channel = await channelSource.GetChannelAsync(cancellationToken).ConfigureAwait(false)) + using (var channelSource = await binding.GetWriteChannelSourceAsync(operationContext).ConfigureAwait(false)) + using (var channel = await channelSource.GetChannelAsync(operationContext).ConfigureAwait(false)) using (var channelBinding = new ChannelReadWriteBinding(channelSource.Server, 
channel, binding.Session.Fork())) { - var operation = CreateOperation(channelBinding.Session, channel.ConnectionDescription); - return await operation.ExecuteAsync(channelBinding, cancellationToken).ConfigureAwait(false); + var operation = CreateOperation(operationContext, channelBinding.Session, channel.ConnectionDescription); + return await operation.ExecuteAsync(operationContext, channelBinding).ConfigureAwait(false); } } - public BsonDocument CreateCommand(ICoreSessionHandle session, ConnectionDescription connectionDescription) + public BsonDocument CreateCommand(OperationContext operationContext, ICoreSessionHandle session, ConnectionDescription connectionDescription) { - var writeConcern = WriteConcernHelper.GetEffectiveWriteConcern(session, _writeConcern); + var writeConcern = WriteConcernHelper.GetEffectiveWriteConcern(operationContext, session, _writeConcern); return new BsonDocument { { "create", _viewName }, @@ -126,9 +125,9 @@ public BsonDocument CreateCommand(ICoreSessionHandle session, ConnectionDescript }; } - private WriteCommandOperation<BsonDocument> CreateOperation(ICoreSessionHandle session, ConnectionDescription connectionDescription) + private WriteCommandOperation<BsonDocument> CreateOperation(OperationContext operationContext, ICoreSessionHandle session, ConnectionDescription connectionDescription) { - var command = CreateCommand(session, connectionDescription); + var command = CreateCommand(operationContext, session, connectionDescription); return new WriteCommandOperation<BsonDocument>(_databaseNamespace, command, BsonDocumentSerializer.Instance, _messageEncoderSettings); } } diff --git a/src/MongoDB.Driver/Core/Operations/DatabaseExistsOperation.cs b/src/MongoDB.Driver/Core/Operations/DatabaseExistsOperation.cs index ec4b4a42e4a..9b6f4cdca4d 100644 --- a/src/MongoDB.Driver/Core/Operations/DatabaseExistsOperation.cs +++ b/src/MongoDB.Driver/Core/Operations/DatabaseExistsOperation.cs @@ -14,7 +14,6 @@ */ using System.Linq; -using 
System.Threading; using System.Threading.Tasks; using MongoDB.Driver.Core.Bindings; using MongoDB.Driver.Core.Misc; @@ -50,21 +49,23 @@ public bool RetryRequested set { _retryRequested = value; } } - public bool Execute(IReadBinding binding, CancellationToken cancellationToken) + public bool Execute(OperationContext operationContext, IReadBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); var operation = CreateOperation(); - var result = operation.Execute(binding, cancellationToken); - var list = result.ToList(cancellationToken); + var result = operation.Execute(operationContext, binding); + // TODO: CSOT find a way to apply CSOT timeout to ToList as well. + var list = result.ToList(operationContext.CancellationToken); return list.Any(x => x["name"] == _databaseNamespace.DatabaseName); } - public async Task<bool> ExecuteAsync(IReadBinding binding, CancellationToken cancellationToken) + public async Task<bool> ExecuteAsync(OperationContext operationContext, IReadBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); var operation = CreateOperation(); - var result = await operation.ExecuteAsync(binding, cancellationToken).ConfigureAwait(false); - var list = await result.ToListAsync(cancellationToken).ConfigureAwait(false); + var result = await operation.ExecuteAsync(operationContext, binding).ConfigureAwait(false); + // TODO: CSOT find a way to apply CSOT timeout to ToList as well. + var list = await result.ToListAsync(operationContext.CancellationToken).ConfigureAwait(false); return list.Any(x => x["name"] == _databaseNamespace.DatabaseName); } diff --git a/src/MongoDB.Driver/Core/Operations/DeleteOpcodeOperation.cs b/src/MongoDB.Driver/Core/Operations/DeleteOpcodeOperation.cs deleted file mode 100644 index d21c46885b9..00000000000 --- a/src/MongoDB.Driver/Core/Operations/DeleteOpcodeOperation.cs +++ /dev/null @@ -1,121 +0,0 @@ -/* Copyright 2013-present MongoDB Inc. 
-* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ - -using System.Threading; -using System.Threading.Tasks; -using MongoDB.Driver.Core.Bindings; -using MongoDB.Driver.Core.Events; -using MongoDB.Driver.Core.Misc; -using MongoDB.Driver.Core.WireProtocol.Messages.Encoders; - -namespace MongoDB.Driver.Core.Operations -{ - internal sealed class DeleteOpcodeOperation : IWriteOperation<WriteConcernResult>, IExecutableInRetryableWriteContext<WriteConcernResult> - { - private readonly CollectionNamespace _collectionNamespace; - private readonly MessageEncoderSettings _messageEncoderSettings; - private readonly DeleteRequest _request; - private bool _retryRequested; - private WriteConcern _writeConcern = WriteConcern.Acknowledged; - - public DeleteOpcodeOperation( - CollectionNamespace collectionNamespace, - DeleteRequest request, - MessageEncoderSettings messageEncoderSettings) - { - _collectionNamespace = Ensure.IsNotNull(collectionNamespace, nameof(collectionNamespace)); - _request = Ensure.IsNotNull(request, nameof(request)); - _messageEncoderSettings = messageEncoderSettings; - } - - public CollectionNamespace CollectionNamespace - { - get { return _collectionNamespace; } - } - - public DeleteRequest Request - { - get { return _request; } - } - - public MessageEncoderSettings MessageEncoderSettings - { - get { return _messageEncoderSettings; } - } - - public bool RetryRequested - { - get { return _retryRequested; } - set { 
_retryRequested = value; } - } - - public WriteConcern WriteConcern - { - get { return _writeConcern; } - set { _writeConcern = Ensure.IsNotNull(value, nameof(value)); } - } - - public WriteConcernResult Execute(IWriteBinding binding, CancellationToken cancellationToken) - { - Ensure.IsNotNull(binding, nameof(binding)); - - using (var context = RetryableWriteContext.Create(binding, false, cancellationToken)) - { - return Execute(context, cancellationToken); - } - } - - public WriteConcernResult Execute(RetryableWriteContext context, CancellationToken cancellationToken) - { - Ensure.IsNotNull(context, nameof(context)); - - using (EventContext.BeginOperation()) - { - var emulator = CreateEmulator(); - return emulator.Execute(context, cancellationToken); - } - } - - public async Task<WriteConcernResult> ExecuteAsync(IWriteBinding binding, CancellationToken cancellationToken) - { - Ensure.IsNotNull(binding, nameof(binding)); - - using (var context = await RetryableWriteContext.CreateAsync(binding, false, cancellationToken).ConfigureAwait(false)) - { - return await ExecuteAsync(context, cancellationToken).ConfigureAwait(false); - } - } - - public async Task<WriteConcernResult> ExecuteAsync(RetryableWriteContext context, CancellationToken cancellationToken) - { - Ensure.IsNotNull(context, nameof(context)); - - using (EventContext.BeginOperation()) - { - var emulator = CreateEmulator(); - return await emulator.ExecuteAsync(context, cancellationToken).ConfigureAwait(false); - } - } - - private IExecutableInRetryableWriteContext<WriteConcernResult> CreateEmulator() - { - return new DeleteOpcodeOperationEmulator(_collectionNamespace, _request, _messageEncoderSettings) - { - RetryRequested = _retryRequested, - WriteConcern = _writeConcern - }; - } - } -} diff --git a/src/MongoDB.Driver/Core/Operations/DeleteOpcodeOperationEmulator.cs b/src/MongoDB.Driver/Core/Operations/DeleteOpcodeOperationEmulator.cs deleted file mode 100644 index e325eee4b95..00000000000 --- 
a/src/MongoDB.Driver/Core/Operations/DeleteOpcodeOperationEmulator.cs +++ /dev/null @@ -1,144 +0,0 @@ -/* Copyright 2013-present MongoDB Inc. -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ - -using System.Threading; -using System.Threading.Tasks; -using MongoDB.Driver.Core.Bindings; -using MongoDB.Driver.Core.Misc; -using MongoDB.Driver.Core.WireProtocol.Messages.Encoders; - -namespace MongoDB.Driver.Core.Operations -{ - internal class DeleteOpcodeOperationEmulator : IExecutableInRetryableWriteContext<WriteConcernResult> - { - // fields - private readonly CollectionNamespace _collectionNamespace; - private readonly DeleteRequest _request; - private readonly MessageEncoderSettings _messageEncoderSettings; - private bool _retryRequested; - private WriteConcern _writeConcern = WriteConcern.Acknowledged; - - // constructors - public DeleteOpcodeOperationEmulator( - CollectionNamespace collectionNamespace, - DeleteRequest request, - MessageEncoderSettings messageEncoderSettings) - { - _collectionNamespace = Ensure.IsNotNull(collectionNamespace, nameof(collectionNamespace)); - _request = Ensure.IsNotNull(request, nameof(request)); - _messageEncoderSettings = messageEncoderSettings; - } - - // properties - public CollectionNamespace CollectionNamespace - { - get { return _collectionNamespace; } - } - - public DeleteRequest Request - { - get { return _request; } - } - - public MessageEncoderSettings MessageEncoderSettings - { - get { 
return _messageEncoderSettings; } - } - - public bool RetryRequested - { - get { return _retryRequested; } - set { _retryRequested = value; } - } - - public WriteConcern WriteConcern - { - get { return _writeConcern; } - set { _writeConcern = Ensure.IsNotNull(value, nameof(value)); } - } - - // public methods - public WriteConcernResult Execute(RetryableWriteContext context, CancellationToken cancellationToken) - { - Ensure.IsNotNull(context, nameof(context)); - - var operation = CreateOperation(); - BulkWriteOperationResult result; - MongoBulkWriteOperationException exception = null; - try - { - result = operation.Execute(context, cancellationToken); - } - catch (MongoBulkWriteOperationException ex) - { - result = ex.Result; - exception = ex; - } - - return CreateResultOrThrow(context.Channel, result, exception); - } - - public async Task<WriteConcernResult> ExecuteAsync(RetryableWriteContext context, CancellationToken cancellationToken) - { - Ensure.IsNotNull(context, nameof(context)); - - var operation = CreateOperation(); - BulkWriteOperationResult result; - MongoBulkWriteOperationException exception = null; - try - { - result = await operation.ExecuteAsync(context, cancellationToken).ConfigureAwait(false); - } - catch (MongoBulkWriteOperationException ex) - { - result = ex.Result; - exception = ex; - } - - return CreateResultOrThrow(context.Channel, result, exception); - } - - // private methods - private BulkDeleteOperation CreateOperation() - { - var requests = new[] { _request }; - return new BulkDeleteOperation(_collectionNamespace, requests, _messageEncoderSettings) - { - RetryRequested = _retryRequested, - WriteConcern = _writeConcern - }; - } - - private WriteConcernResult CreateResultOrThrow(IChannelHandle channel, BulkWriteOperationResult result, MongoBulkWriteOperationException exception) - { - var converter = new BulkWriteOperationResultConverter(); - if (exception != null) - { - throw 
converter.ToWriteConcernException(channel.ConnectionDescription.ConnectionId, exception); - } - else - { - if (_writeConcern.IsAcknowledged) - { - return converter.ToWriteConcernResult(result); - } - else - { - return null; - } - } - } - } -} diff --git a/src/MongoDB.Driver/Core/Operations/DeleteRequest.cs b/src/MongoDB.Driver/Core/Operations/DeleteRequest.cs index 0eef8967778..78444e3f055 100644 --- a/src/MongoDB.Driver/Core/Operations/DeleteRequest.cs +++ b/src/MongoDB.Driver/Core/Operations/DeleteRequest.cs @@ -14,7 +14,6 @@ */ using MongoDB.Bson; -using MongoDB.Driver.Core.Connections; using MongoDB.Driver.Core.Misc; namespace MongoDB.Driver.Core.Operations @@ -36,6 +35,6 @@ public DeleteRequest(BsonDocument filter) public int Limit { get; init; } // public methods - public override bool IsRetryable(ConnectionDescription connectionDescription) => Limit != 0; + public override bool IsRetryable() => Limit != 0; } } diff --git a/src/MongoDB.Driver/Core/Operations/DistinctOperation.cs b/src/MongoDB.Driver/Core/Operations/DistinctOperation.cs index 441996f2889..a64cee8a13c 100644 --- a/src/MongoDB.Driver/Core/Operations/DistinctOperation.cs +++ b/src/MongoDB.Driver/Core/Operations/DistinctOperation.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -14,7 +14,6 @@ */ using System; -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Bson.IO; @@ -105,15 +104,15 @@ public IBsonSerializer<TValue> ValueSerializer get { return _valueSerializer; } } - public IAsyncCursor<TValue> Execute(IReadBinding binding, CancellationToken cancellationToken) + public IAsyncCursor<TValue> Execute(OperationContext operationContext, IReadBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); using (BeginOperation()) - using (var context = RetryableReadContext.Create(binding, _retryRequested, cancellationToken)) + using (var context = RetryableReadContext.Create(operationContext, binding, _retryRequested)) { - var operation = CreateOperation(context); - var result = operation.Execute(context, cancellationToken); + var operation = CreateOperation(operationContext, context); + var result = operation.Execute(operationContext, context); binding.Session.SetSnapshotTimeIfNeeded(result.AtClusterTime); @@ -121,15 +120,15 @@ public IAsyncCursor<TValue> Execute(IReadBinding binding, CancellationToken canc } } - public async Task<IAsyncCursor<TValue>> ExecuteAsync(IReadBinding binding, CancellationToken cancellationToken) + public async Task<IAsyncCursor<TValue>> ExecuteAsync(OperationContext operationContext, IReadBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); using (BeginOperation()) - using (var context = await RetryableReadContext.CreateAsync(binding, _retryRequested, cancellationToken).ConfigureAwait(false)) + using (var context = await RetryableReadContext.CreateAsync(operationContext, binding, _retryRequested).ConfigureAwait(false)) { - var operation = CreateOperation(context); - var result = await operation.ExecuteAsync(context, cancellationToken).ConfigureAwait(false); + var operation = CreateOperation(operationContext, context); + var result = await operation.ExecuteAsync(operationContext, context).ConfigureAwait(false); 
binding.Session.SetSnapshotTimeIfNeeded(result.AtClusterTime); @@ -137,7 +136,7 @@ public async Task<IAsyncCursor<TValue>> ExecuteAsync(IReadBinding binding, Cance } } - public BsonDocument CreateCommand(ConnectionDescription connectionDescription, ICoreSession session) + public BsonDocument CreateCommand(OperationContext operationContext, ICoreSession session, ConnectionDescription connectionDescription) { var readConcern = ReadConcernHelper.GetReadConcernForCommand(session, connectionDescription, _readConcern); return new BsonDocument @@ -145,7 +144,7 @@ public BsonDocument CreateCommand(ConnectionDescription connectionDescription, I { "distinct", _collectionNamespace.CollectionName }, { "key", _fieldName }, { "query", _filter, _filter != null }, - { "maxTimeMS", () => MaxTimeHelper.ToMaxTimeMS(_maxTime.Value), _maxTime.HasValue }, + { "maxTimeMS", () => MaxTimeHelper.ToMaxTimeMS(_maxTime.Value), _maxTime.HasValue && !operationContext.IsRootContextTimeoutConfigured() }, { "collation", () => _collation.ToBsonDocument(), _collation != null }, { "comment", _comment, _comment != null }, { "readConcern", readConcern, readConcern != null } @@ -154,9 +153,9 @@ public BsonDocument CreateCommand(ConnectionDescription connectionDescription, I private IDisposable BeginOperation() => EventContext.BeginOperation("distinct"); - private ReadCommandOperation<DistinctResult> CreateOperation(RetryableReadContext context) + private ReadCommandOperation<DistinctResult> CreateOperation(OperationContext operationContext, RetryableReadContext context) { - var command = CreateCommand(context.Channel.ConnectionDescription, context.Binding.Session); + var command = CreateCommand(operationContext, context.Binding.Session, context.Channel.ConnectionDescription); var serializer = new DistinctResultDeserializer(_valueSerializer); return new ReadCommandOperation<DistinctResult>(_collectionNamespace.DatabaseNamespace, command, serializer, _messageEncoderSettings) diff --git 
a/src/MongoDB.Driver/Core/Operations/DropCollectionOperation.cs b/src/MongoDB.Driver/Core/Operations/DropCollectionOperation.cs index fa7e11f8c70..8465a9f42eb 100644 --- a/src/MongoDB.Driver/Core/Operations/DropCollectionOperation.cs +++ b/src/MongoDB.Driver/Core/Operations/DropCollectionOperation.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,7 +14,6 @@ */ using System; -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Bson.Serialization.Serializers; @@ -96,20 +95,20 @@ public WriteConcern WriteConcern set { _writeConcern = value; } } - public BsonDocument Execute(IWriteBinding binding, CancellationToken cancellationToken) + public BsonDocument Execute(OperationContext operationContext, IWriteBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); using (BeginOperation()) - using (var channelSource = binding.GetWriteChannelSource(cancellationToken)) - using (var channel = channelSource.GetChannel(cancellationToken)) + using (var channelSource = binding.GetWriteChannelSource(operationContext)) + using (var channel = channelSource.GetChannel(operationContext)) using (var channelBinding = new ChannelReadWriteBinding(channelSource.Server, channel, binding.Session.Fork())) { - var operation = CreateOperation(channelBinding.Session); + var operation = CreateOperation(operationContext, channelBinding.Session); BsonDocument result; try { - result = operation.Execute(channelBinding, cancellationToken); + result = operation.Execute(operationContext, channelBinding); } catch (MongoCommandException ex) { @@ -123,20 +122,20 @@ public BsonDocument Execute(IWriteBinding binding, CancellationToken cancellatio } } - public async Task<BsonDocument> ExecuteAsync(IWriteBinding binding, CancellationToken cancellationToken) + public async 
Task<BsonDocument> ExecuteAsync(OperationContext operationContext, IWriteBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); using (BeginOperation()) - using (var channelSource = await binding.GetWriteChannelSourceAsync(cancellationToken).ConfigureAwait(false)) - using (var channel = await channelSource.GetChannelAsync(cancellationToken).ConfigureAwait(false)) + using (var channelSource = await binding.GetWriteChannelSourceAsync(operationContext).ConfigureAwait(false)) + using (var channel = await channelSource.GetChannelAsync(operationContext).ConfigureAwait(false)) using (var channelBinding = new ChannelReadWriteBinding(channelSource.Server, channel, binding.Session.Fork())) { - var operation = CreateOperation(channelBinding.Session); + var operation = CreateOperation(operationContext, channelBinding.Session); BsonDocument result; try { - result = await operation.ExecuteAsync(channelBinding, cancellationToken).ConfigureAwait(false); + result = await operation.ExecuteAsync(operationContext, channelBinding).ConfigureAwait(false); } catch (MongoCommandException ex) { @@ -150,9 +149,9 @@ public async Task<BsonDocument> ExecuteAsync(IWriteBinding binding, Cancellation } } - internal BsonDocument CreateCommand(ICoreSessionHandle session) + internal BsonDocument CreateCommand(OperationContext operationContext, ICoreSessionHandle session) { - var writeConcern = WriteConcernHelper.GetEffectiveWriteConcern(session, _writeConcern); + var writeConcern = WriteConcernHelper.GetEffectiveWriteConcern(operationContext, session, _writeConcern); return new BsonDocument { { "drop", _collectionNamespace.CollectionName }, @@ -162,9 +161,9 @@ internal BsonDocument CreateCommand(ICoreSessionHandle session) private IDisposable BeginOperation() => EventContext.BeginOperation("drop"); - private WriteCommandOperation<BsonDocument> CreateOperation(ICoreSessionHandle session) + private WriteCommandOperation<BsonDocument> CreateOperation(OperationContext operationContext, 
ICoreSessionHandle session) { - var command = CreateCommand(session); + var command = CreateCommand(operationContext, session); return new WriteCommandOperation<BsonDocument>(_collectionNamespace.DatabaseNamespace, command, BsonDocumentSerializer.Instance, _messageEncoderSettings); } diff --git a/src/MongoDB.Driver/Core/Operations/DropDatabaseOperation.cs b/src/MongoDB.Driver/Core/Operations/DropDatabaseOperation.cs index 315a5307409..df6f9b1ac71 100644 --- a/src/MongoDB.Driver/Core/Operations/DropDatabaseOperation.cs +++ b/src/MongoDB.Driver/Core/Operations/DropDatabaseOperation.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,7 +14,6 @@ */ using System; -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Bson.Serialization.Serializers; @@ -53,11 +52,11 @@ public WriteConcern WriteConcern { get { return _writeConcern; } set { _writeConcern = value; } - } - - public BsonDocument CreateCommand(ICoreSessionHandle session) + } + + public BsonDocument CreateCommand(OperationContext operationContext, ICoreSessionHandle session) { - var writeConcern = WriteConcernHelper.GetEffectiveWriteConcern(session, _writeConcern); + var writeConcern = WriteConcernHelper.GetEffectiveWriteConcern(operationContext, session, _writeConcern); return new BsonDocument { { "dropDatabase", 1 }, @@ -65,39 +64,39 @@ public BsonDocument CreateCommand(ICoreSessionHandle session) }; } - public BsonDocument Execute(IWriteBinding binding, CancellationToken cancellationToken) + public BsonDocument Execute(OperationContext operationContext, IWriteBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); using (BeginOperation()) - using (var channelSource = binding.GetWriteChannelSource(cancellationToken)) - using (var channel = channelSource.GetChannel(cancellationToken)) 
+ using (var channelSource = binding.GetWriteChannelSource(operationContext)) + using (var channel = channelSource.GetChannel(operationContext)) using (var channelBinding = new ChannelReadWriteBinding(channelSource.Server, channel, binding.Session.Fork())) { - var operation = CreateOperation(channelBinding.Session); - return operation.Execute(channelBinding, cancellationToken); + var operation = CreateOperation(operationContext, channelBinding.Session); + return operation.Execute(operationContext, channelBinding); } } - public async Task<BsonDocument> ExecuteAsync(IWriteBinding binding, CancellationToken cancellationToken) + public async Task<BsonDocument> ExecuteAsync(OperationContext operationContext, IWriteBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); using (BeginOperation()) - using (var channelSource = await binding.GetWriteChannelSourceAsync(cancellationToken).ConfigureAwait(false)) - using (var channel = await channelSource.GetChannelAsync(cancellationToken).ConfigureAwait(false)) + using (var channelSource = await binding.GetWriteChannelSourceAsync(operationContext).ConfigureAwait(false)) + using (var channel = await channelSource.GetChannelAsync(operationContext).ConfigureAwait(false)) using (var channelBinding = new ChannelReadWriteBinding(channelSource.Server, channel, binding.Session.Fork())) { - var operation = CreateOperation(channelBinding.Session); - return await operation.ExecuteAsync(channelBinding, cancellationToken).ConfigureAwait(false); + var operation = CreateOperation(operationContext, channelBinding.Session); + return await operation.ExecuteAsync(operationContext, channelBinding).ConfigureAwait(false); } } private IDisposable BeginOperation() => EventContext.BeginOperation("dropDatabase"); - private WriteCommandOperation<BsonDocument> CreateOperation(ICoreSessionHandle session) + private WriteCommandOperation<BsonDocument> CreateOperation(OperationContext operationContext, ICoreSessionHandle session) { - var command = 
CreateCommand(session); + var command = CreateCommand(operationContext, session); return new WriteCommandOperation<BsonDocument>(_databaseNamespace, command, BsonDocumentSerializer.Instance, _messageEncoderSettings); } } diff --git a/src/MongoDB.Driver/Core/Operations/DropIndexOperation.cs b/src/MongoDB.Driver/Core/Operations/DropIndexOperation.cs index 577ed62e448..0c8e2fd8fda 100644 --- a/src/MongoDB.Driver/Core/Operations/DropIndexOperation.cs +++ b/src/MongoDB.Driver/Core/Operations/DropIndexOperation.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,7 +14,6 @@ */ using System; -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Bson.Serialization.Serializers; @@ -85,33 +84,33 @@ public TimeSpan? MaxTime set { _maxTime = Ensure.IsNullOrInfiniteOrGreaterThanOrEqualToZero(value, nameof(value)); } } - public BsonDocument CreateCommand(ICoreSessionHandle session) + public BsonDocument CreateCommand(OperationContext operationContext, ICoreSessionHandle session) { - var writeConcern = WriteConcernHelper.GetEffectiveWriteConcern(session, _writeConcern); + var writeConcern = WriteConcernHelper.GetEffectiveWriteConcern(operationContext, session, _writeConcern); return new BsonDocument { { "dropIndexes", _collectionNamespace.CollectionName }, { "index", _indexName }, - { "maxTimeMS", () => MaxTimeHelper.ToMaxTimeMS(_maxTime.Value), _maxTime.HasValue }, + { "maxTimeMS", () => MaxTimeHelper.ToMaxTimeMS(_maxTime.Value), _maxTime.HasValue && !operationContext.IsRootContextTimeoutConfigured() }, { "writeConcern", writeConcern, writeConcern != null }, { "comment", _comment, _comment != null } }; } - public BsonDocument Execute(IWriteBinding binding, CancellationToken cancellationToken) + public BsonDocument Execute(OperationContext operationContext, 
IWriteBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); using (BeginOperation()) - using (var channelSource = binding.GetWriteChannelSource(cancellationToken)) - using (var channel = channelSource.GetChannel(cancellationToken)) + using (var channelSource = binding.GetWriteChannelSource(operationContext)) + using (var channel = channelSource.GetChannel(operationContext)) using (var channelBinding = new ChannelReadWriteBinding(channelSource.Server, channel, binding.Session.Fork())) { - var operation = CreateOperation(channelBinding.Session); + var operation = CreateOperation(operationContext, channelBinding.Session); BsonDocument result; try { - result = operation.Execute(channelBinding, cancellationToken); + result = operation.Execute(operationContext, channelBinding); } catch (MongoCommandException ex) { @@ -125,20 +124,20 @@ public BsonDocument Execute(IWriteBinding binding, CancellationToken cancellatio } } - public async Task<BsonDocument> ExecuteAsync(IWriteBinding binding, CancellationToken cancellationToken) + public async Task<BsonDocument> ExecuteAsync(OperationContext operationContext, IWriteBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); using (BeginOperation()) - using (var channelSource = await binding.GetWriteChannelSourceAsync(cancellationToken).ConfigureAwait(false)) - using (var channel = await channelSource.GetChannelAsync(cancellationToken).ConfigureAwait(false)) + using (var channelSource = await binding.GetWriteChannelSourceAsync(operationContext).ConfigureAwait(false)) + using (var channel = await channelSource.GetChannelAsync(operationContext).ConfigureAwait(false)) using (var channelBinding = new ChannelReadWriteBinding(channelSource.Server, channel, binding.Session.Fork())) { - var operation = CreateOperation(channelBinding.Session); + var operation = CreateOperation(operationContext, channelBinding.Session); BsonDocument result; try { - result = await operation.ExecuteAsync(channelBinding, 
cancellationToken).ConfigureAwait(false); + result = await operation.ExecuteAsync(operationContext, channelBinding).ConfigureAwait(false); } catch (MongoCommandException ex) { @@ -150,13 +149,13 @@ public async Task<BsonDocument> ExecuteAsync(IWriteBinding binding, Cancellation } return result; } - } + } private IDisposable BeginOperation() => EventContext.BeginOperation("dropIndexes"); - private WriteCommandOperation<BsonDocument> CreateOperation(ICoreSessionHandle session) + private WriteCommandOperation<BsonDocument> CreateOperation(OperationContext operationContext, ICoreSessionHandle session) { - var command = CreateCommand(session); + var command = CreateCommand(operationContext, session); return new WriteCommandOperation<BsonDocument>(_collectionNamespace.DatabaseNamespace, command, BsonDocumentSerializer.Instance, _messageEncoderSettings); } diff --git a/src/MongoDB.Driver/Core/Operations/DropSearchIndexOperation.cs b/src/MongoDB.Driver/Core/Operations/DropSearchIndexOperation.cs index 653039feb98..aff890be381 100644 --- a/src/MongoDB.Driver/Core/Operations/DropSearchIndexOperation.cs +++ b/src/MongoDB.Driver/Core/Operations/DropSearchIndexOperation.cs @@ -13,7 +13,6 @@ * limitations under the License. 
*/ -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Bson.Serialization.Serializers; @@ -63,20 +62,20 @@ private WriteCommandOperation<BsonDocument> CreateOperation() => new(_collectionNamespace.DatabaseNamespace, CreateCommand(), BsonDocumentSerializer.Instance, _messageEncoderSettings); /// <inheritdoc/> - public BsonDocument Execute(IWriteBinding binding, CancellationToken cancellationToken) + public BsonDocument Execute(OperationContext operationContext, IWriteBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); using (EventContext.BeginOperation("dropSearchIndex")) - using (var channelSource = binding.GetWriteChannelSource(cancellationToken)) - using (var channel = channelSource.GetChannel(cancellationToken)) + using (var channelSource = binding.GetWriteChannelSource(operationContext)) + using (var channel = channelSource.GetChannel(operationContext)) using (var channelBinding = new ChannelReadWriteBinding(channelSource.Server, channel, binding.Session.Fork())) { var operation = CreateOperation(); try { - return operation.Execute(channelBinding, cancellationToken); + return operation.Execute(operationContext, channelBinding); } catch (MongoCommandException ex) when (ShouldIgnoreException(ex)) { @@ -86,20 +85,20 @@ public BsonDocument Execute(IWriteBinding binding, CancellationToken cancellatio } /// <inheritdoc/> - public async Task<BsonDocument> ExecuteAsync(IWriteBinding binding, CancellationToken cancellationToken) + public async Task<BsonDocument> ExecuteAsync(OperationContext operationContext, IWriteBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); using (EventContext.BeginOperation("dropSearchIndex")) - using (var channelSource = await binding.GetWriteChannelSourceAsync(cancellationToken).ConfigureAwait(false)) - using (var channel = await channelSource.GetChannelAsync(cancellationToken).ConfigureAwait(false)) + using (var channelSource = await 
binding.GetWriteChannelSourceAsync(operationContext).ConfigureAwait(false)) + using (var channel = await channelSource.GetChannelAsync(operationContext).ConfigureAwait(false)) using (var channelBinding = new ChannelReadWriteBinding(channelSource.Server, channel, binding.Session.Fork())) { var operation = CreateOperation(); try { - return await operation.ExecuteAsync(channelBinding, cancellationToken).ConfigureAwait(false); + return await operation.ExecuteAsync(operationContext, channelBinding).ConfigureAwait(false); } catch (MongoCommandException ex) when (ShouldIgnoreException(ex)) { diff --git a/src/MongoDB.Driver/Core/Operations/EndTransactionOperation.cs b/src/MongoDB.Driver/Core/Operations/EndTransactionOperation.cs index 90e5f44c882..b4bc0e39171 100644 --- a/src/MongoDB.Driver/Core/Operations/EndTransactionOperation.cs +++ b/src/MongoDB.Driver/Core/Operations/EndTransactionOperation.cs @@ -1,4 +1,4 @@ -/* Copyright 2018-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -14,7 +14,6 @@ */ using System; -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Bson.Serialization.Serializers; @@ -51,45 +50,51 @@ public MessageEncoderSettings MessageEncoderSettings protected abstract string CommandName { get; } - public virtual BsonDocument Execute(IReadBinding binding, CancellationToken cancellationToken) + public virtual BsonDocument Execute(OperationContext operationContext, IReadBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); - using (var channelSource = binding.GetReadChannelSource(cancellationToken)) - using (var channel = channelSource.GetChannel(cancellationToken)) + using (var channelSource = binding.GetReadChannelSource(operationContext)) + using (var channel = channelSource.GetChannel(operationContext)) using (var channelBinding = new ChannelReadWriteBinding(channelSource.Server, channel, binding.Session.Fork())) { - var operation = CreateOperation(); - return operation.Execute(channelBinding, cancellationToken); + var operation = CreateOperation(operationContext); + return operation.Execute(operationContext, channelBinding); } } - public virtual async Task<BsonDocument> ExecuteAsync(IReadBinding binding, CancellationToken cancellationToken) + public virtual async Task<BsonDocument> ExecuteAsync(OperationContext operationContext, IReadBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); - using (var channelSource = await binding.GetReadChannelSourceAsync(cancellationToken).ConfigureAwait(false)) - using (var channel = await channelSource.GetChannelAsync(cancellationToken).ConfigureAwait(false)) + using (var channelSource = await binding.GetReadChannelSourceAsync(operationContext).ConfigureAwait(false)) + using (var channel = await channelSource.GetChannelAsync(operationContext).ConfigureAwait(false)) using (var channelBinding = new ChannelReadWriteBinding(channelSource.Server, channel, binding.Session.Fork())) { - var operation = CreateOperation(); - return await 
operation.ExecuteAsync(channelBinding, cancellationToken).ConfigureAwait(false); + var operation = CreateOperation(operationContext); + return await operation.ExecuteAsync(operationContext, channelBinding).ConfigureAwait(false); } } - protected virtual BsonDocument CreateCommand() + protected virtual BsonDocument CreateCommand(OperationContext operationContext) { + var writeConcern = _writeConcern; + if (operationContext.IsRootContextTimeoutConfigured()) + { + writeConcern = writeConcern.With(wTimeout: null); + } + return new BsonDocument { { CommandName, 1 }, - { "writeConcern", () => _writeConcern.ToBsonDocument(), !_writeConcern.IsServerDefault }, + { "writeConcern", () => _writeConcern.ToBsonDocument(), !writeConcern.IsServerDefault }, { "recoveryToken", _recoveryToken, _recoveryToken != null } }; } - private IReadOperation<BsonDocument> CreateOperation() + private IReadOperation<BsonDocument> CreateOperation(OperationContext operationContext) { - var command = CreateCommand(); + var command = CreateCommand(operationContext); return new ReadCommandOperation<BsonDocument>(DatabaseNamespace.Admin, command, BsonDocumentSerializer.Instance, _messageEncoderSettings) { RetryRequested = false @@ -134,11 +139,11 @@ public TimeSpan? 
MaxCommitTime protected override string CommandName => "commitTransaction"; - public override BsonDocument Execute(IReadBinding binding, CancellationToken cancellationToken) + public override BsonDocument Execute(OperationContext operationContext, IReadBinding binding) { try { - return base.Execute(binding, cancellationToken); + return base.Execute(operationContext, binding); } catch (MongoException exception) when (ShouldReplaceTransientTransactionErrorWithUnknownTransactionCommitResult(exception)) { @@ -147,11 +152,11 @@ public override BsonDocument Execute(IReadBinding binding, CancellationToken can } } - public override async Task<BsonDocument> ExecuteAsync(IReadBinding binding, CancellationToken cancellationToken) + public override async Task<BsonDocument> ExecuteAsync(OperationContext operationContext, IReadBinding binding) { try { - return await base.ExecuteAsync(binding, cancellationToken).ConfigureAwait(false); + return await base.ExecuteAsync(operationContext, binding).ConfigureAwait(false); } catch (MongoException exception) when (ShouldReplaceTransientTransactionErrorWithUnknownTransactionCommitResult(exception)) { @@ -160,10 +165,10 @@ public override async Task<BsonDocument> ExecuteAsync(IReadBinding binding, Canc } } - protected override BsonDocument CreateCommand() + protected override BsonDocument CreateCommand(OperationContext operationContext) { - var command = base.CreateCommand(); - if (_maxCommitTime.HasValue) + var command = base.CreateCommand(operationContext); + if (_maxCommitTime.HasValue && !operationContext.IsRootContextTimeoutConfigured()) { command.Add("maxTimeMS", (long)_maxCommitTime.Value.TotalMilliseconds); } diff --git a/src/MongoDB.Driver/Core/Operations/EstimatedDocumentCountOperation.cs b/src/MongoDB.Driver/Core/Operations/EstimatedDocumentCountOperation.cs index 27958d906f4..eac2d1a32a1 100644 --- a/src/MongoDB.Driver/Core/Operations/EstimatedDocumentCountOperation.cs +++ 
b/src/MongoDB.Driver/Core/Operations/EstimatedDocumentCountOperation.cs @@ -14,7 +14,6 @@ */ using System; -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Driver.Core.Bindings; @@ -67,29 +66,29 @@ public bool RetryRequested set => _retryRequested = value; } - public long Execute(IReadBinding binding, CancellationToken cancellationToken) + public long Execute(OperationContext operationContext, IReadBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); using (BeginOperation()) - using (var context = RetryableReadContext.Create(binding, _retryRequested, cancellationToken)) + using (var context = RetryableReadContext.Create(operationContext, binding, _retryRequested)) { var operation = CreateCountOperation(); - return operation.Execute(context, cancellationToken); + return operation.Execute(operationContext, context); } } - public async Task<long> ExecuteAsync(IReadBinding binding, CancellationToken cancellationToken) + public async Task<long> ExecuteAsync(OperationContext operationContext, IReadBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); using (BeginOperation()) - using (var context = RetryableReadContext.Create(binding, _retryRequested, cancellationToken)) + using (var context = RetryableReadContext.Create(operationContext, binding, _retryRequested)) { var operation = CreateCountOperation(); - return await operation.ExecuteAsync(context, cancellationToken).ConfigureAwait(false); + return await operation.ExecuteAsync(operationContext, context).ConfigureAwait(false); } } diff --git a/src/MongoDB.Driver/Core/Operations/EvalOperation.cs b/src/MongoDB.Driver/Core/Operations/EvalOperation.cs deleted file mode 100644 index 218d4b91e11..00000000000 --- a/src/MongoDB.Driver/Core/Operations/EvalOperation.cs +++ /dev/null @@ -1,113 +0,0 @@ -/* Copyright 2013-present MongoDB Inc. 
-* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ - -using System; -using System.Collections.Generic; -using System.Threading; -using System.Threading.Tasks; -using MongoDB.Bson; -using MongoDB.Bson.Serialization.Serializers; -using MongoDB.Driver.Core.Bindings; -using MongoDB.Driver.Core.Misc; -using MongoDB.Driver.Core.WireProtocol.Messages.Encoders; - -namespace MongoDB.Driver.Core.Operations -{ - internal sealed class EvalOperation : IWriteOperation<BsonValue> - { - private IEnumerable<BsonValue> _args; - private readonly DatabaseNamespace _databaseNamespace; - private readonly BsonJavaScript _function; - private TimeSpan? _maxTime; - private readonly MessageEncoderSettings _messageEncoderSettings; - private bool? _noLock; - - public EvalOperation( - DatabaseNamespace databaseNamespace, - BsonJavaScript function, - MessageEncoderSettings messageEncoderSettings) - { - _databaseNamespace = Ensure.IsNotNull(databaseNamespace, nameof(databaseNamespace)); - _function = Ensure.IsNotNull(function, nameof(function)); - _messageEncoderSettings = messageEncoderSettings; - } - - public IEnumerable<BsonValue> Args - { - get { return _args; } - set { _args = value; } - } - - public DatabaseNamespace DatabaseNamespace - { - get { return _databaseNamespace; } - } - - public BsonJavaScript Function - { - get { return _function; } - } - - public TimeSpan? 
MaxTime - { - get { return _maxTime; } - set { _maxTime = Ensure.IsNullOrInfiniteOrGreaterThanOrEqualToZero(value, nameof(value)); } - } - - public MessageEncoderSettings MessageEncoderSettings - { - get { return _messageEncoderSettings; } - } - - public bool? NoLock - { - get { return _noLock; } - set { _noLock = value; } - } - - public BsonDocument CreateCommand() - { - return new BsonDocument - { - { "$eval", _function }, - { "args", () => new BsonArray(_args), _args != null }, - { "nolock", () => _noLock.Value, _noLock.HasValue }, - { "maxTimeMS", () => MaxTimeHelper.ToMaxTimeMS(_maxTime.Value), _maxTime.HasValue } - }; - } - - public BsonValue Execute(IWriteBinding binding, CancellationToken cancellationToken) - { - Ensure.IsNotNull(binding, nameof(binding)); - var operation = CreateOperation(); - var result = operation.Execute(binding, cancellationToken); - return result["retval"]; - } - - public async Task<BsonValue> ExecuteAsync(IWriteBinding binding, CancellationToken cancellationToken) - { - Ensure.IsNotNull(binding, nameof(binding)); - var operation = CreateOperation(); - var result = await operation.ExecuteAsync(binding, cancellationToken).ConfigureAwait(false); - return result["retval"]; - } - - private WriteCommandOperation<BsonDocument> CreateOperation() - { - var command = CreateCommand(); - return new WriteCommandOperation<BsonDocument>(_databaseNamespace, command, BsonDocumentSerializer.Instance, _messageEncoderSettings); - } - } -} diff --git a/src/MongoDB.Driver/Core/Operations/FindAndModifyOperationBase.cs b/src/MongoDB.Driver/Core/Operations/FindAndModifyOperationBase.cs index 8bad1138f40..1b346fe013f 100644 --- a/src/MongoDB.Driver/Core/Operations/FindAndModifyOperationBase.cs +++ b/src/MongoDB.Driver/Core/Operations/FindAndModifyOperationBase.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,7 +15,6 @@ using System; using System.Text; -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Bson.IO; @@ -85,39 +84,39 @@ public bool RetryRequested set { _retryRequested = value; } } - public TResult Execute(IWriteBinding binding, CancellationToken cancellationToken) + public TResult Execute(OperationContext operationContext, IWriteBinding binding) { using (BeginOperation()) { - return RetryableWriteOperationExecutor.Execute(this, binding, _retryRequested, cancellationToken); + return RetryableWriteOperationExecutor.Execute(operationContext, this, binding, _retryRequested); } } - public TResult Execute(RetryableWriteContext context, CancellationToken cancellationToken) + public TResult Execute(OperationContext operationContext, RetryableWriteContext context) { using (BeginOperation()) { - return RetryableWriteOperationExecutor.Execute(this, context, cancellationToken); + return RetryableWriteOperationExecutor.Execute(operationContext, this, context); } } - public Task<TResult> ExecuteAsync(IWriteBinding binding, CancellationToken cancellationToken) + public Task<TResult> ExecuteAsync(OperationContext operationContext, IWriteBinding binding) { using (BeginOperation()) - { - return RetryableWriteOperationExecutor.ExecuteAsync(this, binding, _retryRequested, cancellationToken); + { + return RetryableWriteOperationExecutor.ExecuteAsync(operationContext, this, binding, _retryRequested); } } - public Task<TResult> ExecuteAsync(RetryableWriteContext context, CancellationToken cancellationToken) + public Task<TResult> ExecuteAsync(OperationContext operationContext, RetryableWriteContext context) { using (BeginOperation()) { - return RetryableWriteOperationExecutor.ExecuteAsync(this, context, cancellationToken); + return RetryableWriteOperationExecutor.ExecuteAsync(operationContext, this, 
context); } } - public TResult ExecuteAttempt(RetryableWriteContext context, int attempt, long? transactionNumber, CancellationToken cancellationToken) + public TResult ExecuteAttempt(OperationContext operationContext, RetryableWriteContext context, int attempt, long? transactionNumber) { var binding = context.Binding; var channelSource = context.ChannelSource; @@ -125,15 +124,15 @@ public TResult ExecuteAttempt(RetryableWriteContext context, int attempt, long? using (var channelBinding = new ChannelReadWriteBinding(channelSource.Server, channel, binding.Session.Fork())) { - var operation = CreateOperation(channelBinding.Session, channel.ConnectionDescription, transactionNumber); - using (var rawBsonDocument = operation.Execute(channelBinding, cancellationToken)) + var operation = CreateOperation(operationContext, channelBinding.Session, channel.ConnectionDescription, transactionNumber); + using (var rawBsonDocument = operation.Execute(operationContext, channelBinding)) { return ProcessCommandResult(channel.ConnectionDescription.ConnectionId, rawBsonDocument); } } } - public async Task<TResult> ExecuteAttemptAsync(RetryableWriteContext context, int attempt, long? transactionNumber, CancellationToken cancellationToken) + public async Task<TResult> ExecuteAttemptAsync(OperationContext operationContext, RetryableWriteContext context, int attempt, long? 
transactionNumber) { var binding = context.Binding; var channelSource = context.ChannelSource; @@ -141,23 +140,23 @@ public async Task<TResult> ExecuteAttemptAsync(RetryableWriteContext context, in using (var channelBinding = new ChannelReadWriteBinding(channelSource.Server, channel, binding.Session.Fork())) { - var operation = CreateOperation(channelBinding.Session, channel.ConnectionDescription, transactionNumber); - using (var rawBsonDocument = await operation.ExecuteAsync(channelBinding, cancellationToken).ConfigureAwait(false)) + var operation = CreateOperation(operationContext, channelBinding.Session, channel.ConnectionDescription, transactionNumber); + using (var rawBsonDocument = await operation.ExecuteAsync(operationContext, channelBinding).ConfigureAwait(false)) { return ProcessCommandResult(channel.ConnectionDescription.ConnectionId, rawBsonDocument); } } - } - - public abstract BsonDocument CreateCommand(ICoreSessionHandle session, ConnectionDescription connectionDescription, long? transactionNumber); + } + + public abstract BsonDocument CreateCommand(OperationContext operationContext, ICoreSessionHandle session, ConnectionDescription connectionDescription, long? transactionNumber); + + protected abstract IElementNameValidator GetCommandValidator(); - protected abstract IElementNameValidator GetCommandValidator(); - private IDisposable BeginOperation() => EventContext.BeginOperation("findAndModify"); - private WriteCommandOperation<RawBsonDocument> CreateOperation(ICoreSessionHandle session, ConnectionDescription connectionDescription, long? transactionNumber) + private WriteCommandOperation<RawBsonDocument> CreateOperation(OperationContext operationContext, ICoreSessionHandle session, ConnectionDescription connectionDescription, long? 
transactionNumber) { - var command = CreateCommand(session, connectionDescription, transactionNumber); + var command = CreateCommand(operationContext, session, connectionDescription, transactionNumber); return new WriteCommandOperation<RawBsonDocument>(_collectionNamespace.DatabaseNamespace, command, RawBsonDocumentSerializer.Instance, _messageEncoderSettings) { CommandValidator = GetCommandValidator() diff --git a/src/MongoDB.Driver/Core/Operations/FindOneAndDeleteOperation.cs b/src/MongoDB.Driver/Core/Operations/FindOneAndDeleteOperation.cs index a06756a1a6f..7af4bc6b05b 100644 --- a/src/MongoDB.Driver/Core/Operations/FindOneAndDeleteOperation.cs +++ b/src/MongoDB.Driver/Core/Operations/FindOneAndDeleteOperation.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -74,7 +74,7 @@ public BsonDocument Sort set { _sort = value; } } - public override BsonDocument CreateCommand(ICoreSessionHandle session, ConnectionDescription connectionDescription, long? transactionNumber) + public override BsonDocument CreateCommand(OperationContext operationContext, ICoreSessionHandle session, ConnectionDescription connectionDescription, long? 
transactionNumber) { var wireVersion = connectionDescription.MaxWireVersion; FindProjectionChecker.ThrowIfAggregationExpressionIsUsedWhenNotSupported(_projection, wireVersion); @@ -87,7 +87,7 @@ public override BsonDocument CreateCommand(ICoreSessionHandle session, Connectio } } - var writeConcern = WriteConcernHelper.GetEffectiveWriteConcern(session, WriteConcern); + var writeConcern = WriteConcernHelper.GetEffectiveWriteConcern(operationContext, session, WriteConcern); return new BsonDocument { { "findAndModify", CollectionNamespace.CollectionName }, @@ -95,7 +95,7 @@ public override BsonDocument CreateCommand(ICoreSessionHandle session, Connectio { "remove", true }, { "sort", _sort, _sort != null }, { "fields", _projection, _projection != null }, - { "maxTimeMS", () => MaxTimeHelper.ToMaxTimeMS(_maxTime.Value), _maxTime.HasValue }, + { "maxTimeMS", () => MaxTimeHelper.ToMaxTimeMS(_maxTime.Value), _maxTime.HasValue && !operationContext.IsRootContextTimeoutConfigured() }, { "writeConcern", writeConcern, writeConcern != null }, { "collation", () => Collation.ToBsonDocument(), Collation != null }, { "comment", Comment, Comment != null }, diff --git a/src/MongoDB.Driver/Core/Operations/FindOneAndReplaceOperation.cs b/src/MongoDB.Driver/Core/Operations/FindOneAndReplaceOperation.cs index f7aaa59e809..6ddb3a9df04 100644 --- a/src/MongoDB.Driver/Core/Operations/FindOneAndReplaceOperation.cs +++ b/src/MongoDB.Driver/Core/Operations/FindOneAndReplaceOperation.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -103,7 +103,7 @@ public BsonDocument Sort set { _sort = value; } } - public override BsonDocument CreateCommand(ICoreSessionHandle session, ConnectionDescription connectionDescription, long? 
transactionNumber) + public override BsonDocument CreateCommand(OperationContext operationContext, ICoreSessionHandle session, ConnectionDescription connectionDescription, long? transactionNumber) { var wireVersion = connectionDescription.MaxWireVersion; FindProjectionChecker.ThrowIfAggregationExpressionIsUsedWhenNotSupported(_projection, wireVersion); @@ -116,7 +116,7 @@ public override BsonDocument CreateCommand(ICoreSessionHandle session, Connectio } } - var writeConcern = WriteConcernHelper.GetEffectiveWriteConcern(session, WriteConcern); + var writeConcern = WriteConcernHelper.GetEffectiveWriteConcern(operationContext, session, WriteConcern); return new BsonDocument { { "findAndModify", CollectionNamespace.CollectionName }, @@ -126,7 +126,7 @@ public override BsonDocument CreateCommand(ICoreSessionHandle session, Connectio { "sort", _sort, _sort != null }, { "fields", _projection, _projection != null }, { "upsert", true, _isUpsert }, - { "maxTimeMS", () => MaxTimeHelper.ToMaxTimeMS(_maxTime.Value), _maxTime.HasValue }, + { "maxTimeMS", () => MaxTimeHelper.ToMaxTimeMS(_maxTime.Value), _maxTime.HasValue && !operationContext.IsRootContextTimeoutConfigured() }, { "writeConcern", writeConcern, writeConcern != null }, { "bypassDocumentValidation", () => _bypassDocumentValidation.Value, _bypassDocumentValidation.HasValue }, { "collation", () => Collation.ToBsonDocument(), Collation != null }, diff --git a/src/MongoDB.Driver/Core/Operations/FindOneAndUpdateOperation.cs b/src/MongoDB.Driver/Core/Operations/FindOneAndUpdateOperation.cs index 42f1e1702d2..7c4281b74f6 100644 --- a/src/MongoDB.Driver/Core/Operations/FindOneAndUpdateOperation.cs +++ b/src/MongoDB.Driver/Core/Operations/FindOneAndUpdateOperation.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -112,7 +112,7 @@ public BsonValue Update get { return _update; } } - public override BsonDocument CreateCommand(ICoreSessionHandle session, ConnectionDescription connectionDescription, long? transactionNumber) + public override BsonDocument CreateCommand(OperationContext operationContext, ICoreSessionHandle session, ConnectionDescription connectionDescription, long? transactionNumber) { var wireVersion = connectionDescription.MaxWireVersion; FindProjectionChecker.ThrowIfAggregationExpressionIsUsedWhenNotSupported(_projection, wireVersion); @@ -125,7 +125,7 @@ public override BsonDocument CreateCommand(ICoreSessionHandle session, Connectio } } - var writeConcern = WriteConcernHelper.GetEffectiveWriteConcern(session, WriteConcern); + var writeConcern = WriteConcernHelper.GetEffectiveWriteConcern(operationContext, session, WriteConcern); return new BsonDocument { { "findAndModify", CollectionNamespace.CollectionName }, @@ -135,7 +135,7 @@ public override BsonDocument CreateCommand(ICoreSessionHandle session, Connectio { "sort", _sort, _sort != null }, { "fields", _projection, _projection != null }, { "upsert", true, _isUpsert }, - { "maxTimeMS", () => MaxTimeHelper.ToMaxTimeMS(_maxTime.Value), _maxTime.HasValue }, + { "maxTimeMS", () => MaxTimeHelper.ToMaxTimeMS(_maxTime.Value), _maxTime.HasValue && !operationContext.IsRootContextTimeoutConfigured() }, { "writeConcern", writeConcern, writeConcern != null }, { "bypassDocumentValidation", () => _bypassDocumentValidation.Value, _bypassDocumentValidation.HasValue }, { "collation", () => Collation.ToBsonDocument(), Collation != null }, diff --git a/src/MongoDB.Driver/Core/Operations/FindOperation.cs b/src/MongoDB.Driver/Core/Operations/FindOperation.cs index 236476cf4c2..82fa1a31c9f 100644 --- a/src/MongoDB.Driver/Core/Operations/FindOperation.cs +++ b/src/MongoDB.Driver/Core/Operations/FindOperation.cs @@ -1,4 +1,4 @@ -/* Copyright 2015-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,7 +14,6 @@ */ using System; -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Bson.Serialization; @@ -47,7 +46,6 @@ internal sealed class FindOperation<TDocument> : IReadOperation<IAsyncCursor<TDo private BsonValue _comment; private CursorType _cursorType; private BsonDocument _filter; - private int? _firstBatchSize; private BsonValue _hint; private BsonDocument _let; private int? _limit; @@ -126,12 +124,6 @@ public BsonDocument Filter set { _filter = value; } } - public int? FirstBatchSize - { - get { return _firstBatchSize; } - set { _firstBatchSize = Ensure.IsNullOrGreaterThanOrEqualToZero(value, nameof(value)); } - } - public BsonValue Hint { get { return _hint; } @@ -245,41 +237,37 @@ public BsonDocument Sort set { _sort = value; } } - public BsonDocument CreateCommand(ConnectionDescription connectionDescription, ICoreSession session) + public BsonDocument CreateCommand(OperationContext operationContext, ICoreSession session, ConnectionDescription connectionDescription) { var wireVersion = connectionDescription.MaxWireVersion; FindProjectionChecker.ThrowIfAggregationExpressionIsUsedWhenNotSupported(_projection, wireVersion); - var firstBatchSize = _firstBatchSize ?? (_batchSize > 0 ? 
_batchSize : null); - var isShardRouter = connectionDescription.HelloResult.ServerType == ServerType.ShardRouter; - - var effectiveComment = _comment; - var effectiveHint = _hint; - var effectiveMax = _max; - var effectiveMaxTime = _maxTime; - var effectiveMin = _min; - var effectiveReturnKey = _returnKey; - var effectiveShowRecordId = _showRecordId; - var effectiveSort = _sort; + var batchSize = _batchSize; + // https://github.com/mongodb/specifications/blob/668992950d975d3163e538849dd20383a214fc37/source/crud/crud.md?plain=1#L803 + if (batchSize.HasValue && batchSize == _limit) + { + batchSize = _limit + 1; + } + var isShardRouter = connectionDescription.HelloResult.ServerType == ServerType.ShardRouter; var readConcern = ReadConcernHelper.GetReadConcernForCommand(session, connectionDescription, _readConcern); return new BsonDocument { { "find", _collectionNamespace.CollectionName }, { "filter", _filter, _filter != null }, - { "sort", effectiveSort, effectiveSort != null }, + { "sort", _sort, _sort != null }, { "projection", _projection, _projection != null }, - { "hint", effectiveHint, effectiveHint != null }, + { "hint", _hint, _hint != null }, { "skip", () => _skip.Value, _skip.HasValue }, { "limit", () => Math.Abs(_limit.Value), _limit.HasValue && _limit != 0 }, - { "batchSize", () => firstBatchSize.Value, firstBatchSize.HasValue }, + { "batchSize", () => batchSize.Value, batchSize.HasValue && batchSize > 0 }, { "singleBatch", () => _limit < 0 || _singleBatch.Value, _limit < 0 || _singleBatch.HasValue }, - { "comment", effectiveComment, effectiveComment != null }, - { "maxTimeMS", () => MaxTimeHelper.ToMaxTimeMS(effectiveMaxTime.Value), effectiveMaxTime.HasValue }, - { "max", effectiveMax, effectiveMax != null }, - { "min", effectiveMin, effectiveMin != null }, - { "returnKey", () => effectiveReturnKey.Value, effectiveReturnKey.HasValue }, - { "showRecordId", () => effectiveShowRecordId.Value, 
effectiveShowRecordId.HasValue }, + { "comment", _comment, _comment != null }, + { "maxTimeMS", () => MaxTimeHelper.ToMaxTimeMS(_maxTime.Value), _maxTime.HasValue && !operationContext.IsRootContextTimeoutConfigured() }, + { "max", _max, _max != null }, + { "min", _min, _min != null }, + { "returnKey", () => _returnKey.Value, _returnKey.HasValue }, + { "showRecordId", () => _showRecordId.Value, _showRecordId.HasValue }, { "tailable", true, _cursorType == CursorType.Tailable || _cursorType == CursorType.TailableAwait }, { "oplogReplay", () => _oplogReplay.Value, _oplogReplay.HasValue }, { "noCursorTimeout", () => _noCursorTimeout.Value, _noCursorTimeout.HasValue }, @@ -292,48 +280,48 @@ public BsonDocument CreateCommand(ConnectionDescription connectionDescription, I }; } - public IAsyncCursor<TDocument> Execute(IReadBinding binding, CancellationToken cancellationToken = default(CancellationToken)) + public IAsyncCursor<TDocument> Execute(OperationContext operationContext, IReadBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); using (BeginOperation()) - using (var context = RetryableReadContext.Create(binding, _retryRequested, cancellationToken)) + using (var context = RetryableReadContext.Create(operationContext, binding, _retryRequested)) { - return Execute(context, cancellationToken); + return Execute(operationContext, context); } } - public IAsyncCursor<TDocument> Execute(RetryableReadContext context, CancellationToken cancellationToken = default(CancellationToken)) + public IAsyncCursor<TDocument> Execute(OperationContext operationContext, RetryableReadContext context) { Ensure.IsNotNull(context, nameof(context)); using (EventContext.BeginFind(_batchSize, _limit)) { - var operation = CreateOperation(context); - var commandResult = operation.Execute(context, cancellationToken); + var operation = CreateOperation(operationContext, context); + var commandResult = operation.Execute(operationContext, context); return CreateCursor(context.ChannelSource, 
context.Channel, commandResult); } } - public async Task<IAsyncCursor<TDocument>> ExecuteAsync(IReadBinding binding, CancellationToken cancellationToken = default(CancellationToken)) + public async Task<IAsyncCursor<TDocument>> ExecuteAsync(OperationContext operationContext, IReadBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); using (BeginOperation()) - using (var context = await RetryableReadContext.CreateAsync(binding, _retryRequested, cancellationToken).ConfigureAwait(false)) + using (var context = await RetryableReadContext.CreateAsync(operationContext, binding, _retryRequested).ConfigureAwait(false)) { - return await ExecuteAsync(context, cancellationToken).ConfigureAwait(false); + return await ExecuteAsync(operationContext, context).ConfigureAwait(false); } } - public async Task<IAsyncCursor<TDocument>> ExecuteAsync(RetryableReadContext context, CancellationToken cancellationToken = default(CancellationToken)) + public async Task<IAsyncCursor<TDocument>> ExecuteAsync(OperationContext operationContext, RetryableReadContext context) { Ensure.IsNotNull(context, nameof(context)); using (EventContext.BeginFind(_batchSize, _limit)) { - var operation = CreateOperation(context); - var commandResult = await operation.ExecuteAsync(context, cancellationToken).ConfigureAwait(false); + var operation = CreateOperation(operationContext, context); + var commandResult = await operation.ExecuteAsync(operationContext, context).ConfigureAwait(false); return CreateCursor(context.ChannelSource, context.Channel, commandResult); } } @@ -377,9 +365,9 @@ private CursorBatch<TDocument> CreateFirstCursorBatch(BsonDocument cursorDocumen private IDisposable BeginOperation() => EventContext.BeginOperation(null, "find"); - private ReadCommandOperation<BsonDocument> CreateOperation(RetryableReadContext context) + private ReadCommandOperation<BsonDocument> CreateOperation(OperationContext operationContext, RetryableReadContext context) { - var command = 
CreateCommand(context.Channel.ConnectionDescription, context.Binding.Session); + var command = CreateCommand(operationContext, context.Binding.Session, context.Channel.ConnectionDescription); var operation = new ReadCommandOperation<BsonDocument>( _collectionNamespace.DatabaseNamespace, command, diff --git a/src/MongoDB.Driver/Core/Operations/GroupOperation.cs b/src/MongoDB.Driver/Core/Operations/GroupOperation.cs deleted file mode 100644 index 7ae72c2444b..00000000000 --- a/src/MongoDB.Driver/Core/Operations/GroupOperation.cs +++ /dev/null @@ -1,178 +0,0 @@ -/* Copyright 2013-present MongoDB Inc. -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. 
-*/ - -using System; -using System.Collections.Generic; -using System.Threading; -using System.Threading.Tasks; -using MongoDB.Bson; -using MongoDB.Bson.Serialization; -using MongoDB.Bson.Serialization.Serializers; -using MongoDB.Driver.Core.Bindings; -using MongoDB.Driver.Core.Misc; -using MongoDB.Driver.Core.WireProtocol.Messages.Encoders; - -namespace MongoDB.Driver.Core.Operations -{ - internal sealed class GroupOperation<TResult> : IReadOperation<IEnumerable<TResult>> - { - private Collation _collation; - private readonly CollectionNamespace _collectionNamespace; - private readonly BsonDocument _filter; - private BsonJavaScript _finalizeFunction; - private readonly BsonDocument _initial; - private readonly BsonDocument _key; - private readonly BsonJavaScript _keyFunction; - private TimeSpan? _maxTime; - private readonly MessageEncoderSettings _messageEncoderSettings; - private readonly BsonJavaScript _reduceFunction; - private IBsonSerializer<TResult> _resultSerializer; - - public GroupOperation(CollectionNamespace collectionNamespace, BsonDocument key, BsonDocument initial, BsonJavaScript reduceFunction, BsonDocument filter, MessageEncoderSettings messageEncoderSettings) - { - _collectionNamespace = Ensure.IsNotNull(collectionNamespace, nameof(collectionNamespace)); - _key = Ensure.IsNotNull(key, nameof(key)); - _initial = Ensure.IsNotNull(initial, nameof(initial)); - _reduceFunction = Ensure.IsNotNull(reduceFunction, nameof(reduceFunction)); - _filter = filter; // can be null - _messageEncoderSettings = messageEncoderSettings; - } - - public GroupOperation(CollectionNamespace collectionNamespace, BsonJavaScript keyFunction, BsonDocument initial, BsonJavaScript reduceFunction, BsonDocument filter, MessageEncoderSettings messageEncoderSettings) - { - _collectionNamespace = Ensure.IsNotNull(collectionNamespace, nameof(collectionNamespace)); - _keyFunction = Ensure.IsNotNull(keyFunction, nameof(keyFunction)); - _initial = Ensure.IsNotNull(initial, 
nameof(initial)); - _reduceFunction = Ensure.IsNotNull(reduceFunction, nameof(reduceFunction)); - _filter = filter; - _messageEncoderSettings = messageEncoderSettings; - } - - public Collation Collation - { - get { return _collation; } - set { _collation = value; } - } - - public CollectionNamespace CollectionNamespace - { - get { return _collectionNamespace; } - } - - public BsonDocument Filter - { - get { return _filter; } - } - - public BsonJavaScript FinalizeFunction - { - get { return _finalizeFunction; } - set { _finalizeFunction = value; } - } - - public BsonDocument Initial - { - get { return _initial; } - } - - public BsonDocument Key - { - get { return _key; } - } - - public BsonJavaScript KeyFunction - { - get { return _keyFunction; } - } - - public TimeSpan? MaxTime - { - get { return _maxTime; } - set { _maxTime = Ensure.IsNullOrInfiniteOrGreaterThanOrEqualToZero(value, nameof(value)); } - } - - public MessageEncoderSettings MessageEncoderSettings - { - get { return _messageEncoderSettings; } - } - - public BsonJavaScript ReduceFunction - { - get { return _reduceFunction; } - } - - public IBsonSerializer<TResult> ResultSerializer - { - get { return _resultSerializer; } - set { _resultSerializer = value; } - } - - public BsonDocument CreateCommand() - { - return new BsonDocument - { - { "group", new BsonDocument - { - { "ns", _collectionNamespace.CollectionName }, - { "key", _key, _key != null }, - { "$keyf", _keyFunction, _keyFunction != null }, - { "$reduce", _reduceFunction }, - { "initial", _initial }, - { "cond", _filter, _filter != null }, - { "finalize", _finalizeFunction, _finalizeFunction != null }, - { "collation", () => _collation.ToBsonDocument(), _collation != null } - } - }, - { "maxTimeMS", () => MaxTimeHelper.ToMaxTimeMS(_maxTime.Value), _maxTime.HasValue } - }; - } - - public IEnumerable<TResult> Execute(IReadBinding binding, CancellationToken cancellationToken) - { - Ensure.IsNotNull(binding, nameof(binding)); - using (var 
channelSource = binding.GetReadChannelSource(cancellationToken)) - using (var channel = channelSource.GetChannel(cancellationToken)) - using (var channelBinding = new ChannelReadBinding(channelSource.Server, channel, binding.ReadPreference, binding.Session.Fork())) - { - var operation = CreateOperation(); - return operation.Execute(channelBinding, cancellationToken); - } - } - - public async Task<IEnumerable<TResult>> ExecuteAsync(IReadBinding binding, CancellationToken cancellationToken) - { - Ensure.IsNotNull(binding, nameof(binding)); - using (var channelSource = await binding.GetReadChannelSourceAsync(cancellationToken).ConfigureAwait(false)) - using (var channel = await channelSource.GetChannelAsync(cancellationToken).ConfigureAwait(false)) - using (var channelBinding = new ChannelReadBinding(channelSource.Server, channel, binding.ReadPreference, binding.Session.Fork())) - { - var operation = CreateOperation(); - return await operation.ExecuteAsync(channelBinding, cancellationToken).ConfigureAwait(false); - } - } - - private ReadCommandOperation<TResult[]> CreateOperation() - { - var command = CreateCommand(); - var resultSerializer = _resultSerializer ?? BsonSerializer.LookupSerializer<TResult>(); - var resultArraySerializer = new ArraySerializer<TResult>(resultSerializer); - var commandResultSerializer = new ElementDeserializer<TResult[]>("retval", resultArraySerializer); - return new ReadCommandOperation<TResult[]>(_collectionNamespace.DatabaseNamespace, command, commandResultSerializer, _messageEncoderSettings) - { - RetryRequested = false - }; - } - } -} diff --git a/src/MongoDB.Driver/Core/Operations/IOperation.cs b/src/MongoDB.Driver/Core/Operations/IOperation.cs index 06fa1fbabbf..1ccd8dcb2ad 100644 --- a/src/MongoDB.Driver/Core/Operations/IOperation.cs +++ b/src/MongoDB.Driver/Core/Operations/IOperation.cs @@ -13,7 +13,6 @@ * limitations under the License. 
*/ -using System.Threading; using System.Threading.Tasks; using MongoDB.Driver.Core.Bindings; @@ -21,13 +20,13 @@ namespace MongoDB.Driver.Core.Operations { internal interface IReadOperation<TResult> { - TResult Execute(IReadBinding binding, CancellationToken cancellationToken); - Task<TResult> ExecuteAsync(IReadBinding binding, CancellationToken cancellationToken); + TResult Execute(OperationContext operationContext, IReadBinding binding); + Task<TResult> ExecuteAsync(OperationContext operationContext, IReadBinding binding); } internal interface IWriteOperation<TResult> { - TResult Execute(IWriteBinding binding, CancellationToken cancellationToken); - Task<TResult> ExecuteAsync(IWriteBinding binding, CancellationToken cancellationToken); + TResult Execute(OperationContext operationContext, IWriteBinding binding); + Task<TResult> ExecuteAsync(OperationContext operationContext, IWriteBinding binding); } } diff --git a/src/MongoDB.Driver/Core/Operations/IRetryableOperation.cs b/src/MongoDB.Driver/Core/Operations/IRetryableOperation.cs index efa510c1553..d29a8948477 100644 --- a/src/MongoDB.Driver/Core/Operations/IRetryableOperation.cs +++ b/src/MongoDB.Driver/Core/Operations/IRetryableOperation.cs @@ -13,34 +13,33 @@ * limitations under the License. 
*/ -using System.Threading; using System.Threading.Tasks; namespace MongoDB.Driver.Core.Operations { internal interface IExecutableInRetryableReadContext<TResult> { - TResult Execute(RetryableReadContext context, CancellationToken cancellationToken); - Task<TResult> ExecuteAsync(RetryableReadContext context, CancellationToken cancellationToken); + TResult Execute(OperationContext operationContext, RetryableReadContext context); + Task<TResult> ExecuteAsync(OperationContext operationContext, RetryableReadContext context); } internal interface IExecutableInRetryableWriteContext<TResult> { - TResult Execute(RetryableWriteContext context, CancellationToken cancellationToken); - Task<TResult> ExecuteAsync(RetryableWriteContext context, CancellationToken cancellationToken); + TResult Execute(OperationContext operationContext, RetryableWriteContext context); + Task<TResult> ExecuteAsync(OperationContext operationContext, RetryableWriteContext context); } internal interface IRetryableReadOperation<TResult> : IExecutableInRetryableReadContext<TResult> { - TResult ExecuteAttempt(RetryableReadContext context, int attempt, long? transactionNumber, CancellationToken cancellationToken); - Task<TResult> ExecuteAttemptAsync(RetryableReadContext context, int attempt, long? transactionNumber, CancellationToken cancellationToken); + TResult ExecuteAttempt(OperationContext operationContext, RetryableReadContext context, int attempt, long? transactionNumber); + Task<TResult> ExecuteAttemptAsync(OperationContext operationContext, RetryableReadContext context, int attempt, long? transactionNumber); } internal interface IRetryableWriteOperation<TResult> : IExecutableInRetryableWriteContext<TResult> { WriteConcern WriteConcern { get; } - TResult ExecuteAttempt(RetryableWriteContext context, int attempt, long? transactionNumber, CancellationToken cancellationToken); - Task<TResult> ExecuteAttemptAsync(RetryableWriteContext context, int attempt, long? 
transactionNumber, CancellationToken cancellationToken); + TResult ExecuteAttempt(OperationContext operationContext, RetryableWriteContext context, int attempt, long? transactionNumber); + Task<TResult> ExecuteAttemptAsync(OperationContext operationContext, RetryableWriteContext context, int attempt, long? transactionNumber); } } diff --git a/src/MongoDB.Driver/Core/Operations/InsertOpcodeOperation.cs b/src/MongoDB.Driver/Core/Operations/InsertOpcodeOperation.cs deleted file mode 100644 index 71316bccbff..00000000000 --- a/src/MongoDB.Driver/Core/Operations/InsertOpcodeOperation.cs +++ /dev/null @@ -1,161 +0,0 @@ -/* Copyright 2013-present MongoDB Inc. -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ - -using System.Collections.Generic; -using System.Linq; -using System.Threading; -using System.Threading.Tasks; -using MongoDB.Bson.Serialization; -using MongoDB.Driver.Core.Bindings; -using MongoDB.Driver.Core.Events; -using MongoDB.Driver.Core.Misc; -using MongoDB.Driver.Core.WireProtocol.Messages.Encoders; - -namespace MongoDB.Driver.Core.Operations -{ - internal sealed class InsertOpcodeOperation<TDocument> : IWriteOperation<IEnumerable<WriteConcernResult>> - { - private bool? _bypassDocumentValidation; - private readonly CollectionNamespace _collectionNamespace; - private bool _continueOnError; - private readonly IReadOnlyList<TDocument> _documents; - private readonly BatchableSource<TDocument> _documentSource; - private int? 
_maxBatchCount; - private int? _maxDocumentSize; - private int? _maxMessageSize; - private readonly MessageEncoderSettings _messageEncoderSettings; - private bool _retryRequested; - private readonly IBsonSerializer<TDocument> _serializer; - private WriteConcern _writeConcern; - - public InsertOpcodeOperation(CollectionNamespace collectionNamespace, IEnumerable<TDocument> documents, IBsonSerializer<TDocument> serializer, MessageEncoderSettings messageEncoderSettings) - { - _collectionNamespace = Ensure.IsNotNull(collectionNamespace, nameof(collectionNamespace)); - _documents = Ensure.IsNotNull(documents, nameof(documents)).ToList(); - _serializer = Ensure.IsNotNull(serializer, nameof(serializer)); - _messageEncoderSettings = Ensure.IsNotNull(messageEncoderSettings, nameof(messageEncoderSettings)); - _writeConcern = WriteConcern.Acknowledged; - - _documentSource = new BatchableSource<TDocument>(_documents, canBeSplit: true); - } - - public bool? BypassDocumentValidation - { - get { return _bypassDocumentValidation; } - set { _bypassDocumentValidation = value; } - } - - public CollectionNamespace CollectionNamespace - { - get { return _collectionNamespace; } - } - - public bool ContinueOnError - { - get { return _continueOnError; } - set { _continueOnError = value; } - } - - public IReadOnlyList<TDocument> Documents - { - get { return _documents; } - } - - public BatchableSource<TDocument> DocumentSource - { - get { return _documentSource; } - } - - public int? MaxBatchCount - { - get { return _maxBatchCount; } - set { _maxBatchCount = Ensure.IsNullOrGreaterThanZero(value, nameof(value)); } - } - - public int? MaxDocumentSize - { - get { return _maxDocumentSize; } - set { _maxDocumentSize = Ensure.IsNullOrGreaterThanZero(value, nameof(value)); } - } - - public int? 
MaxMessageSize - { - get { return _maxMessageSize; } - set { _maxMessageSize = Ensure.IsNullOrGreaterThanZero(value, nameof(value)); } - } - - public MessageEncoderSettings MessageEncoderSettings - { - get { return _messageEncoderSettings; } - } - - public bool RetryRequested - { - get { return _retryRequested; } - set { _retryRequested = value; } - } - - public IBsonSerializer<TDocument> Serializer - { - get { return _serializer; } - } - - public WriteConcern WriteConcern - { - get { return _writeConcern; } - set { _writeConcern = Ensure.IsNotNull(value, nameof(value)); } - } - - public IEnumerable<WriteConcernResult> Execute(IWriteBinding binding, CancellationToken cancellationToken) - { - Ensure.IsNotNull(binding, nameof(binding)); - - using (EventContext.BeginOperation()) - using (var context = RetryableWriteContext.Create(binding, false, cancellationToken)) - { - var emulator = CreateEmulator(); - var result = emulator.Execute(context, cancellationToken); - return result != null ? new[] { result } : null; - } - } - - public async Task<IEnumerable<WriteConcernResult>> ExecuteAsync(IWriteBinding binding, CancellationToken cancellationToken) - { - Ensure.IsNotNull(binding, nameof(binding)); - - using (EventContext.BeginOperation()) - using (var context = await RetryableWriteContext.CreateAsync(binding, false, cancellationToken).ConfigureAwait(false)) - { - var emulator = CreateEmulator(); - var result = await emulator.ExecuteAsync(context, cancellationToken).ConfigureAwait(false); - return result != null ? 
new[] { result } : null; - } - } - - private InsertOpcodeOperationEmulator<TDocument> CreateEmulator() - { - return new InsertOpcodeOperationEmulator<TDocument>(_collectionNamespace, _serializer, _documentSource, _messageEncoderSettings) - { - BypassDocumentValidation = _bypassDocumentValidation, - ContinueOnError = _continueOnError, - MaxBatchCount = _maxBatchCount, - MaxDocumentSize = _maxDocumentSize, - MaxMessageSize = _maxMessageSize, - RetryRequested = _retryRequested, - WriteConcern = _writeConcern - }; - } - } -} diff --git a/src/MongoDB.Driver/Core/Operations/InsertOpcodeOperationEmulator.cs b/src/MongoDB.Driver/Core/Operations/InsertOpcodeOperationEmulator.cs deleted file mode 100644 index 7b22092b54e..00000000000 --- a/src/MongoDB.Driver/Core/Operations/InsertOpcodeOperationEmulator.cs +++ /dev/null @@ -1,203 +0,0 @@ -/* Copyright 2013-present MongoDB Inc. -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ - -using System; -using System.Linq; -using System.Threading; -using System.Threading.Tasks; -using MongoDB.Bson; -using MongoDB.Bson.Serialization; -using MongoDB.Driver.Core.Bindings; -using MongoDB.Driver.Core.Misc; -using MongoDB.Driver.Core.WireProtocol.Messages.Encoders; - -namespace MongoDB.Driver.Core.Operations -{ - internal class InsertOpcodeOperationEmulator<TDocument> : IExecutableInRetryableWriteContext<WriteConcernResult> - { - // fields - private bool? 
_bypassDocumentValidation; - private readonly CollectionNamespace _collectionNamespace; - private bool _continueOnError; - private readonly BatchableSource<TDocument> _documentSource; - private int? _maxBatchCount; - private int? _maxDocumentSize; - private int? _maxMessageSize; - private readonly MessageEncoderSettings _messageEncoderSettings; - private bool _retryRequested; - private readonly IBsonSerializer<TDocument> _serializer; - private WriteConcern _writeConcern = WriteConcern.Acknowledged; - - // constructors - public InsertOpcodeOperationEmulator( - CollectionNamespace collectionNamespace, - IBsonSerializer<TDocument> serializer, - BatchableSource<TDocument> documentSource, - MessageEncoderSettings messageEncoderSettings) - { - _collectionNamespace = Ensure.IsNotNull(collectionNamespace, nameof(collectionNamespace)); - _serializer = Ensure.IsNotNull(serializer, nameof(serializer)); - _documentSource = Ensure.IsNotNull(documentSource, nameof(documentSource)); - _messageEncoderSettings = messageEncoderSettings; - - if (documentSource.Items.Skip(documentSource.Offset).Take(documentSource.Count).Any(d => d == null)) - { - throw new ArgumentException("Batch contains one or more null documents."); - } - } - - // properties - public bool? BypassDocumentValidation - { - get { return _bypassDocumentValidation; } - set { _bypassDocumentValidation = value; } - } - - public CollectionNamespace CollectionNamespace - { - get { return _collectionNamespace; } - } - - public bool ContinueOnError - { - get { return _continueOnError; } - set { _continueOnError = value; } - } - - public BatchableSource<TDocument> DocumentSource - { - get { return _documentSource; } - } - - public int? MaxBatchCount - { - get { return _maxBatchCount; } - set { _maxBatchCount = Ensure.IsNullOrGreaterThanZero(value, nameof(value)); } - } - - public int? 
MaxDocumentSize - { - get { return _maxDocumentSize; } - set { _maxDocumentSize = Ensure.IsNullOrGreaterThanZero(value, nameof(value)); } - } - - public int? MaxMessageSize - { - get { return _maxMessageSize; } - set { _maxMessageSize = Ensure.IsNullOrGreaterThanZero(value, nameof(value)); } - } - - public MessageEncoderSettings MessageEncoderSettings - { - get { return _messageEncoderSettings; } - } - - public bool RetryRequested - { - get { return _retryRequested; } - set { _retryRequested = value; } - } - - public IBsonSerializer<TDocument> Serializer - { - get { return _serializer; } - } - - public WriteConcern WriteConcern - { - get { return _writeConcern; } - set { _writeConcern = Ensure.IsNotNull(value, nameof(value)); } - } - - // public methods - public WriteConcernResult Execute(RetryableWriteContext context, CancellationToken cancellationToken) - { - Ensure.IsNotNull(context, nameof(context)); - - var operation = CreateOperation(); - BulkWriteOperationResult result; - MongoBulkWriteOperationException exception = null; - try - { - result = operation.Execute(context, cancellationToken); - } - catch (MongoBulkWriteOperationException ex) - { - result = ex.Result; - exception = ex; - } - - return CreateResultOrThrow(context.Channel, result, exception); - } - - public async Task<WriteConcernResult> ExecuteAsync(RetryableWriteContext context, CancellationToken cancellationToken) - { - Ensure.IsNotNull(context, nameof(context)); - - var operation = CreateOperation(); - BulkWriteOperationResult result; - MongoBulkWriteOperationException exception = null; - try - { - result = await operation.ExecuteAsync(context, cancellationToken).ConfigureAwait(false); - } - catch (MongoBulkWriteOperationException ex) - { - result = ex.Result; - exception = ex; - } - - return CreateResultOrThrow(context.Channel, result, exception); - } - - // private methods - private BulkInsertOperation CreateOperation() - { - var requests = _documentSource.GetBatchItems().Select(d => new 
InsertRequest(new BsonDocumentWrapper(d, _serializer))); - - return new BulkInsertOperation(_collectionNamespace, requests, _messageEncoderSettings) - { - BypassDocumentValidation = _bypassDocumentValidation, - IsOrdered = !_continueOnError, - MaxBatchCount = _maxBatchCount, - MaxBatchLength = _maxMessageSize, - // ReaderSettings = ? - RetryRequested = _retryRequested, - WriteConcern = _writeConcern, - // WriteSettings = ? - }; - } - - private WriteConcernResult CreateResultOrThrow(IChannel channel, BulkWriteOperationResult result, MongoBulkWriteOperationException exception) - { - var converter = new BulkWriteOperationResultConverter(); - if (exception != null) - { - throw converter.ToWriteConcernException(channel.ConnectionDescription.ConnectionId, exception); - } - else - { - if (_writeConcern.IsAcknowledged) - { - return converter.ToWriteConcernResult(result); - } - else - { - return null; - } - } - } - } -} diff --git a/src/MongoDB.Driver/Core/Operations/InsertRequest.cs b/src/MongoDB.Driver/Core/Operations/InsertRequest.cs index 90a3ef331ff..28c2c820f9b 100644 --- a/src/MongoDB.Driver/Core/Operations/InsertRequest.cs +++ b/src/MongoDB.Driver/Core/Operations/InsertRequest.cs @@ -14,7 +14,6 @@ */ using MongoDB.Bson; -using MongoDB.Driver.Core.Connections; using MongoDB.Driver.Core.Misc; namespace MongoDB.Driver.Core.Operations @@ -32,6 +31,6 @@ public InsertRequest(BsonDocument document) public BsonDocument Document { get; } // public methods - public override bool IsRetryable(ConnectionDescription connectionDescription) => true; + public override bool IsRetryable() => true; } } diff --git a/src/MongoDB.Driver/Core/Operations/ListCollectionsOperation.cs b/src/MongoDB.Driver/Core/Operations/ListCollectionsOperation.cs index e25b97bbbd7..320c1c3ea71 100644 --- a/src/MongoDB.Driver/Core/Operations/ListCollectionsOperation.cs +++ b/src/MongoDB.Driver/Core/Operations/ListCollectionsOperation.cs @@ -15,7 +15,6 @@ using System; using System.Linq; -using 
System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Bson.Serialization.Serializers; @@ -91,52 +90,52 @@ public bool RetryRequested set => _retryRequested = value; } - public IAsyncCursor<BsonDocument> Execute(IReadBinding binding, CancellationToken cancellationToken) + public IAsyncCursor<BsonDocument> Execute(OperationContext operationContext, IReadBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); using (BeginOperation()) { - using (var context = RetryableReadContext.Create(binding, _retryRequested, cancellationToken)) + using (var context = RetryableReadContext.Create(operationContext, binding, _retryRequested)) { - return Execute(context, cancellationToken); + return Execute(operationContext, context); } } } - public IAsyncCursor<BsonDocument> Execute(RetryableReadContext context, CancellationToken cancellationToken) + public IAsyncCursor<BsonDocument> Execute(OperationContext operationContext, RetryableReadContext context) { Ensure.IsNotNull(context, nameof(context)); using (BeginOperation()) { var operation = CreateOperation(); - var result = operation.Execute(context, cancellationToken); + var result = operation.Execute(operationContext, context); return CreateCursor(context.ChannelSource, context.Channel, result); } } - public async Task<IAsyncCursor<BsonDocument>> ExecuteAsync(IReadBinding binding, CancellationToken cancellationToken) + public async Task<IAsyncCursor<BsonDocument>> ExecuteAsync(OperationContext operationContext, IReadBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); using (BeginOperation()) { - using (var context = await RetryableReadContext.CreateAsync(binding, _retryRequested, cancellationToken).ConfigureAwait(false)) + using (var context = await RetryableReadContext.CreateAsync(operationContext, binding, _retryRequested).ConfigureAwait(false)) { - return await ExecuteAsync(context, cancellationToken).ConfigureAwait(false); + return await ExecuteAsync(operationContext, 
context).ConfigureAwait(false); } } } - public async Task<IAsyncCursor<BsonDocument>> ExecuteAsync(RetryableReadContext context, CancellationToken cancellationToken) + public async Task<IAsyncCursor<BsonDocument>> ExecuteAsync(OperationContext operationContext, RetryableReadContext context) { Ensure.IsNotNull(context, nameof(context)); using (BeginOperation()) { var operation = CreateOperation(); - var result = await operation.ExecuteAsync(context, cancellationToken).ConfigureAwait(false); + var result = await operation.ExecuteAsync(operationContext, context).ConfigureAwait(false); return CreateCursor(context.ChannelSource, context.Channel, result); } } diff --git a/src/MongoDB.Driver/Core/Operations/ListDatabasesOperation.cs b/src/MongoDB.Driver/Core/Operations/ListDatabasesOperation.cs index fb9ec4fbfa5..e55151e5b5c 100644 --- a/src/MongoDB.Driver/Core/Operations/ListDatabasesOperation.cs +++ b/src/MongoDB.Driver/Core/Operations/ListDatabasesOperation.cs @@ -15,7 +15,6 @@ using System; using System.Linq; -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Bson.Serialization.Serializers; @@ -73,8 +72,8 @@ public bool RetryRequested { get { return _retryRequested; } set { _retryRequested = value; } - } - + } + public BsonDocument CreateCommand() { return new BsonDocument @@ -87,26 +86,26 @@ public BsonDocument CreateCommand() }; } - public IAsyncCursor<BsonDocument> Execute(IReadBinding binding, CancellationToken cancellationToken) + public IAsyncCursor<BsonDocument> Execute(OperationContext operationContext, IReadBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); using (BeginOperation()) { var operation = CreateOperation(); - var reply = operation.Execute(binding, cancellationToken); + var reply = operation.Execute(operationContext, binding); return CreateCursor(reply); } } - public async Task<IAsyncCursor<BsonDocument>> ExecuteAsync(IReadBinding binding, CancellationToken cancellationToken) + public async 
Task<IAsyncCursor<BsonDocument>> ExecuteAsync(OperationContext operationContext, IReadBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); using (BeginOperation()) { var operation = CreateOperation(); - var reply = await operation.ExecuteAsync(binding, cancellationToken).ConfigureAwait(false); + var reply = await operation.ExecuteAsync(operationContext, binding).ConfigureAwait(false); return CreateCursor(reply); } } diff --git a/src/MongoDB.Driver/Core/Operations/ListIndexesOperation.cs b/src/MongoDB.Driver/Core/Operations/ListIndexesOperation.cs index 1bd4fb40102..b91633c4716 100644 --- a/src/MongoDB.Driver/Core/Operations/ListIndexesOperation.cs +++ b/src/MongoDB.Driver/Core/Operations/ListIndexesOperation.cs @@ -14,7 +14,6 @@ */ using System; -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Driver.Core.Bindings; @@ -68,27 +67,27 @@ public bool RetryRequested set => _retryRequested = value; } - public IAsyncCursor<BsonDocument> Execute(IReadBinding binding, CancellationToken cancellationToken) + public IAsyncCursor<BsonDocument> Execute(OperationContext operationContext, IReadBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); using (BeginOperation()) - using (var context = RetryableReadContext.Create(binding, _retryRequested, cancellationToken)) + using (var context = RetryableReadContext.Create(operationContext, binding, _retryRequested)) { var operation = CreateOperation(); - return operation.Execute(context, cancellationToken); + return operation.Execute(operationContext, context); } } - public async Task<IAsyncCursor<BsonDocument>> ExecuteAsync(IReadBinding binding, CancellationToken cancellationToken) + public async Task<IAsyncCursor<BsonDocument>> ExecuteAsync(OperationContext operationContext, IReadBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); using (BeginOperation()) - using (var context = await RetryableReadContext.CreateAsync(binding, _retryRequested, 
cancellationToken).ConfigureAwait(false)) + using (var context = await RetryableReadContext.CreateAsync(operationContext, binding, _retryRequested).ConfigureAwait(false)) { var operation = CreateOperation(); - return await operation.ExecuteAsync(context, cancellationToken).ConfigureAwait(false); + return await operation.ExecuteAsync(operationContext, context).ConfigureAwait(false); } } diff --git a/src/MongoDB.Driver/Core/Operations/ListIndexesUsingCommandOperation.cs b/src/MongoDB.Driver/Core/Operations/ListIndexesUsingCommandOperation.cs index a16ba814dbc..290bbbf00e5 100644 --- a/src/MongoDB.Driver/Core/Operations/ListIndexesUsingCommandOperation.cs +++ b/src/MongoDB.Driver/Core/Operations/ListIndexesUsingCommandOperation.cs @@ -15,7 +15,6 @@ using System.Collections.Generic; using System.Linq; -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Bson.Serialization.Serializers; @@ -70,17 +69,17 @@ public bool RetryRequested set => _retryRequested = value; } - public IAsyncCursor<BsonDocument> Execute(IReadBinding binding, CancellationToken cancellationToken) + public IAsyncCursor<BsonDocument> Execute(OperationContext operationContext, IReadBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); - using (var context = RetryableReadContext.Create(binding, _retryRequested, cancellationToken)) + using (var context = RetryableReadContext.Create(operationContext, binding, _retryRequested)) { - return Execute(context, cancellationToken); + return Execute(operationContext, context); } } - public IAsyncCursor<BsonDocument> Execute(RetryableReadContext context, CancellationToken cancellationToken) + public IAsyncCursor<BsonDocument> Execute(OperationContext operationContext, RetryableReadContext context) { Ensure.IsNotNull(context, nameof(context)); @@ -89,7 +88,7 @@ public IAsyncCursor<BsonDocument> Execute(RetryableReadContext context, Cancella var operation = CreateOperation(); try { - var result = operation.Execute(context, 
cancellationToken); + var result = operation.Execute(operationContext, context); return CreateCursor(context.ChannelSource, context.Channel, result); } catch (MongoCommandException ex) when (IsCollectionNotFoundException(ex)) @@ -99,17 +98,17 @@ public IAsyncCursor<BsonDocument> Execute(RetryableReadContext context, Cancella } } - public async Task<IAsyncCursor<BsonDocument>> ExecuteAsync(IReadBinding binding, CancellationToken cancellationToken) + public async Task<IAsyncCursor<BsonDocument>> ExecuteAsync(OperationContext operationContext, IReadBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); - using (var context = await RetryableReadContext.CreateAsync(binding, _retryRequested, cancellationToken).ConfigureAwait(false)) + using (var context = await RetryableReadContext.CreateAsync(operationContext, binding, _retryRequested).ConfigureAwait(false)) { - return await ExecuteAsync(context, cancellationToken).ConfigureAwait(false); + return await ExecuteAsync(operationContext, context).ConfigureAwait(false); } } - public async Task<IAsyncCursor<BsonDocument>> ExecuteAsync(RetryableReadContext context, CancellationToken cancellationToken) + public async Task<IAsyncCursor<BsonDocument>> ExecuteAsync(OperationContext operationContext, RetryableReadContext context) { Ensure.IsNotNull(context, nameof(context)); @@ -118,7 +117,7 @@ public async Task<IAsyncCursor<BsonDocument>> ExecuteAsync(RetryableReadContext var operation = CreateOperation(); try { - var result = await operation.ExecuteAsync(context, cancellationToken).ConfigureAwait(false); + var result = await operation.ExecuteAsync(operationContext, context).ConfigureAwait(false); return CreateCursor(context.ChannelSource, context.Channel, result); } catch (MongoCommandException ex) when (IsCollectionNotFoundException(ex)) diff --git a/src/MongoDB.Driver/Core/Operations/MapReduceOperation.cs b/src/MongoDB.Driver/Core/Operations/MapReduceOperation.cs index a509cfd33ed..21f2a99d684 100644 --- 
a/src/MongoDB.Driver/Core/Operations/MapReduceOperation.cs +++ b/src/MongoDB.Driver/Core/Operations/MapReduceOperation.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,7 +14,6 @@ */ using System; -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Bson.Serialization; @@ -88,39 +87,39 @@ protected override BsonDocument CreateOutputOptions() } /// <inheritdoc/> - public IAsyncCursor<TResult> Execute(IReadBinding binding, CancellationToken cancellationToken) + public IAsyncCursor<TResult> Execute(OperationContext operationContext, IReadBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); - using (var channelSource = binding.GetReadChannelSource(cancellationToken)) - using (var channel = channelSource.GetChannel(cancellationToken)) + using (var channelSource = binding.GetReadChannelSource(operationContext)) + using (var channel = channelSource.GetChannel(operationContext)) using (var channelBinding = new ChannelReadBinding(channelSource.Server, channel, binding.ReadPreference, binding.Session.Fork())) { - var operation = CreateOperation(channelBinding.Session, channel.ConnectionDescription); - var result = operation.Execute(channelBinding, cancellationToken); + var operation = CreateOperation(operationContext, channelBinding.Session, channel.ConnectionDescription); + var result = operation.Execute(operationContext, channelBinding); return new SingleBatchAsyncCursor<TResult>(result); } } /// <inheritdoc/> - public async Task<IAsyncCursor<TResult>> ExecuteAsync(IReadBinding binding, CancellationToken cancellationToken) + public async Task<IAsyncCursor<TResult>> ExecuteAsync(OperationContext operationContext, IReadBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); - using (var channelSource = await 
binding.GetReadChannelSourceAsync(cancellationToken).ConfigureAwait(false)) - using (var channel = await channelSource.GetChannelAsync(cancellationToken).ConfigureAwait(false)) + using (var channelSource = await binding.GetReadChannelSourceAsync(operationContext).ConfigureAwait(false)) + using (var channel = await channelSource.GetChannelAsync(operationContext).ConfigureAwait(false)) using (var channelBinding = new ChannelReadBinding(channelSource.Server, channel, binding.ReadPreference, binding.Session.Fork())) { - var operation = CreateOperation(channelBinding.Session, channel.ConnectionDescription); - var result = await operation.ExecuteAsync(channelBinding, cancellationToken).ConfigureAwait(false); + var operation = CreateOperation(operationContext, channelBinding.Session, channel.ConnectionDescription); + var result = await operation.ExecuteAsync(operationContext, channelBinding).ConfigureAwait(false); return new SingleBatchAsyncCursor<TResult>(result); } } /// <inheritdoc/> - protected internal override BsonDocument CreateCommand(ICoreSessionHandle session, ConnectionDescription connectionDescription) + protected internal override BsonDocument CreateCommand(OperationContext operationContext, ICoreSessionHandle session, ConnectionDescription connectionDescription) { - var command = base.CreateCommand(session, connectionDescription); + var command = base.CreateCommand(operationContext, session, connectionDescription); var readConcern = ReadConcernHelper.GetReadConcernForCommand(session, connectionDescription, _readConcern); if (readConcern != null) @@ -131,9 +130,9 @@ protected internal override BsonDocument CreateCommand(ICoreSessionHandle sessio return command; } - private ReadCommandOperation<TResult[]> CreateOperation(ICoreSessionHandle session, ConnectionDescription connectionDescription) + private ReadCommandOperation<TResult[]> CreateOperation(OperationContext operationContext, ICoreSessionHandle session, ConnectionDescription connectionDescription) { - 
var command = CreateCommand(session, connectionDescription); + var command = CreateCommand(operationContext, session, connectionDescription); var resultArraySerializer = new ArraySerializer<TResult>(_resultSerializer); var resultSerializer = new ElementDeserializer<TResult[]>("results", resultArraySerializer); return new ReadCommandOperation<TResult[]>(CollectionNamespace.DatabaseNamespace, command, resultSerializer, MessageEncoderSettings) diff --git a/src/MongoDB.Driver/Core/Operations/MapReduceOperationBase.cs b/src/MongoDB.Driver/Core/Operations/MapReduceOperationBase.cs index 5840e28ac78..f1f455352bc 100644 --- a/src/MongoDB.Driver/Core/Operations/MapReduceOperationBase.cs +++ b/src/MongoDB.Driver/Core/Operations/MapReduceOperationBase.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -221,15 +221,7 @@ public bool? Verbose } // methods - /// <summary> - /// Creates the command. - /// </summary> - /// <param name="session">The session.</param> - /// <param name="connectionDescription">The connection description.</param> - /// <returns> - /// The command. 
- /// </returns> - protected internal virtual BsonDocument CreateCommand(ICoreSessionHandle session, ConnectionDescription connectionDescription) + protected internal virtual BsonDocument CreateCommand(OperationContext operationContext, ICoreSessionHandle session, ConnectionDescription connectionDescription) { return new BsonDocument { @@ -244,7 +236,7 @@ protected internal virtual BsonDocument CreateCommand(ICoreSessionHandle session { "scope", _scope, _scope != null }, { "jsMode", () => _javaScriptMode.Value, _javaScriptMode.HasValue }, { "verbose", () => _verbose.Value, _verbose.HasValue }, - { "maxTimeMS", () => MaxTimeHelper.ToMaxTimeMS(_maxTime.Value), _maxTime.HasValue }, + { "maxTimeMS", () => MaxTimeHelper.ToMaxTimeMS(_maxTime.Value), _maxTime.HasValue && !operationContext.IsRootContextTimeoutConfigured() }, { "collation", () => _collation.ToBsonDocument(), _collation != null } }; } diff --git a/src/MongoDB.Driver/Core/Operations/MapReduceOutputToCollectionOperation.cs b/src/MongoDB.Driver/Core/Operations/MapReduceOutputToCollectionOperation.cs index 53e787daf15..475b628a8e2 100644 --- a/src/MongoDB.Driver/Core/Operations/MapReduceOutputToCollectionOperation.cs +++ b/src/MongoDB.Driver/Core/Operations/MapReduceOutputToCollectionOperation.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -14,7 +14,6 @@ */ using System; -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Bson.Serialization.Serializers; @@ -140,15 +139,15 @@ public WriteConcern WriteConcern // methods /// <inheritdoc/> - protected internal override BsonDocument CreateCommand(ICoreSessionHandle session, ConnectionDescription connectionDescription) + protected internal override BsonDocument CreateCommand(OperationContext operationContext, ICoreSessionHandle session, ConnectionDescription connectionDescription) { - var command = base.CreateCommand(session, connectionDescription); + var command = base.CreateCommand(operationContext, session, connectionDescription); if (_bypassDocumentValidation.HasValue) { command.Add("bypassDocumentValidation", _bypassDocumentValidation.Value); } - var writeConcern = WriteConcernHelper.GetEffectiveWriteConcern(session, _writeConcern); + var writeConcern = WriteConcernHelper.GetEffectiveWriteConcern(operationContext, session, _writeConcern); if (writeConcern != null) { command.Add("writeConcern", writeConcern.ToBsonDocument()); @@ -170,36 +169,36 @@ protected override BsonDocument CreateOutputOptions() } /// <inheritdoc/> - public BsonDocument Execute(IWriteBinding binding, CancellationToken cancellationToken) + public BsonDocument Execute(OperationContext operationContext, IWriteBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); - using (var channelSource = binding.GetWriteChannelSource(cancellationToken)) - using (var channel = channelSource.GetChannel(cancellationToken)) + using (var channelSource = binding.GetWriteChannelSource(operationContext)) + using (var channel = channelSource.GetChannel(operationContext)) using (var channelBinding = new ChannelReadWriteBinding(channelSource.Server, channel, binding.Session.Fork())) { - var operation = CreateOperation(channelBinding.Session, channel.ConnectionDescription); - return operation.Execute(channelBinding, cancellationToken); + var operation = 
CreateOperation(operationContext, channelBinding.Session, channel.ConnectionDescription); + return operation.Execute(operationContext, channelBinding); } } /// <inheritdoc/> - public async Task<BsonDocument> ExecuteAsync(IWriteBinding binding, CancellationToken cancellationToken) + public async Task<BsonDocument> ExecuteAsync(OperationContext operationContext, IWriteBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); - using (var channelSource = await binding.GetWriteChannelSourceAsync(cancellationToken).ConfigureAwait(false)) - using (var channel = await channelSource.GetChannelAsync(cancellationToken).ConfigureAwait(false)) + using (var channelSource = await binding.GetWriteChannelSourceAsync(operationContext).ConfigureAwait(false)) + using (var channel = await channelSource.GetChannelAsync(operationContext).ConfigureAwait(false)) using (var channelBinding = new ChannelReadWriteBinding(channelSource.Server, channel, binding.Session.Fork())) { - var operation = CreateOperation(channelBinding.Session, channel.ConnectionDescription); - return await operation.ExecuteAsync(channelBinding, cancellationToken).ConfigureAwait(false); + var operation = CreateOperation(operationContext, channelBinding.Session, channel.ConnectionDescription); + return await operation.ExecuteAsync(operationContext, channelBinding).ConfigureAwait(false); } } - private WriteCommandOperation<BsonDocument> CreateOperation(ICoreSessionHandle session, ConnectionDescription connectionDescription) + private WriteCommandOperation<BsonDocument> CreateOperation(OperationContext operationContext, ICoreSessionHandle session, ConnectionDescription connectionDescription) { - var command = CreateCommand(session, connectionDescription); + var command = CreateCommand(operationContext, session, connectionDescription); return new WriteCommandOperation<BsonDocument>(CollectionNamespace.DatabaseNamespace, command, BsonDocumentSerializer.Instance, MessageEncoderSettings); } } diff --git 
a/src/MongoDB.Driver/Core/Operations/OperationExtensionMethods.cs b/src/MongoDB.Driver/Core/Operations/OperationExtensionMethods.cs index bf9fe96b58b..fe54eb7d637 100644 --- a/src/MongoDB.Driver/Core/Operations/OperationExtensionMethods.cs +++ b/src/MongoDB.Driver/Core/Operations/OperationExtensionMethods.cs @@ -13,7 +13,6 @@ * limitations under the License. */ -using System.Threading; using System.Threading.Tasks; using MongoDB.Driver.Core.Bindings; using MongoDB.Driver.Core.Misc; @@ -24,55 +23,55 @@ internal static class OperationExtensionMethods { public static TResult Execute<TResult>( this IReadOperation<TResult> operation, + OperationContext operationContext, IChannelSourceHandle channelSource, ReadPreference readPreference, - ICoreSessionHandle session, - CancellationToken cancellationToken) + ICoreSessionHandle session) { Ensure.IsNotNull(operation, nameof(operation)); using (var readBinding = new ChannelSourceReadWriteBinding(channelSource.Fork(), readPreference, session.Fork())) { - return operation.Execute(readBinding, cancellationToken); + return operation.Execute(operationContext, readBinding); } } public static TResult Execute<TResult>( this IWriteOperation<TResult> operation, + OperationContext operationContext, IChannelSourceHandle channelSource, - ICoreSessionHandle session, - CancellationToken cancellationToken) + ICoreSessionHandle session) { Ensure.IsNotNull(operation, nameof(operation)); using (var writeBinding = new ChannelSourceReadWriteBinding(channelSource.Fork(), ReadPreference.Primary, session.Fork())) { - return operation.Execute(writeBinding, cancellationToken); + return operation.Execute(operationContext, writeBinding); } } public static async Task<TResult> ExecuteAsync<TResult>( this IReadOperation<TResult> operation, + OperationContext operationContext, IChannelSourceHandle channelSource, ReadPreference readPreference, - ICoreSessionHandle session, - CancellationToken cancellationToken) + ICoreSessionHandle session) { 
Ensure.IsNotNull(operation, nameof(operation)); using (var readBinding = new ChannelSourceReadWriteBinding(channelSource.Fork(), readPreference, session.Fork())) { - return await operation.ExecuteAsync(readBinding, cancellationToken).ConfigureAwait(false); + return await operation.ExecuteAsync(operationContext, readBinding).ConfigureAwait(false); } } public static async Task<TResult> ExecuteAsync<TResult>( this IWriteOperation<TResult> operation, + OperationContext operationContext, IChannelSourceHandle channelSource, - ICoreSessionHandle session, - CancellationToken cancellationToken) + ICoreSessionHandle session) { Ensure.IsNotNull(operation, nameof(operation)); using (var writeBinding = new ChannelSourceReadWriteBinding(channelSource.Fork(), ReadPreference.Primary, session.Fork())) { - return await operation.ExecuteAsync(writeBinding, cancellationToken).ConfigureAwait(false); + return await operation.ExecuteAsync(operationContext, writeBinding).ConfigureAwait(false); } } } diff --git a/src/MongoDB.Driver/Core/Operations/PingOperation.cs b/src/MongoDB.Driver/Core/Operations/PingOperation.cs deleted file mode 100644 index cf86b18243c..00000000000 --- a/src/MongoDB.Driver/Core/Operations/PingOperation.cs +++ /dev/null @@ -1,71 +0,0 @@ -/* Copyright 2013-present MongoDB Inc. -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. 
-*/ - -using System.Threading; -using System.Threading.Tasks; -using MongoDB.Bson; -using MongoDB.Bson.Serialization.Serializers; -using MongoDB.Driver.Core.Bindings; -using MongoDB.Driver.Core.Misc; -using MongoDB.Driver.Core.WireProtocol.Messages.Encoders; - -namespace MongoDB.Driver.Core.Operations -{ - internal sealed class PingOperation : IReadOperation<BsonDocument> - { - private MessageEncoderSettings _messageEncoderSettings; - - public PingOperation(MessageEncoderSettings messageEncoderSettings) - { - _messageEncoderSettings = messageEncoderSettings; - } - - public MessageEncoderSettings MessageEncoderSettings - { - get { return _messageEncoderSettings; } - } - - public BsonDocument CreateCommand() - { - return new BsonDocument - { - { "ping", 1 } - }; - } - - public BsonDocument Execute(IReadBinding binding, CancellationToken cancellationToken) - { - Ensure.IsNotNull(binding, nameof(binding)); - var operation = CreateOperation(); - return operation.Execute(binding, cancellationToken); - } - - public async Task<BsonDocument> ExecuteAsync(IReadBinding binding, CancellationToken cancellationToken) - { - Ensure.IsNotNull(binding, nameof(binding)); - var operation = CreateOperation(); - return await operation.ExecuteAsync(binding, cancellationToken).ConfigureAwait(false); - } - - private ReadCommandOperation<BsonDocument> CreateOperation() - { - var command = CreateCommand(); - return new ReadCommandOperation<BsonDocument>(DatabaseNamespace.Admin, command, BsonDocumentSerializer.Instance, _messageEncoderSettings) - { - RetryRequested = false - }; - } - } -} diff --git a/src/MongoDB.Driver/Core/Operations/ReadCommandOperation.cs b/src/MongoDB.Driver/Core/Operations/ReadCommandOperation.cs index 386bc77c4dd..711d31c37df 100644 --- a/src/MongoDB.Driver/Core/Operations/ReadCommandOperation.cs +++ b/src/MongoDB.Driver/Core/Operations/ReadCommandOperation.cs @@ -13,7 +13,6 @@ * limitations under the License. 
*/ -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Bson.Serialization; @@ -43,54 +42,54 @@ public bool RetryRequested set => _retryRequested = value; } - public TCommandResult Execute(IReadBinding binding, CancellationToken cancellationToken) + public TCommandResult Execute(OperationContext operationContext, IReadBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); - using (var context = RetryableReadContext.Create(binding, _retryRequested, cancellationToken)) + using (var context = RetryableReadContext.Create(operationContext, binding, _retryRequested)) { - return Execute(context, cancellationToken); + return Execute(operationContext, context); } } - public TCommandResult Execute(RetryableReadContext context, CancellationToken cancellationToken) + public TCommandResult Execute(OperationContext operationContext, RetryableReadContext context) { Ensure.IsNotNull(context, nameof(context)); using (EventContext.BeginOperation()) { - return RetryableReadOperationExecutor.Execute(this, context, cancellationToken); + return RetryableReadOperationExecutor.Execute(operationContext, this, context); } } - public async Task<TCommandResult> ExecuteAsync(IReadBinding binding, CancellationToken cancellationToken) + public async Task<TCommandResult> ExecuteAsync(OperationContext operationContext, IReadBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); - using (var context = await RetryableReadContext.CreateAsync(binding, _retryRequested, cancellationToken).ConfigureAwait(false)) + using (var context = await RetryableReadContext.CreateAsync(operationContext, binding, _retryRequested).ConfigureAwait(false)) { - return await ExecuteAsync(context, cancellationToken).ConfigureAwait(false); + return await ExecuteAsync(operationContext, context).ConfigureAwait(false); } } - public async Task<TCommandResult> ExecuteAsync(RetryableReadContext context, CancellationToken cancellationToken) + public async Task<TCommandResult> 
ExecuteAsync(OperationContext operationContext, RetryableReadContext context) { Ensure.IsNotNull(context, nameof(context)); using (EventContext.BeginOperation()) { - return await RetryableReadOperationExecutor.ExecuteAsync(this, context, cancellationToken).ConfigureAwait(false); + return await RetryableReadOperationExecutor.ExecuteAsync(operationContext, this, context).ConfigureAwait(false); } } - public TCommandResult ExecuteAttempt(RetryableReadContext context, int attempt, long? transactionNumber, CancellationToken cancellationToken) + public TCommandResult ExecuteAttempt(OperationContext operationContext, RetryableReadContext context, int attempt, long? transactionNumber) { - return ExecuteProtocol(context.Channel, context.Binding.Session, context.Binding.ReadPreference, cancellationToken); + return ExecuteProtocol(operationContext, context.Channel, context.Binding.Session, context.Binding.ReadPreference); } - public Task<TCommandResult> ExecuteAttemptAsync(RetryableReadContext context, int attempt, long? transactionNumber, CancellationToken cancellationToken) + public Task<TCommandResult> ExecuteAttemptAsync(OperationContext operationContext, RetryableReadContext context, int attempt, long? transactionNumber) { - return ExecuteProtocolAsync(context.Channel, context.Binding.Session, context.Binding.ReadPreference, cancellationToken); + return ExecuteProtocolAsync(operationContext, context.Channel, context.Binding.Session, context.Binding.ReadPreference); } } } diff --git a/src/MongoDB.Driver/Core/Operations/RenameCollectionOperation.cs b/src/MongoDB.Driver/Core/Operations/RenameCollectionOperation.cs index 7f5fb113da5..c9b37f6cc02 100644 --- a/src/MongoDB.Driver/Core/Operations/RenameCollectionOperation.cs +++ b/src/MongoDB.Driver/Core/Operations/RenameCollectionOperation.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,7 +14,6 @@ */ using System; -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Bson.Serialization.Serializers; @@ -71,9 +70,9 @@ public WriteConcern WriteConcern set { _writeConcern = value; } } - public BsonDocument CreateCommand(ICoreSessionHandle session, ConnectionDescription connectionDescription) + public BsonDocument CreateCommand(OperationContext operationContext, ICoreSessionHandle session, ConnectionDescription connectionDescription) { - var writeConcern = WriteConcernHelper.GetEffectiveWriteConcern(session, _writeConcern); + var writeConcern = WriteConcernHelper.GetEffectiveWriteConcern(operationContext, session, _writeConcern); return new BsonDocument { { "renameCollection", _collectionNamespace.FullName }, @@ -83,39 +82,39 @@ public BsonDocument CreateCommand(ICoreSessionHandle session, ConnectionDescript }; } - public BsonDocument Execute(IWriteBinding binding, CancellationToken cancellationToken) + public BsonDocument Execute(OperationContext operationContext, IWriteBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); using (BeginOperation()) - using (var channelSource = binding.GetWriteChannelSource(cancellationToken)) - using (var channel = channelSource.GetChannel(cancellationToken)) + using (var channelSource = binding.GetWriteChannelSource(operationContext)) + using (var channel = channelSource.GetChannel(operationContext)) using (var channelBinding = new ChannelReadWriteBinding(channelSource.Server, channel, binding.Session.Fork())) { - var operation = CreateOperation(channelBinding.Session, channel.ConnectionDescription); - return operation.Execute(channelBinding, cancellationToken); + var operation = CreateOperation(operationContext, channelBinding.Session, channel.ConnectionDescription); + return operation.Execute(operationContext, channelBinding); } } - 
public async Task<BsonDocument> ExecuteAsync(IWriteBinding binding, CancellationToken cancellationToken) + public async Task<BsonDocument> ExecuteAsync(OperationContext operationContext, IWriteBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); using (BeginOperation()) - using (var channelSource = await binding.GetWriteChannelSourceAsync(cancellationToken).ConfigureAwait(false)) - using (var channel = await channelSource.GetChannelAsync(cancellationToken).ConfigureAwait(false)) + using (var channelSource = await binding.GetWriteChannelSourceAsync(operationContext).ConfigureAwait(false)) + using (var channel = await channelSource.GetChannelAsync(operationContext).ConfigureAwait(false)) using (var channelBinding = new ChannelReadWriteBinding(channelSource.Server, channel, binding.Session.Fork())) { - var operation = CreateOperation(channelBinding.Session, channel.ConnectionDescription); - return await operation.ExecuteAsync(channelBinding, cancellationToken).ConfigureAwait(false); + var operation = CreateOperation(operationContext, channelBinding.Session, channel.ConnectionDescription); + return await operation.ExecuteAsync(operationContext, channelBinding).ConfigureAwait(false); } - } - + } + private IDisposable BeginOperation() => EventContext.BeginOperation("renameCollection"); - private WriteCommandOperation<BsonDocument> CreateOperation(ICoreSessionHandle session, ConnectionDescription connectionDescription) + private WriteCommandOperation<BsonDocument> CreateOperation(OperationContext operationContext, ICoreSessionHandle session, ConnectionDescription connectionDescription) { - var command = CreateCommand(session, connectionDescription); + var command = CreateCommand(operationContext, session, connectionDescription); return new WriteCommandOperation<BsonDocument>(DatabaseNamespace.Admin, command, BsonDocumentSerializer.Instance, _messageEncoderSettings); } } diff --git a/src/MongoDB.Driver/Core/Operations/RetryabilityHelper.cs 
b/src/MongoDB.Driver/Core/Operations/RetryabilityHelper.cs index fd8f735dff7..0ccde3f9c7d 100644 --- a/src/MongoDB.Driver/Core/Operations/RetryabilityHelper.cs +++ b/src/MongoDB.Driver/Core/Operations/RetryabilityHelper.cs @@ -1,4 +1,4 @@ -/* Copyright 2018-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -135,20 +135,17 @@ public static bool IsResumableChangeStreamException(Exception exception, int max { return exception is MongoException mongoException ? mongoException.HasErrorLabel(ResumableChangeStreamErrorLabel) : false; } - else + + if (exception is MongoCommandException commandException) { - var commandException = exception as MongoCommandException; - if (commandException != null) + var code = (ServerErrorCode)commandException.Code; + if (__resumableChangeStreamErrorCodes.Contains(code)) { - var code = (ServerErrorCode)commandException.Code; - if (__resumableChangeStreamErrorCodes.Contains(code)) - { - return true; - } + return true; } - - return __resumableChangeStreamExceptions.Contains(exception.GetType()); } + + return __resumableChangeStreamExceptions.Contains(exception.GetType()); } /// <summary> diff --git a/src/MongoDB.Driver/Core/Operations/RetryableDeleteCommandOperation.cs b/src/MongoDB.Driver/Core/Operations/RetryableDeleteCommandOperation.cs index 480c152a749..53596ce2af4 100644 --- a/src/MongoDB.Driver/Core/Operations/RetryableDeleteCommandOperation.cs +++ b/src/MongoDB.Driver/Core/Operations/RetryableDeleteCommandOperation.cs @@ -1,4 +1,4 @@ -/* Copyright 2017-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -60,7 +60,7 @@ public BatchableSource<DeleteRequest> Deletes get { return _deletes; } } - protected override BsonDocument CreateCommand(ICoreSessionHandle session, int attempt, long? transactionNumber) + protected override BsonDocument CreateCommand(OperationContext operationContext, ICoreSessionHandle session, int attempt, long? transactionNumber) { if (WriteConcern != null && !WriteConcern.IsAcknowledged) { @@ -70,7 +70,7 @@ protected override BsonDocument CreateCommand(ICoreSessionHandle session, int at } } - var writeConcern = WriteConcernHelper.GetEffectiveWriteConcern(session, WriteConcern); + var writeConcern = WriteConcernHelper.GetEffectiveWriteConcern(operationContext, session, WriteConcern); return new BsonDocument { { "delete", _collectionNamespace.CollectionName }, diff --git a/src/MongoDB.Driver/Core/Operations/RetryableInsertCommandOperation.cs b/src/MongoDB.Driver/Core/Operations/RetryableInsertCommandOperation.cs index 1952ad2d019..cd4b5f696b6 100644 --- a/src/MongoDB.Driver/Core/Operations/RetryableInsertCommandOperation.cs +++ b/src/MongoDB.Driver/Core/Operations/RetryableInsertCommandOperation.cs @@ -1,4 +1,4 @@ -/* Copyright 2017-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -66,9 +66,9 @@ public IBsonSerializer<TDocument> DocumentSerializer get { return _documentSerializer; } } - protected override BsonDocument CreateCommand(ICoreSessionHandle session, int attempt, long? transactionNumber) + protected override BsonDocument CreateCommand(OperationContext operationContext, ICoreSessionHandle session, int attempt, long? 
transactionNumber) { - var writeConcern = WriteConcernHelper.GetEffectiveWriteConcern(session, WriteConcern); + var writeConcern = WriteConcernHelper.GetEffectiveWriteConcern(operationContext, session, WriteConcern); return new BsonDocument { { "insert", _collectionNamespace.CollectionName }, diff --git a/src/MongoDB.Driver/Core/Operations/RetryableReadContext.cs b/src/MongoDB.Driver/Core/Operations/RetryableReadContext.cs index 4593b1c06e8..d1f8f36c15f 100644 --- a/src/MongoDB.Driver/Core/Operations/RetryableReadContext.cs +++ b/src/MongoDB.Driver/Core/Operations/RetryableReadContext.cs @@ -1,4 +1,4 @@ -/* Copyright 2019-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,10 +14,11 @@ */ using System; -using System.Threading; +using System.Collections.Generic; using System.Threading.Tasks; using MongoDB.Driver.Core.Bindings; using MongoDB.Driver.Core.Misc; +using MongoDB.Driver.Core.Servers; namespace MongoDB.Driver.Core.Operations { @@ -25,46 +26,38 @@ internal sealed class RetryableReadContext : IDisposable { #region static - public static RetryableReadContext Create(IReadBinding binding, bool retryRequested, CancellationToken cancellationToken) + public static RetryableReadContext Create(OperationContext operationContext, IReadBinding binding, bool retryRequested) { var context = new RetryableReadContext(binding, retryRequested); try { - context.Initialize(cancellationToken); - - ChannelPinningHelper.PinChannellIfRequired( - context.ChannelSource, - context.Channel, - context.Binding.Session); - - return context; + context.AcquireOrReplaceChannel(operationContext, null); } catch { context.Dispose(); throw; } + + ChannelPinningHelper.PinChannellIfRequired(context.ChannelSource, context.Channel, context.Binding.Session); + return context; } - public static async Task<RetryableReadContext> CreateAsync(IReadBinding 
binding, bool retryRequested, CancellationToken cancellationToken) + public static async Task<RetryableReadContext> CreateAsync(OperationContext operationContext, IReadBinding binding, bool retryRequested) { var context = new RetryableReadContext(binding, retryRequested); try { - await context.InitializeAsync(cancellationToken).ConfigureAwait(false); - - ChannelPinningHelper.PinChannellIfRequired( - context.ChannelSource, - context.Channel, - context.Binding.Session); - - return context; + await context.AcquireOrReplaceChannelAsync(operationContext, null).ConfigureAwait(false); } catch { context.Dispose(); throw; } + + ChannelPinningHelper.PinChannellIfRequired(context.ChannelSource, context.Channel, context.Binding.Session); + return context; } #endregion @@ -97,14 +90,52 @@ public void Dispose() } } - public void ReplaceChannel(IChannelHandle channel) + public void AcquireOrReplaceChannel(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers) + { + var attempt = 1; + while (true) + { + operationContext.ThrowIfTimedOutOrCanceled(); + ReplaceChannelSource(Binding.GetReadChannelSource(operationContext, deprioritizedServers)); + try + { + ReplaceChannel(ChannelSource.GetChannel(operationContext)); + return; + } + catch (Exception ex) when (RetryableReadOperationExecutor.ShouldConnectionAcquireBeRetried(operationContext, this, ex, attempt)) + { + attempt++; + } + } + } + + public async Task AcquireOrReplaceChannelAsync(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers) + { + var attempt = 1; + while (true) + { + operationContext.ThrowIfTimedOutOrCanceled(); + ReplaceChannelSource(await Binding.GetReadChannelSourceAsync(operationContext, deprioritizedServers).ConfigureAwait(false)); + try + { + ReplaceChannel(await ChannelSource.GetChannelAsync(operationContext).ConfigureAwait(false)); + return; + } + catch (Exception ex) when 
(RetryableReadOperationExecutor.ShouldConnectionAcquireBeRetried(operationContext, this, ex, attempt)) + { + attempt++; + } + } + } + + private void ReplaceChannel(IChannelHandle channel) { Ensure.IsNotNull(channel, nameof(channel)); _channel?.Dispose(); _channel = channel; } - public void ReplaceChannelSource(IChannelSourceHandle channelSource) + private void ReplaceChannelSource(IChannelSourceHandle channelSource) { Ensure.IsNotNull(channelSource, nameof(channelSource)); _channelSource?.Dispose(); @@ -112,35 +143,5 @@ public void ReplaceChannelSource(IChannelSourceHandle channelSource) _channelSource = channelSource; _channel = null; } - - private void Initialize(CancellationToken cancellationToken) - { - _channelSource = _binding.GetReadChannelSource(cancellationToken); - - try - { - _channel = _channelSource.GetChannel(cancellationToken); - } - catch (Exception ex) when (RetryableReadOperationExecutor.ShouldConnectionAcquireBeRetried(this, ex)) - { - ReplaceChannelSource(_binding.GetReadChannelSource(cancellationToken)); - ReplaceChannel(_channelSource.GetChannel(cancellationToken)); - } - } - - private async Task InitializeAsync(CancellationToken cancellationToken) - { - _channelSource = await _binding.GetReadChannelSourceAsync(cancellationToken).ConfigureAwait(false); - - try - { - _channel = await _channelSource.GetChannelAsync(cancellationToken).ConfigureAwait(false); - } - catch (Exception ex) when (RetryableReadOperationExecutor.ShouldConnectionAcquireBeRetried(this, ex)) - { - ReplaceChannelSource(await _binding.GetReadChannelSourceAsync(cancellationToken).ConfigureAwait(false)); - ReplaceChannel(await _channelSource.GetChannelAsync(cancellationToken).ConfigureAwait(false)); - } - } } } diff --git a/src/MongoDB.Driver/Core/Operations/RetryableReadOperationExecutor.cs b/src/MongoDB.Driver/Core/Operations/RetryableReadOperationExecutor.cs index cdb74827dc0..77f9479e979 100644 --- a/src/MongoDB.Driver/Core/Operations/RetryableReadOperationExecutor.cs +++ 
b/src/MongoDB.Driver/Core/Operations/RetryableReadOperationExecutor.cs @@ -1,4 +1,4 @@ -/* Copyright 2019-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,122 +14,115 @@ */ using System; -using System.Threading; +using System.Collections.Generic; using System.Threading.Tasks; -using MongoDB.Driver.Core.Bindings; +using MongoDB.Driver.Core.Servers; namespace MongoDB.Driver.Core.Operations { internal static class RetryableReadOperationExecutor { // public static methods - public static TResult Execute<TResult>(IRetryableReadOperation<TResult> operation, IReadBinding binding, bool retryRequested, CancellationToken cancellationToken) + public static TResult Execute<TResult>(OperationContext operationContext, IRetryableReadOperation<TResult> operation, RetryableReadContext context) { - using (var context = RetryableReadContext.Create(binding, retryRequested, cancellationToken)) - { - return Execute(operation, context, cancellationToken); - } - } + HashSet<ServerDescription> deprioritizedServers = null; + var attempt = 1; + Exception originalException = null; - public static TResult Execute<TResult>(IRetryableReadOperation<TResult> operation, RetryableReadContext context, CancellationToken cancellationToken) - { - if (!ShouldReadBeRetried(context)) - { - return operation.ExecuteAttempt(context, attempt: 1, transactionNumber: null, cancellationToken); - } + while (true) // Circle breaking logic based on ShouldRetryOperation method, see the catch block below. + { + operationContext.ThrowIfTimedOutOrCanceled(); + var server = context.ChannelSource.ServerDescription; + try + { + return operation.ExecuteAttempt(operationContext, context, attempt, transactionNumber: null); + } + catch (Exception ex) + { + if (!ShouldRetryOperation(operationContext, context, ex, attempt)) + { + throw originalException ?? 
ex; + } - Exception originalException; - try - { - return operation.ExecuteAttempt(context, attempt: 1, transactionNumber: null, cancellationToken); + originalException ??= ex; + } - } - catch (Exception ex) when (RetryabilityHelper.IsRetryableReadException(ex)) - { - originalException = ex; - } + deprioritizedServers ??= new HashSet<ServerDescription>(); + deprioritizedServers.Add(server); - try - { - context.ReplaceChannelSource(context.Binding.GetReadChannelSource(new[] { context.ChannelSource.ServerDescription }, cancellationToken)); - context.ReplaceChannel(context.ChannelSource.GetChannel(cancellationToken)); - } - catch - { - throw originalException; - } + try + { + context.AcquireOrReplaceChannel(operationContext, deprioritizedServers); + } + catch + { + throw originalException; + } - try - { - return operation.ExecuteAttempt(context, attempt: 2, transactionNumber: null, cancellationToken); - } - catch (Exception ex) when (ShouldThrowOriginalException(ex)) - { - throw originalException; + attempt++; } } - public static async Task<TResult> ExecuteAsync<TResult>(IRetryableReadOperation<TResult> operation, IReadBinding binding, bool retryRequested, CancellationToken cancellationToken) + public static async Task<TResult> ExecuteAsync<TResult>(OperationContext operationContext, IRetryableReadOperation<TResult> operation, RetryableReadContext context) { - using (var context = await RetryableReadContext.CreateAsync(binding, retryRequested, cancellationToken).ConfigureAwait(false)) - { - return await ExecuteAsync(operation, context, cancellationToken).ConfigureAwait(false); - } - } + HashSet<ServerDescription> deprioritizedServers = null; + var attempt = 1; + Exception originalException = null; - public static async Task<TResult> ExecuteAsync<TResult>(IRetryableReadOperation<TResult> operation, RetryableReadContext context, CancellationToken cancellationToken) - { - if (!ShouldReadBeRetried(context)) - { - return await operation.ExecuteAttemptAsync(context, 
attempt: 1, transactionNumber: null, cancellationToken).ConfigureAwait(false); - } + while (true) // Circle breaking logic based on ShouldRetryOperation method, see the catch block below. + { + operationContext.ThrowIfTimedOutOrCanceled(); + var server = context.ChannelSource.ServerDescription; + try + { + return await operation.ExecuteAttemptAsync(operationContext, context, attempt, transactionNumber: null).ConfigureAwait(false); + } + catch (Exception ex) + { + if (!ShouldRetryOperation(operationContext, context, ex, attempt)) + { + throw originalException ?? ex; + } - Exception originalException; - try - { - return await operation.ExecuteAttemptAsync(context, attempt: 1, transactionNumber: null, cancellationToken).ConfigureAwait(false); - } - catch (Exception ex) when (RetryabilityHelper.IsRetryableReadException(ex)) - { - originalException = ex; - } + originalException ??= ex; + } - try - { - context.ReplaceChannelSource(context.Binding.GetReadChannelSource(new[] { context.ChannelSource.ServerDescription }, cancellationToken)); - context.ReplaceChannel(context.ChannelSource.GetChannel(cancellationToken)); - } - catch - { - throw originalException; - } + deprioritizedServers ??= new HashSet<ServerDescription>(); + deprioritizedServers.Add(server); - try - { - return await operation.ExecuteAttemptAsync(context, attempt: 2, transactionNumber: null, cancellationToken).ConfigureAwait(false); - } - catch (Exception ex) when (ShouldThrowOriginalException(ex)) - { - throw originalException; + try + { + await context.AcquireOrReplaceChannelAsync(operationContext, deprioritizedServers).ConfigureAwait(false); + } + catch + { + throw originalException; + } + + attempt++; } } - public static bool ShouldConnectionAcquireBeRetried(RetryableReadContext context, Exception ex) + public static bool ShouldConnectionAcquireBeRetried(OperationContext operationContext, RetryableReadContext context, Exception exception, int attempt) { - // According the spec error during handshake 
should be handle according to RetryableReads logic - var innerException = ex is MongoAuthenticationException mongoAuthenticationException ? mongoAuthenticationException.InnerException : ex; - return context.RetryRequested && !context.Binding.Session.IsInTransaction && RetryabilityHelper.IsRetryableReadException(innerException); + var innerException = exception is MongoAuthenticationException mongoAuthenticationException ? mongoAuthenticationException.InnerException : exception; + return ShouldRetryOperation(operationContext, context, innerException, attempt); } // private static methods - private static bool ShouldReadBeRetried(RetryableReadContext context) + private static bool ShouldRetryOperation(OperationContext operationContext, RetryableReadContext context, Exception exception, int attempt) { - return context.RetryRequested && !context.Binding.Session.IsInTransaction; - } + if (!context.RetryRequested || context.Binding.Session.IsInTransaction) + { + return false; + } - private static bool ShouldThrowOriginalException(Exception retryException) - { - return retryException is MongoException && !(retryException is MongoConnectionException); + if (!RetryabilityHelper.IsRetryableReadException(exception)) + { + return false; + } + + return operationContext.IsRootContextTimeoutConfigured() || attempt < 2; } } } diff --git a/src/MongoDB.Driver/Core/Operations/RetryableUpdateCommandOperation.cs b/src/MongoDB.Driver/Core/Operations/RetryableUpdateCommandOperation.cs index e51e6170872..d7c390e45be 100644 --- a/src/MongoDB.Driver/Core/Operations/RetryableUpdateCommandOperation.cs +++ b/src/MongoDB.Driver/Core/Operations/RetryableUpdateCommandOperation.cs @@ -1,4 +1,4 @@ -/* Copyright 2017-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -67,7 +67,7 @@ public BatchableSource<UpdateRequest> Updates get { return _updates; } } - protected override BsonDocument CreateCommand(ICoreSessionHandle session, int attempt, long? transactionNumber) + protected override BsonDocument CreateCommand(OperationContext operationContext, ICoreSessionHandle session, int attempt, long? transactionNumber) { if (WriteConcern != null && !WriteConcern.IsAcknowledged) { @@ -77,7 +77,7 @@ protected override BsonDocument CreateCommand(ICoreSessionHandle session, int at } } - var writeConcern = WriteConcernHelper.GetEffectiveWriteConcern(session, WriteConcern); + var writeConcern = WriteConcernHelper.GetEffectiveWriteConcern(operationContext, session, WriteConcern); return new BsonDocument { { "update", _collectionNamespace.CollectionName }, diff --git a/src/MongoDB.Driver/Core/Operations/RetryableWriteCommandOperationBase.cs b/src/MongoDB.Driver/Core/Operations/RetryableWriteCommandOperationBase.cs index cd5e3e87dc9..fab21530060 100644 --- a/src/MongoDB.Driver/Core/Operations/RetryableWriteCommandOperationBase.cs +++ b/src/MongoDB.Driver/Core/Operations/RetryableWriteCommandOperationBase.cs @@ -1,4 +1,4 @@ -/* Copyright 2017-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,7 +16,6 @@ using System; using System.Collections.Generic; using System.Linq; -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Bson.IO; @@ -87,37 +86,37 @@ public WriteConcern WriteConcern set { _writeConcern = value; } } - public virtual BsonDocument Execute(IWriteBinding binding, CancellationToken cancellationToken) + public virtual BsonDocument Execute(OperationContext operationContext, IWriteBinding binding) { - using (var context = RetryableWriteContext.Create(binding, _retryRequested, cancellationToken)) + using (var context = RetryableWriteContext.Create(operationContext, binding, _retryRequested)) { - return Execute(context, cancellationToken); + return Execute(operationContext, context); } } - public virtual BsonDocument Execute(RetryableWriteContext context, CancellationToken cancellationToken) + public virtual BsonDocument Execute(OperationContext operationContext, RetryableWriteContext context) { - return RetryableWriteOperationExecutor.Execute(this, context, cancellationToken); + return RetryableWriteOperationExecutor.Execute(operationContext, this, context); } - public virtual async Task<BsonDocument> ExecuteAsync(IWriteBinding binding, CancellationToken cancellationToken) + public virtual async Task<BsonDocument> ExecuteAsync(OperationContext operationContext, IWriteBinding binding) { - using (var context = await RetryableWriteContext.CreateAsync(binding, _retryRequested, cancellationToken).ConfigureAwait(false)) + using (var context = await RetryableWriteContext.CreateAsync(operationContext, binding, _retryRequested).ConfigureAwait(false)) { - return await ExecuteAsync(context, cancellationToken).ConfigureAwait(false); + return await ExecuteAsync(operationContext, context).ConfigureAwait(false); } } - public virtual Task<BsonDocument> ExecuteAsync(RetryableWriteContext context, CancellationToken cancellationToken) + public virtual Task<BsonDocument> ExecuteAsync(OperationContext operationContext, 
RetryableWriteContext context) { - return RetryableWriteOperationExecutor.ExecuteAsync(this, context, cancellationToken); + return RetryableWriteOperationExecutor.ExecuteAsync(operationContext, this, context); } - public BsonDocument ExecuteAttempt(RetryableWriteContext context, int attempt, long? transactionNumber, CancellationToken cancellationToken) + public BsonDocument ExecuteAttempt(OperationContext operationContext, RetryableWriteContext context, int attempt, long? transactionNumber) { - var args = GetCommandArgs(context, attempt, transactionNumber); - + var args = GetCommandArgs(operationContext, context, attempt, transactionNumber); return context.Channel.Command<BsonDocument>( + operationContext, context.ChannelSource.Session, ReadPreference.Primary, _databaseNamespace, @@ -128,15 +127,14 @@ public BsonDocument ExecuteAttempt(RetryableWriteContext context, int attempt, l args.PostWriteAction, args.ResponseHandling, BsonDocumentSerializer.Instance, - args.MessageEncoderSettings, - cancellationToken); + args.MessageEncoderSettings); } - public Task<BsonDocument> ExecuteAttemptAsync(RetryableWriteContext context, int attempt, long? transactionNumber, CancellationToken cancellationToken) + public Task<BsonDocument> ExecuteAttemptAsync(OperationContext operationContext, RetryableWriteContext context, int attempt, long? 
transactionNumber) { - var args = GetCommandArgs(context, attempt, transactionNumber); - + var args = GetCommandArgs(operationContext, context, attempt, transactionNumber); return context.Channel.CommandAsync<BsonDocument>( + operationContext, context.ChannelSource.Session, ReadPreference.Primary, _databaseNamespace, @@ -147,11 +145,10 @@ public Task<BsonDocument> ExecuteAttemptAsync(RetryableWriteContext context, int args.PostWriteAction, args.ResponseHandling, BsonDocumentSerializer.Instance, - args.MessageEncoderSettings, - cancellationToken); + args.MessageEncoderSettings); } - protected abstract BsonDocument CreateCommand(ICoreSessionHandle session, int attempt, long? transactionNumber); + protected abstract BsonDocument CreateCommand(OperationContext operationContext, ICoreSessionHandle session, int attempt, long? transactionNumber); protected abstract IEnumerable<BatchableCommandMessageSection> CreateCommandPayloads(IChannelHandle channel, int attempt); @@ -164,10 +161,10 @@ private MessageEncoderSettings CreateMessageEncoderSettings(IChannelHandle chann return clone; } - private CommandArgs GetCommandArgs(RetryableWriteContext context, int attempt, long? transactionNumber) + private CommandArgs GetCommandArgs(OperationContext operationContext, RetryableWriteContext context, int attempt, long? 
transactionNumber) { var args = new CommandArgs(); - args.Command = CreateCommand(context.Binding.Session, attempt, transactionNumber); + args.Command = CreateCommand(operationContext, context.Binding.Session, attempt, transactionNumber); args.CommandPayloads = CreateCommandPayloads(context.Channel, attempt).ToList(); args.PostWriteAction = GetPostWriteAction(args.CommandPayloads); args.ResponseHandling = GetResponseHandling(); diff --git a/src/MongoDB.Driver/Core/Operations/RetryableWriteContext.cs b/src/MongoDB.Driver/Core/Operations/RetryableWriteContext.cs index 4aa1a3eb9fd..cbf64188d99 100644 --- a/src/MongoDB.Driver/Core/Operations/RetryableWriteContext.cs +++ b/src/MongoDB.Driver/Core/Operations/RetryableWriteContext.cs @@ -1,4 +1,4 @@ -/* Copyright 2017-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,11 +15,10 @@ using System; using System.Collections.Generic; -using System.Linq; -using System.Threading; using System.Threading.Tasks; using MongoDB.Driver.Core.Bindings; using MongoDB.Driver.Core.Misc; +using MongoDB.Driver.Core.Servers; namespace MongoDB.Driver.Core.Operations { @@ -27,46 +26,38 @@ internal sealed class RetryableWriteContext : IDisposable { #region static - public static RetryableWriteContext Create(IWriteBinding binding, bool retryRequested, CancellationToken cancellationToken) + public static RetryableWriteContext Create(OperationContext operationContext, IWriteBinding binding, bool retryRequested) { var context = new RetryableWriteContext(binding, retryRequested); try { - context.Initialize(cancellationToken); - - ChannelPinningHelper.PinChannellIfRequired( - context.ChannelSource, - context.Channel, - context.Binding.Session); - - return context; + context.AcquireOrReplaceChannel(operationContext, null); } catch { context.Dispose(); throw; } + + 
ChannelPinningHelper.PinChannellIfRequired(context.ChannelSource, context.Channel, context.Binding.Session); + return context; } - public static async Task<RetryableWriteContext> CreateAsync(IWriteBinding binding, bool retryRequested, CancellationToken cancellationToken) + public static async Task<RetryableWriteContext> CreateAsync(OperationContext operationContext, IWriteBinding binding, bool retryRequested) { var context = new RetryableWriteContext(binding, retryRequested); try { - await context.InitializeAsync(cancellationToken).ConfigureAwait(false); - - ChannelPinningHelper.PinChannellIfRequired( - context.ChannelSource, - context.Channel, - context.Binding.Session); - - return context; + await context.AcquireOrReplaceChannelAsync(operationContext, null).ConfigureAwait(false); } catch { context.Dispose(); throw; } + + ChannelPinningHelper.PinChannellIfRequired(context.ChannelSource, context.Channel, context.Binding.Session); + return context; } #endregion @@ -89,35 +80,64 @@ public RetryableWriteContext(IWriteBinding binding, bool retryRequested) public IChannelSourceHandle ChannelSource => _channelSource; public bool RetryRequested => _retryRequested; - public void DisableRetriesIfAnyWriteRequestIsNotRetryable(IEnumerable<WriteRequest> requests) + public void Dispose() { - if (_retryRequested) + if (!_disposed) { - if (requests.Any(r => !r.IsRetryable(_channel.ConnectionDescription))) + _channelSource?.Dispose(); + _channel?.Dispose(); + _disposed = true; + } + } + + public void AcquireOrReplaceChannel(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers) + { + var attempt = 1; + while (true) + { + operationContext.ThrowIfTimedOutOrCanceled(); + ReplaceChannelSource(Binding.GetWriteChannelSource(operationContext, deprioritizedServers)); + var server = ChannelSource.ServerDescription; + try + { + ReplaceChannel(ChannelSource.GetChannel(operationContext)); + return; + } + catch (Exception ex) when 
(RetryableWriteOperationExecutor.ShouldConnectionAcquireBeRetried(operationContext, this, server, ex, attempt)) { - _retryRequested = false; + attempt++; } } } - public void Dispose() + public async Task AcquireOrReplaceChannelAsync(OperationContext operationContext, IReadOnlyCollection<ServerDescription> deprioritizedServers) { - if (!_disposed) + var attempt = 1; + while (true) { - _channelSource?.Dispose(); - _channel?.Dispose(); - _disposed = true; + operationContext.ThrowIfTimedOutOrCanceled(); + ReplaceChannelSource(await Binding.GetWriteChannelSourceAsync(operationContext, deprioritizedServers).ConfigureAwait(false)); + var server = ChannelSource.ServerDescription; + try + { + ReplaceChannel(await ChannelSource.GetChannelAsync(operationContext).ConfigureAwait(false)); + return; + } + catch (Exception ex) when (RetryableWriteOperationExecutor.ShouldConnectionAcquireBeRetried(operationContext, this, server, ex, attempt)) + { + attempt++; + } } } - public void ReplaceChannel(IChannelHandle channel) + private void ReplaceChannel(IChannelHandle channel) { Ensure.IsNotNull(channel, nameof(channel)); _channel?.Dispose(); _channel = channel; } - public void ReplaceChannelSource(IChannelSourceHandle channelSource) + private void ReplaceChannelSource(IChannelSourceHandle channelSource) { Ensure.IsNotNull(channelSource, nameof(channelSource)); _channelSource?.Dispose(); @@ -125,37 +145,5 @@ public void ReplaceChannelSource(IChannelSourceHandle channelSource) _channelSource = channelSource; _channel = null; } - - private void Initialize(CancellationToken cancellationToken) - { - _channelSource = _binding.GetWriteChannelSource(cancellationToken); - var serverDescription = _channelSource.ServerDescription; - - try - { - _channel = _channelSource.GetChannel(cancellationToken); - } - catch (Exception ex) when (RetryableWriteOperationExecutor.ShouldConnectionAcquireBeRetried(this, serverDescription, ex)) - { - 
ReplaceChannelSource(_binding.GetWriteChannelSource(cancellationToken)); - ReplaceChannel(_channelSource.GetChannel(cancellationToken)); - } - } - - private async Task InitializeAsync(CancellationToken cancellationToken) - { - _channelSource = await _binding.GetWriteChannelSourceAsync(cancellationToken).ConfigureAwait(false); - var serverDescription = _channelSource.ServerDescription; - - try - { - _channel = await _channelSource.GetChannelAsync(cancellationToken).ConfigureAwait(false); - } - catch (Exception ex) when (RetryableWriteOperationExecutor.ShouldConnectionAcquireBeRetried(this, serverDescription, ex)) - { - ReplaceChannelSource(await _binding.GetWriteChannelSourceAsync(cancellationToken).ConfigureAwait(false)); - ReplaceChannel(await _channelSource.GetChannelAsync(cancellationToken).ConfigureAwait(false)); - } - } } } diff --git a/src/MongoDB.Driver/Core/Operations/RetryableWriteOperationExecutor.cs b/src/MongoDB.Driver/Core/Operations/RetryableWriteOperationExecutor.cs index 6e73af1e758..ff8568bda36 100644 --- a/src/MongoDB.Driver/Core/Operations/RetryableWriteOperationExecutor.cs +++ b/src/MongoDB.Driver/Core/Operations/RetryableWriteOperationExecutor.cs @@ -1,4 +1,4 @@ -/* Copyright 2017-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -14,10 +14,9 @@ */ using System; -using System.Threading; +using System.Collections.Generic; using System.Threading.Tasks; using MongoDB.Driver.Core.Bindings; -using MongoDB.Driver.Core.Connections; using MongoDB.Driver.Core.Servers; namespace MongoDB.Driver.Core.Operations @@ -25,133 +24,151 @@ namespace MongoDB.Driver.Core.Operations internal static class RetryableWriteOperationExecutor { // public static methods - public static TResult Execute<TResult>(IRetryableWriteOperation<TResult> operation, IWriteBinding binding, bool retryRequested, CancellationToken cancellationToken) + public static TResult Execute<TResult>(OperationContext operationContext, IRetryableWriteOperation<TResult> operation, IWriteBinding binding, bool retryRequested) { - using (var context = RetryableWriteContext.Create(binding, retryRequested, cancellationToken)) + using (var context = RetryableWriteContext.Create(operationContext, binding, retryRequested)) { - return Execute(operation, context, cancellationToken); + return Execute(operationContext, operation, context); } } - public static TResult Execute<TResult>(IRetryableWriteOperation<TResult> operation, RetryableWriteContext context, CancellationToken cancellationToken) + public static TResult Execute<TResult>(OperationContext operationContext, IRetryableWriteOperation<TResult> operation, RetryableWriteContext context) { - if (!AreRetriesAllowed(operation, context)) - { - return operation.ExecuteAttempt(context, 1, null, cancellationToken); - } - - var transactionNumber = context.Binding.Session.AdvanceTransactionNumber(); - Exception originalException; - try - { - return operation.ExecuteAttempt(context, 1, transactionNumber, cancellationToken); - } - catch (Exception ex) when (RetryabilityHelper.IsRetryableWriteException(ex)) - { - originalException = ex; - } - - try - { - context.ReplaceChannelSource(context.Binding.GetWriteChannelSource(new[] { context.ChannelSource.ServerDescription }, cancellationToken)); - 
context.ReplaceChannel(context.ChannelSource.GetChannel(cancellationToken)); - } - catch - { - throw originalException; - } - - if (!AreRetryableWritesSupported(context.Channel.ConnectionDescription)) - { - throw originalException; + HashSet<ServerDescription> deprioritizedServers = null; + var attempt = 1; + Exception originalException = null; + + long? transactionNumber = AreRetriesAllowed(operation.WriteConcern, context, context.ChannelSource.ServerDescription) ? context.Binding.Session.AdvanceTransactionNumber() : null; + + while (true) // Circle breaking logic based on ShouldRetryOperation method, see the catch block below. + { + operationContext.ThrowIfTimedOutOrCanceled(); + var server = context.ChannelSource.ServerDescription; + try + { + return operation.ExecuteAttempt(operationContext, context, attempt, transactionNumber); + } + catch (Exception ex) + { + if (!ShouldRetryOperation(operationContext, operation.WriteConcern, context, server, ex, attempt)) + { + throw originalException ?? 
ex; + } + + originalException ??= ex; + } + + deprioritizedServers ??= new HashSet<ServerDescription>(); + deprioritizedServers.Add(server); + + try + { + context.AcquireOrReplaceChannel(operationContext, deprioritizedServers); + } + catch + { + throw originalException; + } + + if (!AreRetryableWritesSupported(context.ChannelSource.ServerDescription)) + { + throw originalException; + } + + attempt++; } + } - try - { - return operation.ExecuteAttempt(context, 2, transactionNumber, cancellationToken); - } - catch (Exception ex) when (ShouldThrowOriginalException(ex)) + public async static Task<TResult> ExecuteAsync<TResult>(OperationContext operationContext, IRetryableWriteOperation<TResult> operation, IWriteBinding binding, bool retryRequested) + { + using (var context = await RetryableWriteContext.CreateAsync(operationContext, binding, retryRequested).ConfigureAwait(false)) { - throw originalException; + return await ExecuteAsync(operationContext, operation, context).ConfigureAwait(false); } } - public async static Task<TResult> ExecuteAsync<TResult>(IRetryableWriteOperation<TResult> operation, IWriteBinding binding, bool retryRequested, CancellationToken cancellationToken) + public static async Task<TResult> ExecuteAsync<TResult>(OperationContext operationContext, IRetryableWriteOperation<TResult> operation, RetryableWriteContext context) { - using (var context = await RetryableWriteContext.CreateAsync(binding, retryRequested, cancellationToken).ConfigureAwait(false)) - { - return await ExecuteAsync(operation, context, cancellationToken).ConfigureAwait(false); + HashSet<ServerDescription> deprioritizedServers = null; + var attempt = 1; + Exception originalException = null; + + long? transactionNumber = AreRetriesAllowed(operation.WriteConcern, context, context.ChannelSource.ServerDescription) ? context.Binding.Session.AdvanceTransactionNumber() : null; + + while (true) // Circle breaking logic based on ShouldRetryOperation method, see the catch block below. 
+ { + operationContext.ThrowIfTimedOutOrCanceled(); + var server = context.ChannelSource.ServerDescription; + try + { + return await operation.ExecuteAttemptAsync(operationContext, context, attempt, transactionNumber).ConfigureAwait(false); + } + catch (Exception ex) + { + if (!ShouldRetryOperation(operationContext, operation.WriteConcern, context, server, ex, attempt)) + { + throw originalException ?? ex; + } + + originalException ??= ex; + } + + deprioritizedServers ??= new HashSet<ServerDescription>(); + deprioritizedServers.Add(server); + + try + { + await context.AcquireOrReplaceChannelAsync(operationContext, deprioritizedServers).ConfigureAwait(false); + } + catch + { + throw originalException; + } + + if (!AreRetryableWritesSupported(context.ChannelSource.ServerDescription)) + { + throw originalException; + } + + attempt++; } } - public static async Task<TResult> ExecuteAsync<TResult>(IRetryableWriteOperation<TResult> operation, RetryableWriteContext context, CancellationToken cancellationToken) + public static bool ShouldConnectionAcquireBeRetried(OperationContext operationContext, RetryableWriteContext context, ServerDescription server, Exception exception, int attempt) { - if (!AreRetriesAllowed(operation, context)) + if (!DoesContextAllowRetries(context, server)) { - return await operation.ExecuteAttemptAsync(context, 1, null, cancellationToken).ConfigureAwait(false); + return false; } - var transactionNumber = context.Binding.Session.AdvanceTransactionNumber(); - Exception originalException; - try - { - return await operation.ExecuteAttemptAsync(context, 1, transactionNumber, cancellationToken).ConfigureAwait(false); - } - catch (Exception ex) when (RetryabilityHelper.IsRetryableWriteException(ex)) + var innerException = exception is MongoAuthenticationException mongoAuthenticationException ? 
mongoAuthenticationException.InnerException : exception; + // According the spec error during handshake should be handle according to RetryableReads logic + if (!RetryabilityHelper.IsRetryableReadException(innerException)) { - originalException = ex; + return false; } - try - { - context.ReplaceChannelSource(await context.Binding.GetWriteChannelSourceAsync(new[] { context.ChannelSource.ServerDescription }, cancellationToken).ConfigureAwait(false)); - context.ReplaceChannel(await context.ChannelSource.GetChannelAsync(cancellationToken).ConfigureAwait(false)); - } - catch - { - throw originalException; - } + return operationContext.IsRootContextTimeoutConfigured() || attempt < 2; + } - if (!AreRetryableWritesSupported(context.Channel.ConnectionDescription)) + // private static methods + private static bool ShouldRetryOperation(OperationContext operationContext, WriteConcern writeConcern, RetryableWriteContext context, ServerDescription server, Exception exception, int attempt) + { + if (!AreRetriesAllowed(writeConcern, context, server)) { - throw originalException; + return false; } - try + if (!RetryabilityHelper.IsRetryableWriteException(exception)) { - return await operation.ExecuteAttemptAsync(context, 2, transactionNumber, cancellationToken).ConfigureAwait(false); + return false; } - catch (Exception ex) when (ShouldThrowOriginalException(ex)) - { - throw originalException; - } - } - - public static bool ShouldConnectionAcquireBeRetried(RetryableWriteContext context, ServerDescription serverDescription, Exception exception) - { - var innerException = exception is MongoAuthenticationException mongoAuthenticationException ? 
mongoAuthenticationException.InnerException : exception; - // According the spec error during handshake should be handle according to RetryableReads logic - return context.RetryRequested && - AreRetryableWritesSupported(serverDescription) && - context.Binding.Session.Id != null && - !context.Binding.Session.IsInTransaction && - RetryabilityHelper.IsRetryableReadException(innerException); - } - - // private static methods - private static bool AreRetriesAllowed<TResult>(IRetryableWriteOperation<TResult> operation, RetryableWriteContext context) - { - return IsOperationAcknowledged(operation) && DoesContextAllowRetries(context); + return operationContext.IsRootContextTimeoutConfigured() || attempt < 2; } - private static bool AreRetryableWritesSupported(ConnectionDescription connectionDescription) - { - var helloResult = connectionDescription.HelloResult; - return - helloResult.ServerType == ServerType.LoadBalanced || - (helloResult.LogicalSessionTimeout != null && helloResult.ServerType != ServerType.Standalone); - } + private static bool AreRetriesAllowed(WriteConcern writeConcern, RetryableWriteContext context, ServerDescription server) + => IsOperationAcknowledged(writeConcern) && DoesContextAllowRetries(context, server); private static bool AreRetryableWritesSupported(ServerDescription serverDescription) { @@ -159,25 +176,14 @@ private static bool AreRetryableWritesSupported(ServerDescription serverDescript (serverDescription.LogicalSessionTimeout != null && serverDescription.Type != ServerType.Standalone); } - private static bool DoesContextAllowRetries(RetryableWriteContext context) - { - return - context.RetryRequested && - AreRetryableWritesSupported(context.Channel.ConnectionDescription) && - context.Binding.Session.Id != null && - !context.Binding.Session.IsInTransaction; - } - - private static bool IsOperationAcknowledged<TResult>(IRetryableWriteOperation<TResult> operation) - { - var writeConcern = operation.WriteConcern; - return - writeConcern == null 
|| // null means use server default write concern which implies acknowledged - writeConcern.IsAcknowledged; - } + private static bool DoesContextAllowRetries(RetryableWriteContext context, ServerDescription server) + => context.RetryRequested && + AreRetryableWritesSupported(server) && + context.Binding.Session.Id != null && + !context.Binding.Session.IsInTransaction; - private static bool ShouldThrowOriginalException(Exception retryException) => - retryException == null || - retryException is MongoException && !(retryException is MongoConnectionException || retryException is MongoConnectionPoolPausedException); + private static bool IsOperationAcknowledged(WriteConcern writeConcern) + => writeConcern == null || // null means use server default write concern which implies acknowledged + writeConcern.IsAcknowledged; } } diff --git a/src/MongoDB.Driver/Core/Operations/UpdateOpcodeOperation.cs b/src/MongoDB.Driver/Core/Operations/UpdateOpcodeOperation.cs deleted file mode 100644 index 72862d1079f..00000000000 --- a/src/MongoDB.Driver/Core/Operations/UpdateOpcodeOperation.cs +++ /dev/null @@ -1,133 +0,0 @@ -/* Copyright 2010-present MongoDB Inc. -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. 
-*/ - -using System.Threading; -using System.Threading.Tasks; -using MongoDB.Driver.Core.Bindings; -using MongoDB.Driver.Core.Events; -using MongoDB.Driver.Core.Misc; -using MongoDB.Driver.Core.WireProtocol.Messages.Encoders; - -namespace MongoDB.Driver.Core.Operations -{ - internal sealed class UpdateOpcodeOperation : IWriteOperation<WriteConcernResult>, IExecutableInRetryableWriteContext<WriteConcernResult> - { - private bool? _bypassDocumentValidation; - private readonly CollectionNamespace _collectionNamespace; - private int? _maxDocumentSize; - private readonly MessageEncoderSettings _messageEncoderSettings; - private readonly UpdateRequest _request; - private bool _retryRequested; - private WriteConcern _writeConcern = WriteConcern.Acknowledged; - - public UpdateOpcodeOperation( - CollectionNamespace collectionNamespace, - UpdateRequest request, - MessageEncoderSettings messageEncoderSettings) - { - _collectionNamespace = Ensure.IsNotNull(collectionNamespace, nameof(collectionNamespace)); - _request = Ensure.IsNotNull(request, nameof(request)); - _messageEncoderSettings = Ensure.IsNotNull(messageEncoderSettings, nameof(messageEncoderSettings)); - } - - public bool? BypassDocumentValidation - { - get { return _bypassDocumentValidation; } - set { _bypassDocumentValidation = value; } - } - - public CollectionNamespace CollectionNamespace - { - get { return _collectionNamespace; } - } - - public int? 
MaxDocumentSize - { - get { return _maxDocumentSize; } - set { _maxDocumentSize = Ensure.IsNullOrGreaterThanZero(value, nameof(value)); } - } - - public MessageEncoderSettings MessageEncoderSettings - { - get { return _messageEncoderSettings; } - } - - public UpdateRequest Request - { - get { return _request; } - } - - public bool RetryRequested - { - get { return _retryRequested; } - set { _retryRequested = value; } - } - - public WriteConcern WriteConcern - { - get { return _writeConcern; } - set { _writeConcern = Ensure.IsNotNull(value, nameof(value)); } - } - - public WriteConcernResult Execute(IWriteBinding binding, CancellationToken cancellationToken) - { - Ensure.IsNotNull(binding, nameof(binding)); - - using (var context = RetryableWriteContext.Create(binding, false, cancellationToken)) - { - return Execute(context, cancellationToken); - } - } - - public WriteConcernResult Execute(RetryableWriteContext context, CancellationToken cancellationToken) - { - using (EventContext.BeginOperation()) - { - var emulator = CreateEmulator(); - return emulator.Execute(context, cancellationToken); - } - } - - public async Task<WriteConcernResult> ExecuteAsync(IWriteBinding binding, CancellationToken cancellationToken) - { - Ensure.IsNotNull(binding, nameof(binding)); - - using (var context = await RetryableWriteContext.CreateAsync(binding, false, cancellationToken).ConfigureAwait(false)) - { - return await ExecuteAsync(context, cancellationToken).ConfigureAwait(false); - } - } - - public async Task<WriteConcernResult> ExecuteAsync(RetryableWriteContext context, CancellationToken cancellationToken) - { - using (EventContext.BeginOperation()) - { - var emulator = CreateEmulator(); - return await emulator.ExecuteAsync(context, cancellationToken).ConfigureAwait(false); - } - } - - private UpdateOpcodeOperationEmulator CreateEmulator() - { - return new UpdateOpcodeOperationEmulator(_collectionNamespace, _request, _messageEncoderSettings) - { - BypassDocumentValidation = 
_bypassDocumentValidation, - MaxDocumentSize = _maxDocumentSize, - RetryRequested = _retryRequested, - WriteConcern = _writeConcern - }; - } - } -} diff --git a/src/MongoDB.Driver/Core/Operations/UpdateOpcodeOperationEmulator.cs b/src/MongoDB.Driver/Core/Operations/UpdateOpcodeOperationEmulator.cs deleted file mode 100644 index 77947fdf372..00000000000 --- a/src/MongoDB.Driver/Core/Operations/UpdateOpcodeOperationEmulator.cs +++ /dev/null @@ -1,168 +0,0 @@ -/* Copyright 2013-present MongoDB Inc. -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ - -using System; -using System.Threading; -using System.Threading.Tasks; -using MongoDB.Driver.Core.Bindings; -using MongoDB.Driver.Core.Connections; -using MongoDB.Driver.Core.Misc; -using MongoDB.Driver.Core.WireProtocol.Messages.Encoders; - -namespace MongoDB.Driver.Core.Operations -{ - internal class UpdateOpcodeOperationEmulator : IExecutableInRetryableWriteContext<WriteConcernResult> - { - // fields - private bool? _bypassDocumentValidation; - private readonly CollectionNamespace _collectionNamespace; - private int? 
_maxDocumentSize; - private readonly MessageEncoderSettings _messageEncoderSettings; - private readonly UpdateRequest _request; - private bool _retryRequested; - private WriteConcern _writeConcern = WriteConcern.Acknowledged; - - // constructors - public UpdateOpcodeOperationEmulator( - CollectionNamespace collectionNamespace, - UpdateRequest request, - MessageEncoderSettings messageEncoderSettings) - { - _collectionNamespace = Ensure.IsNotNull(collectionNamespace, nameof(collectionNamespace)); - _request = Ensure.IsNotNull(request, nameof(request)); - _messageEncoderSettings = Ensure.IsNotNull(messageEncoderSettings, nameof(messageEncoderSettings)); - } - - // properties - public bool? BypassDocumentValidation - { - get { return _bypassDocumentValidation; } - set { _bypassDocumentValidation = value; } - } - - public CollectionNamespace CollectionNamespace - { - get { return _collectionNamespace; } - } - - /// <summary> - /// Gets or sets the maximum size of a document. - /// </summary> - /// <value> - /// The maximum size of a document. - /// </value> - public int? 
MaxDocumentSize - { - get { return _maxDocumentSize; } - set { _maxDocumentSize = Ensure.IsNullOrGreaterThanZero(value, nameof(value)); } - } - - public MessageEncoderSettings MessageEncoderSettings - { - get { return _messageEncoderSettings; } - } - - public UpdateRequest Request - { - get { return _request; } - } - - public bool RetryRequested - { - get { return _retryRequested; } - set { _retryRequested = value; } - } - - public WriteConcern WriteConcern - { - get { return _writeConcern; } - set { _writeConcern = Ensure.IsNotNull(value, nameof(value)); } - } - - // public methods - public WriteConcernResult Execute(RetryableWriteContext context, CancellationToken cancellationToken) - { - Ensure.IsNotNull(context, nameof(context)); - - var operation = CreateOperation(); - BulkWriteOperationResult result; - MongoBulkWriteOperationException exception = null; - try - { - result = operation.Execute(context, cancellationToken); - } - catch (MongoBulkWriteOperationException ex) - { - result = ex.Result; - exception = ex; - } - - return CreateResultOrThrow(context.Channel, result, exception); - } - - public async Task<WriteConcernResult> ExecuteAsync(RetryableWriteContext context, CancellationToken cancellationToken) - { - Ensure.IsNotNull(context, nameof(context)); - - var operation = CreateOperation(); - BulkWriteOperationResult result; - MongoBulkWriteOperationException exception = null; - try - { - result = await operation.ExecuteAsync(context, cancellationToken).ConfigureAwait(false); - } - catch (MongoBulkWriteOperationException ex) - { - result = ex.Result; - exception = ex; - } - - return CreateResultOrThrow(context.Channel, result, exception); - } - - // private methods - private BulkUpdateOperation CreateOperation() - { - var requests = new[] { _request }; - return new BulkUpdateOperation(_collectionNamespace, requests, _messageEncoderSettings) - { - BypassDocumentValidation = _bypassDocumentValidation, - IsOrdered = true, - RetryRequested = _retryRequested, - 
WriteConcern = _writeConcern - }; - } - - private WriteConcernResult CreateResultOrThrow(IChannelHandle channel, BulkWriteOperationResult result, MongoBulkWriteOperationException exception) - { - var converter = new BulkWriteOperationResultConverter(); - if (exception != null) - { - throw converter.ToWriteConcernException(channel.ConnectionDescription.ConnectionId, exception); - } - else - { - if (_writeConcern.IsAcknowledged) - { - return converter.ToWriteConcernResult(result); - } - else - { - return null; - } - } - } - } -} diff --git a/src/MongoDB.Driver/Core/Operations/UpdateRequest.cs b/src/MongoDB.Driver/Core/Operations/UpdateRequest.cs index 88810698d4c..af885a697be 100644 --- a/src/MongoDB.Driver/Core/Operations/UpdateRequest.cs +++ b/src/MongoDB.Driver/Core/Operations/UpdateRequest.cs @@ -16,7 +16,6 @@ using System; using System.Collections.Generic; using MongoDB.Bson; -using MongoDB.Driver.Core.Connections; using MongoDB.Driver.Core.Misc; namespace MongoDB.Driver.Core.Operations @@ -44,7 +43,7 @@ public UpdateRequest(UpdateType updateType, BsonDocument filter, BsonValue updat public UpdateType UpdateType { get; init; } // public methods - public override bool IsRetryable(ConnectionDescription connectionDescription) => !IsMulti; + public override bool IsRetryable() => !IsMulti; // private methods private static BsonValue EnsureUpdateIsValid(BsonValue update, UpdateType updateType) diff --git a/src/MongoDB.Driver/Core/Operations/UpdateSearchIndexOperation.cs b/src/MongoDB.Driver/Core/Operations/UpdateSearchIndexOperation.cs index 90657153792..09496dd9d3b 100644 --- a/src/MongoDB.Driver/Core/Operations/UpdateSearchIndexOperation.cs +++ b/src/MongoDB.Driver/Core/Operations/UpdateSearchIndexOperation.cs @@ -13,7 +13,6 @@ * limitations under the License. 
*/ -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Bson.Serialization.Serializers; @@ -44,27 +43,27 @@ public UpdateSearchIndexOperation( _messageEncoderSettings = Ensure.IsNotNull(messageEncoderSettings, nameof(messageEncoderSettings)); } - public BsonDocument Execute(IWriteBinding binding, CancellationToken cancellationToken) + public BsonDocument Execute(OperationContext operationContext, IWriteBinding binding) { using (EventContext.BeginOperation("updateSearchIndex")) - using (var channelSource = binding.GetWriteChannelSource(cancellationToken)) - using (var channel = channelSource.GetChannel(cancellationToken)) + using (var channelSource = binding.GetWriteChannelSource(operationContext)) + using (var channel = channelSource.GetChannel(operationContext)) using (var channelBinding = new ChannelReadWriteBinding(channelSource.Server, channel, binding.Session.Fork())) { var operation = CreateOperation(); - return operation.Execute(channelBinding, cancellationToken); + return operation.Execute(operationContext, channelBinding); } } - public async Task<BsonDocument> ExecuteAsync(IWriteBinding binding, CancellationToken cancellationToken) + public async Task<BsonDocument> ExecuteAsync(OperationContext operationContext, IWriteBinding binding) { using (EventContext.BeginOperation("updateSearchIndex")) - using (var channelSource = await binding.GetWriteChannelSourceAsync(cancellationToken).ConfigureAwait(false)) - using (var channel = await channelSource.GetChannelAsync(cancellationToken).ConfigureAwait(false)) + using (var channelSource = await binding.GetWriteChannelSourceAsync(operationContext).ConfigureAwait(false)) + using (var channel = await channelSource.GetChannelAsync(operationContext).ConfigureAwait(false)) using (var channelBinding = new ChannelReadWriteBinding(channelSource.Server, channel, binding.Session.Fork())) { var operation = CreateOperation(); - return await operation.ExecuteAsync(channelBinding, 
cancellationToken).ConfigureAwait(false); + return await operation.ExecuteAsync(operationContext, channelBinding).ConfigureAwait(false); } } diff --git a/src/MongoDB.Driver/Core/Operations/WriteCommandOperation.cs b/src/MongoDB.Driver/Core/Operations/WriteCommandOperation.cs index 183efdf9888..033db2d0eae 100644 --- a/src/MongoDB.Driver/Core/Operations/WriteCommandOperation.cs +++ b/src/MongoDB.Driver/Core/Operations/WriteCommandOperation.cs @@ -13,7 +13,6 @@ * limitations under the License. */ -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Bson.Serialization; @@ -39,25 +38,25 @@ public ReadPreference ReadPreference set => _readPreference = Ensure.IsNotNull(value, nameof(value)); } - public TCommandResult Execute(IWriteBinding binding, CancellationToken cancellationToken) + public TCommandResult Execute(OperationContext operationContext, IWriteBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); using (EventContext.BeginOperation()) - using (var channelSource = binding.GetWriteChannelSource(cancellationToken)) + using (var channelSource = binding.GetWriteChannelSource(operationContext)) { - return ExecuteProtocol(channelSource, binding.Session, _readPreference, cancellationToken); + return ExecuteProtocol(operationContext, channelSource, binding.Session, _readPreference); } } - public async Task<TCommandResult> ExecuteAsync(IWriteBinding binding, CancellationToken cancellationToken = default(CancellationToken)) + public async Task<TCommandResult> ExecuteAsync(OperationContext operationContext, IWriteBinding binding) { Ensure.IsNotNull(binding, nameof(binding)); using (EventContext.BeginOperation()) - using (var channelSource = await binding.GetWriteChannelSourceAsync(cancellationToken).ConfigureAwait(false)) + using (var channelSource = await binding.GetWriteChannelSourceAsync(operationContext).ConfigureAwait(false)) { - return await ExecuteProtocolAsync(channelSource, binding.Session, _readPreference, 
cancellationToken).ConfigureAwait(false); + return await ExecuteProtocolAsync(operationContext, channelSource, binding.Session, _readPreference).ConfigureAwait(false); } } } diff --git a/src/MongoDB.Driver/Core/Operations/WriteConcernHelper.cs b/src/MongoDB.Driver/Core/Operations/WriteConcernHelper.cs index 44df6f8e231..345effcfc5d 100644 --- a/src/MongoDB.Driver/Core/Operations/WriteConcernHelper.cs +++ b/src/MongoDB.Driver/Core/Operations/WriteConcernHelper.cs @@ -1,4 +1,4 @@ -/* Copyright 2018-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,11 +20,19 @@ namespace MongoDB.Driver.Core.Operations { internal static class WriteConcernHelper { - public static BsonDocument GetEffectiveWriteConcern(ICoreSession session, WriteConcern writeConcern) + public static BsonDocument GetEffectiveWriteConcern(OperationContext operationContext, ICoreSession session, WriteConcern writeConcern) { - if (!session.IsInTransaction && writeConcern != null && !writeConcern.IsServerDefault) + if (writeConcern != null) { - return writeConcern.ToBsonDocument(); + if (operationContext.IsRootContextTimeoutConfigured()) + { + writeConcern = writeConcern.With(wTimeout: null); + } + + if (!session.IsInTransaction && !writeConcern.IsServerDefault) + { + return writeConcern.ToBsonDocument(); + } } return null; diff --git a/src/MongoDB.Driver/Core/Operations/WriteRequest.cs b/src/MongoDB.Driver/Core/Operations/WriteRequest.cs index 104d525c574..1da745c87f0 100644 --- a/src/MongoDB.Driver/Core/Operations/WriteRequest.cs +++ b/src/MongoDB.Driver/Core/Operations/WriteRequest.cs @@ -13,9 +13,6 @@ * limitations under the License. 
*/ -using System; -using MongoDB.Driver.Core.Connections; - namespace MongoDB.Driver.Core.Operations { internal abstract class WriteRequest @@ -31,6 +28,6 @@ protected WriteRequest(WriteRequestType requestType) public WriteRequestType RequestType { get; init; } // public methods - public abstract bool IsRetryable(ConnectionDescription connectionDescription); + public abstract bool IsRetryable(); } } diff --git a/src/MongoDB.Driver/Core/ReadConcernLevel.cs b/src/MongoDB.Driver/Core/ReadConcernLevel.cs index 699ea4511de..b8ff17e2e75 100644 --- a/src/MongoDB.Driver/Core/ReadConcernLevel.cs +++ b/src/MongoDB.Driver/Core/ReadConcernLevel.cs @@ -36,10 +36,10 @@ public enum ReadConcernLevel Majority, /// <summary> - /// Avoids returning data from a "stale" primary - /// (one that has already been superseded by a new primary but doesn't know it yet). - /// It is important to note that readConcern level linearizable does not by itself - /// produce linearizable reads; they must be issued in conjunction with w:majority + /// Avoids returning data from a "stale" primary + /// (one that has already been superseded by a new primary but doesn't know it yet). + /// It is important to note that readConcern level linearizable does not by itself + /// produce linearizable reads; they must be issued in conjunction with w:majority /// writes to the same document(s) in order to be linearizable. 
/// </summary> Linearizable, diff --git a/src/MongoDB.Driver/Core/Servers/IServer.cs b/src/MongoDB.Driver/Core/Servers/IServer.cs index bbd9e0cbb80..9010405d1b3 100644 --- a/src/MongoDB.Driver/Core/Servers/IServer.cs +++ b/src/MongoDB.Driver/Core/Servers/IServer.cs @@ -15,9 +15,11 @@ using System; using System.Net; -using System.Threading; using System.Threading.Tasks; using MongoDB.Driver.Core.Bindings; +using MongoDB.Driver.Core.Clusters; +using MongoDB.Driver.Core.ConnectionPools; +using MongoDB.Driver.Core.Connections; namespace MongoDB.Driver.Core.Servers { @@ -25,16 +27,21 @@ internal interface IServer { event EventHandler<ServerDescriptionChangedEventArgs> DescriptionChanged; + IClusterClock ClusterClock { get; } ServerDescription Description { get; } EndPoint EndPoint { get; } ServerId ServerId { get; } + ServerApi ServerApi { get; } - IChannelHandle GetChannel(CancellationToken cancellationToken); - Task<IChannelHandle> GetChannelAsync(CancellationToken cancellationToken); + void DecrementOutstandingOperationsCount(); + IChannelHandle GetChannel(OperationContext operationContext); + Task<IChannelHandle> GetChannelAsync(OperationContext operationContext); + void HandleChannelException(IConnectionHandle connection, Exception exception); } internal interface IClusterableServer : IServer, IDisposable { + IConnectionPool ConnectionPool { get; } bool IsInitialized { get; } int OutstandingOperationsCount { get; } @@ -42,4 +49,9 @@ internal interface IClusterableServer : IServer, IDisposable void Invalidate(string reasonInvalidated, TopologyVersion responseTopologyVersion); void RequestHeartbeat(); } + + internal interface ISelectedServer : IServer + { + ServerDescription DescriptionWhenSelected { get; } + } } diff --git a/src/MongoDB.Driver/Core/Servers/RoundTripTimeMonitor.cs b/src/MongoDB.Driver/Core/Servers/RoundTripTimeMonitor.cs index 23a306a9dcf..0521743d5ae 100644 --- a/src/MongoDB.Driver/Core/Servers/RoundTripTimeMonitor.cs +++ 
b/src/MongoDB.Driver/Core/Servers/RoundTripTimeMonitor.cs @@ -39,6 +39,7 @@ internal sealed class RoundTripTimeMonitor : IRoundTripTimeMonitor private readonly CancellationToken _cancellationToken; private readonly CancellationTokenSource _cancellationTokenSource; private readonly IConnectionFactory _connectionFactory; + private readonly TimeSpan _connectTimeout; private bool _disposed; private readonly EndPoint _endPoint; private readonly TimeSpan _heartbeatInterval; @@ -54,6 +55,7 @@ public RoundTripTimeMonitor( ServerId serverId, EndPoint endpoint, TimeSpan heartbeatInterval, + TimeSpan connectTimeout, ServerApi serverApi, ILogger<RoundTripTimeMonitor> logger) { @@ -61,6 +63,7 @@ public RoundTripTimeMonitor( _serverId = Ensure.IsNotNull(serverId, nameof(serverId)); _endPoint = Ensure.IsNotNull(endpoint, nameof(endpoint)); _heartbeatInterval = heartbeatInterval; + _connectTimeout = connectTimeout; _serverApi = serverApi; _cancellationTokenSource = new CancellationTokenSource(); _cancellationToken = _cancellationTokenSource.Token; @@ -122,13 +125,15 @@ private void MonitorServer() _logger?.LogDebug(_serverId, "Monitoring started"); var helloOk = false; - while (!_cancellationToken.IsCancellationRequested) + using var operationContext = new OperationContext(null, _cancellationToken); + while (!operationContext.IsCancelledOrTimedOut()) { try { if (_roundTripTimeConnection == null) { - InitializeConnection(); // sets _roundTripTimeConnection + using var connectOperationContext = operationContext.WithTimeout(_connectTimeout); + InitializeConnection(operationContext); // sets _roundTripTimeConnection } else { @@ -136,7 +141,7 @@ private void MonitorServer() var helloProtocol = HelloHelper.CreateProtocol(helloCommand, _serverApi); var stopwatch = Stopwatch.StartNew(); - var helloResult = HelloHelper.GetResult(_roundTripTimeConnection, helloProtocol, _cancellationToken); + var helloResult = HelloHelper.GetResult(operationContext, _roundTripTimeConnection, helloProtocol); 
stopwatch.Stop(); AddSample(stopwatch.Elapsed); helloOk = helloResult.HelloOk; @@ -159,23 +164,23 @@ private void MonitorServer() } } - private void InitializeConnection() + private void InitializeConnection(OperationContext operationContext) { - _cancellationToken.ThrowIfCancellationRequested(); + operationContext.ThrowIfTimedOutOrCanceled(); var roundTripTimeConnection = _connectionFactory.CreateConnection(_serverId, _endPoint); var stopwatch = Stopwatch.StartNew(); try { + roundTripTimeConnection.Open(operationContext); // if we are cancelling, it's because the server has // been shut down and we really don't need to wait. - roundTripTimeConnection.Open(_cancellationToken); - _cancellationToken.ThrowIfCancellationRequested(); + operationContext.CancellationToken.ThrowIfCancellationRequested(); } catch { - // dispose it here because the _connection is not initialized yet + // dispose it here because the _roundTripTimeConnection is not initialized yet try { roundTripTimeConnection.Dispose(); } catch { } throw; } diff --git a/src/MongoDB.Driver/Core/Servers/SelectedServer.cs b/src/MongoDB.Driver/Core/Servers/SelectedServer.cs new file mode 100644 index 00000000000..c0e8b6edbeb --- /dev/null +++ b/src/MongoDB.Driver/Core/Servers/SelectedServer.cs @@ -0,0 +1,65 @@ +/* Copyright 2010-present MongoDB Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +using System; +using System.Net; +using System.Threading.Tasks; +using MongoDB.Driver.Core.Bindings; +using MongoDB.Driver.Core.Clusters; +using MongoDB.Driver.Core.Connections; + +namespace MongoDB.Driver.Core.Servers; + +internal class SelectedServer : ISelectedServer +{ + private readonly ServerDescription _descriptionWhenSelected; + private readonly IServer _server; + + public SelectedServer(IServer server, ServerDescription descriptionWhenSelected) + { + _server = server; + _descriptionWhenSelected = descriptionWhenSelected; + } + + public event EventHandler<ServerDescriptionChangedEventArgs> DescriptionChanged + { + add { _server.DescriptionChanged += value; } + remove => _server.DescriptionChanged -= value; + } + + public IClusterClock ClusterClock => _server.ClusterClock; + public ServerDescription Description => _server.Description; + public EndPoint EndPoint => _server.EndPoint; + public ServerId ServerId => _server.ServerId; + public ServerApi ServerApi => _server.ServerApi; + public ServerDescription DescriptionWhenSelected => _descriptionWhenSelected; + + public void DecrementOutstandingOperationsCount() + => _server.DecrementOutstandingOperationsCount(); + + public IChannelHandle GetChannel(OperationContext operationContext) + { + var channel = _server.GetChannel(operationContext); + return new ServerChannel(this, channel.Connection); + } + + public async Task<IChannelHandle> GetChannelAsync(OperationContext operationContext) + { + var channel = await _server.GetChannelAsync(operationContext).ConfigureAwait(false); + return new ServerChannel(this, channel.Connection); + } + + public void HandleChannelException(IConnectionHandle channel, Exception exception) => _server.HandleChannelException(channel, exception); +} diff --git a/src/MongoDB.Driver/Core/Servers/Server.cs b/src/MongoDB.Driver/Core/Servers/Server.cs index e428c50212c..50c1710e566 100644 --- a/src/MongoDB.Driver/Core/Servers/Server.cs +++ b/src/MongoDB.Driver/Core/Servers/Server.cs 
@@ -14,14 +14,10 @@ */ using System; -using System.Collections.Generic; using System.Diagnostics; using System.Net; using System.Threading; using System.Threading.Tasks; -using MongoDB.Bson; -using MongoDB.Bson.IO; -using MongoDB.Bson.Serialization; using MongoDB.Driver.Core.Bindings; using MongoDB.Driver.Core.Clusters; using MongoDB.Driver.Core.Configuration; @@ -30,9 +26,6 @@ using MongoDB.Driver.Core.Events; using MongoDB.Driver.Core.Logging; using MongoDB.Driver.Core.Misc; -using MongoDB.Driver.Core.WireProtocol; -using MongoDB.Driver.Core.WireProtocol.Messages; -using MongoDB.Driver.Core.WireProtocol.Messages.Encoders; namespace MongoDB.Driver.Core.Servers { @@ -82,6 +75,7 @@ public Server( public abstract ServerDescription Description { get; } public EndPoint EndPoint => _endPoint; public bool IsInitialized => _state.Value != State.Initial; + public ServerApi ServerApi => _serverApi; public ServerId ServerId => _serverId; protected EventLogger<LogCategories.SDAM> EventLogger => _eventLogger; @@ -104,18 +98,49 @@ public void Dispose() } } + public void DecrementOutstandingOperationsCount() + { + Interlocked.Decrement(ref _outstandingOperationsCount); + } + + public void HandleChannelException(IConnectionHandle connection, Exception ex) + { + if (!IsOpen() || ShouldIgnoreException(ex)) + { + return; + } + + ex = GetEffectiveException(ex); + + HandleAfterHandshakeCompletesException(connection, ex); + + bool ShouldIgnoreException(Exception ex) + { + // For most connection exceptions, we are going to immediately + // invalidate the server. However, we aren't going to invalidate + // because of OperationCanceledExceptions. We trust that the + // implementations of connection don't leave themselves in a state + // where they can't be used based on user cancellation. + return ex is OperationCanceledException; + } + + Exception GetEffectiveException(Exception ex) => + ex is AggregateException aggregateException && aggregateException.InnerExceptions.Count == 1 + ? 
aggregateException.InnerException + : ex; + } + public void HandleExceptionOnOpen(Exception exception) => HandleBeforeHandshakeCompletesException(exception); - public IChannelHandle GetChannel(CancellationToken cancellationToken) + public IChannelHandle GetChannel(OperationContext operationContext) { ThrowIfNotOpen(); try { Interlocked.Increment(ref _outstandingOperationsCount); - - var connection = _connectionPool.AcquireConnection(cancellationToken); + var connection = _connectionPool.AcquireConnection(operationContext); return new ServerChannel(this, connection); } catch @@ -126,14 +151,14 @@ public IChannelHandle GetChannel(CancellationToken cancellationToken) } } - public async Task<IChannelHandle> GetChannelAsync(CancellationToken cancellationToken) + public async Task<IChannelHandle> GetChannelAsync(OperationContext operationContext) { ThrowIfNotOpen(); try { Interlocked.Increment(ref _outstandingOperationsCount); - var connection = await _connectionPool.AcquireConnectionAsync(cancellationToken).ConfigureAwait(false); + var connection = await _connectionPool.AcquireConnectionAsync(operationContext).ConfigureAwait(false); return new ServerChannel(this, connection); } catch @@ -173,7 +198,6 @@ public void Invalidate(string reasonInvalidated, TopologyVersion responseTopolog public abstract void RequestHeartbeat(); // protected methods - protected abstract void Invalidate(string reasonInvalidated, bool clearConnectionPool, TopologyVersion responseTopologyDescription); protected abstract void Dispose(bool disposing); @@ -222,33 +246,6 @@ protected bool ShouldClearConnectionPoolForChannelException(Exception ex, int ma } // private methods - private void HandleChannelException(IConnection connection, Exception ex) - { - if (!IsOpen() || ShouldIgnoreException(ex)) - { - return; - } - - ex = GetEffectiveException(ex); - - HandleAfterHandshakeCompletesException(connection, ex); - - bool ShouldIgnoreException(Exception ex) - { - // For most connection exceptions, we 
are going to immediately - // invalidate the server. However, we aren't going to invalidate - // because of OperationCanceledExceptions. We trust that the - // implementations of connection don't leave themselves in a state - // where they can't be used based on user cancellation. - return ex is OperationCanceledException; - } - - Exception GetEffectiveException(Exception ex) => - ex is AggregateException aggregateException && aggregateException.InnerExceptions.Count == 1 - ? aggregateException.InnerException - : ex; - } - private bool IsOpen() => _state.Value == State.Open; private void ThrowIfDisposed() @@ -275,360 +272,5 @@ private static class State public const int Open = 1; public const int Disposed = 2; } - - private sealed class ServerChannel : IChannelHandle - { - // fields - private readonly IConnectionHandle _connection; - private readonly Server _server; - - private readonly InterlockedInt32 _state; - private readonly bool _decrementOperationsCount; - - // constructors - public ServerChannel(Server server, IConnectionHandle connection, bool decrementOperationsCount = true) - { - _server = server; - _connection = connection; - - _state = new InterlockedInt32(ChannelState.Initial); - _decrementOperationsCount = decrementOperationsCount; - } - - // properties - public IConnectionHandle Connection => _connection; - - public ConnectionDescription ConnectionDescription - { - get { return _connection.Description; } - } - - // methods - public TResult Command<TResult>( - ICoreSession session, - ReadPreference readPreference, - DatabaseNamespace databaseNamespace, - BsonDocument command, - IEnumerable<BatchableCommandMessageSection> commandPayloads, - IElementNameValidator commandValidator, - BsonDocument additionalOptions, - Action<IMessageEncoderPostProcessor> postWriteAction, - CommandResponseHandling responseHandling, - IBsonSerializer<TResult> resultSerializer, - MessageEncoderSettings messageEncoderSettings, - CancellationToken cancellationToken) - { - var 
protocol = new CommandWireProtocol<TResult>( - CreateClusterClockAdvancingCoreSession(session), - readPreference, - databaseNamespace, - command, - commandPayloads, - commandValidator, - additionalOptions, - postWriteAction, - responseHandling, - resultSerializer, - messageEncoderSettings, - _server._serverApi); - - return ExecuteProtocol(protocol, session, cancellationToken); - } - - public Task<TResult> CommandAsync<TResult>( - ICoreSession session, - ReadPreference readPreference, - DatabaseNamespace databaseNamespace, - BsonDocument command, - IEnumerable<BatchableCommandMessageSection> commandPayloads, - IElementNameValidator commandValidator, - BsonDocument additionalOptions, - Action<IMessageEncoderPostProcessor> postWriteAction, - CommandResponseHandling responseHandling, - IBsonSerializer<TResult> resultSerializer, - MessageEncoderSettings messageEncoderSettings, - CancellationToken cancellationToken) - { - var protocol = new CommandWireProtocol<TResult>( - CreateClusterClockAdvancingCoreSession(session), - readPreference, - databaseNamespace, - command, - commandPayloads, - commandValidator, - additionalOptions, - postWriteAction, - responseHandling, - resultSerializer, - messageEncoderSettings, - _server._serverApi); - - return ExecuteProtocolAsync(protocol, session, cancellationToken); - } - - public void Dispose() - { - if (_state.TryChange(ChannelState.Initial, ChannelState.Disposed)) - { - if (_decrementOperationsCount) - { - Interlocked.Decrement(ref _server._outstandingOperationsCount); - } - - _connection.Dispose(); - } - } - - public CursorBatch<TDocument> Query<TDocument>( - CollectionNamespace collectionNamespace, - BsonDocument query, - BsonDocument fields, - IElementNameValidator queryValidator, - int skip, - int batchSize, - bool secondaryOk, - bool partialOk, - bool noCursorTimeout, - bool tailableCursor, - bool awaitData, - IBsonSerializer<TDocument> serializer, - MessageEncoderSettings messageEncoderSettings, - CancellationToken 
cancellationToken) - { -#pragma warning disable 618 - return Query( - collectionNamespace, - query, - fields, - queryValidator, - skip, - batchSize, - secondaryOk, - partialOk, - noCursorTimeout, - oplogReplay: false, - tailableCursor, - awaitData, - serializer, - messageEncoderSettings, - cancellationToken); -#pragma warning restore 618 - } - - [Obsolete("Use the newest overload instead.")] - public CursorBatch<TDocument> Query<TDocument>( - CollectionNamespace collectionNamespace, - BsonDocument query, - BsonDocument fields, - IElementNameValidator queryValidator, - int skip, - int batchSize, - bool secondaryOk, - bool partialOk, - bool noCursorTimeout, - bool oplogReplay, - bool tailableCursor, - bool awaitData, - IBsonSerializer<TDocument> serializer, - MessageEncoderSettings messageEncoderSettings, - CancellationToken cancellationToken) - { - secondaryOk = GetEffectiveSecondaryOk(secondaryOk); -#pragma warning disable 618 - var protocol = new QueryWireProtocol<TDocument>( - collectionNamespace, - query, - fields, - queryValidator, - skip, - batchSize, - secondaryOk, - partialOk, - noCursorTimeout, - oplogReplay, - tailableCursor, - awaitData, - serializer, - messageEncoderSettings); -#pragma warning restore 618 - - return ExecuteProtocol(protocol, cancellationToken); - } - - public Task<CursorBatch<TDocument>> QueryAsync<TDocument>( - CollectionNamespace collectionNamespace, - BsonDocument query, - BsonDocument fields, - IElementNameValidator queryValidator, - int skip, - int batchSize, - bool secondaryOk, - bool partialOk, - bool noCursorTimeout, - bool tailableCursor, - bool awaitData, - IBsonSerializer<TDocument> serializer, - MessageEncoderSettings messageEncoderSettings, - CancellationToken cancellationToken) - { -#pragma warning disable 618 - return QueryAsync( - collectionNamespace, - query, - fields, - queryValidator, - skip, - batchSize, - secondaryOk, - partialOk, - noCursorTimeout, - oplogReplay: false, - tailableCursor, - awaitData, - serializer, - 
messageEncoderSettings, - cancellationToken); -#pragma warning restore 618 - } - - [Obsolete("Use the newest overload instead.")] - public Task<CursorBatch<TDocument>> QueryAsync<TDocument>( - CollectionNamespace collectionNamespace, - BsonDocument query, - BsonDocument fields, - IElementNameValidator queryValidator, - int skip, - int batchSize, - bool secondaryOk, - bool partialOk, - bool noCursorTimeout, - bool oplogReplay, - bool tailableCursor, - bool awaitData, - IBsonSerializer<TDocument> serializer, - MessageEncoderSettings messageEncoderSettings, - CancellationToken cancellationToken) - { - secondaryOk = GetEffectiveSecondaryOk(secondaryOk); -#pragma warning disable 618 - var protocol = new QueryWireProtocol<TDocument>( - collectionNamespace, - query, - fields, - queryValidator, - skip, - batchSize, - secondaryOk, - partialOk, - noCursorTimeout, - oplogReplay, - tailableCursor, - awaitData, - serializer, - messageEncoderSettings); -#pragma warning restore 618 - - return ExecuteProtocolAsync(protocol, cancellationToken); - } - - private ICoreSession CreateClusterClockAdvancingCoreSession(ICoreSession session) - { - return new ClusterClockAdvancingCoreSession(session, _server.ClusterClock); - } - - private TResult ExecuteProtocol<TResult>(IWireProtocol<TResult> protocol, CancellationToken cancellationToken) - { - try - { - return protocol.Execute(_connection, cancellationToken); - } - catch (Exception ex) - { - _server.HandleChannelException(_connection, ex); - throw; - } - } - - private TResult ExecuteProtocol<TResult>(IWireProtocol<TResult> protocol, ICoreSession session, CancellationToken cancellationToken) - { - try - { - return protocol.Execute(_connection, cancellationToken); - } - catch (Exception ex) - { - MarkSessionDirtyIfNeeded(session, ex); - _server.HandleChannelException(_connection, ex); - throw; - } - } - - private async Task<TResult> ExecuteProtocolAsync<TResult>(IWireProtocol<TResult> protocol, CancellationToken cancellationToken) - { - try 
- { - return await protocol.ExecuteAsync(_connection, cancellationToken).ConfigureAwait(false); - } - catch (Exception ex) - { - _server.HandleChannelException(_connection, ex); - throw; - } - } - - private async Task<TResult> ExecuteProtocolAsync<TResult>(IWireProtocol<TResult> protocol, ICoreSession session, CancellationToken cancellationToken) - { - try - { - return await protocol.ExecuteAsync(_connection, cancellationToken).ConfigureAwait(false); - } - catch (Exception ex) - { - MarkSessionDirtyIfNeeded(session, ex); - _server.HandleChannelException(_connection, ex); - throw; - } - } - - public IChannelHandle Fork() - { - ThrowIfDisposed(); - - return new ServerChannel(_server, _connection.Fork(), false); - } - - private bool GetEffectiveSecondaryOk(bool secondaryOk) - { - if (_server.DirectConnection && _server.Description.Type != ServerType.ShardRouter) - { - return true; - } - - return secondaryOk; - } - - private void MarkSessionDirtyIfNeeded(ICoreSession session, Exception ex) - { - if (ex is MongoConnectionException) - { - session.MarkDirty(); - } - } - - private void ThrowIfDisposed() - { - if (_state.Value == ChannelState.Disposed) - { - throw new ObjectDisposedException(GetType().Name); - } - } - - // nested types - private static class ChannelState - { - public const int Initial = 0; - public const int Disposed = 1; - } - } } } diff --git a/src/MongoDB.Driver/Core/Servers/ServerChannel.cs b/src/MongoDB.Driver/Core/Servers/ServerChannel.cs new file mode 100644 index 00000000000..bf0ce569e87 --- /dev/null +++ b/src/MongoDB.Driver/Core/Servers/ServerChannel.cs @@ -0,0 +1,206 @@ +/* Copyright 2010-present MongoDB Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +using System; +using System.Collections.Generic; +using System.Threading.Tasks; +using MongoDB.Bson; +using MongoDB.Bson.IO; +using MongoDB.Bson.Serialization; +using MongoDB.Driver.Core.Bindings; +using MongoDB.Driver.Core.Connections; +using MongoDB.Driver.Core.Misc; +using MongoDB.Driver.Core.WireProtocol; +using MongoDB.Driver.Core.WireProtocol.Messages; +using MongoDB.Driver.Core.WireProtocol.Messages.Encoders; + +namespace MongoDB.Driver.Core.Servers +{ + internal sealed class ServerChannel : IChannelHandle + { + // fields + private readonly IConnectionHandle _connection; + private readonly IServer _server; + private readonly InterlockedInt32 _state; + private readonly bool _decrementOperationsCount; + + // constructors + public ServerChannel(IServer server, IConnectionHandle connection, bool decrementOperationsCount = true) + { + _server = server; + _connection = connection; + _state = new InterlockedInt32(ChannelState.Initial); + _decrementOperationsCount = decrementOperationsCount; + } + + // properties + public IConnectionHandle Connection => _connection; + + public ConnectionDescription ConnectionDescription => _connection.Description; + + // methods + public TResult Command<TResult>( + OperationContext operationContext, + ICoreSession session, + ReadPreference readPreference, + DatabaseNamespace databaseNamespace, + BsonDocument command, + IEnumerable<BatchableCommandMessageSection> commandPayloads, + IElementNameValidator commandValidator, + BsonDocument additionalOptions, + 
Action<IMessageEncoderPostProcessor> postWriteAction, + CommandResponseHandling responseHandling, + IBsonSerializer<TResult> resultSerializer, + MessageEncoderSettings messageEncoderSettings) + { + var roundTripTime = TimeSpan.Zero; + if (_server is ISelectedServer selectedServer) + { + roundTripTime = selectedServer.DescriptionWhenSelected.AverageRoundTripTime; + } + + var protocol = new CommandWireProtocol<TResult>( + CreateClusterClockAdvancingCoreSession(session), + readPreference, + databaseNamespace, + command, + commandPayloads, + commandValidator, + additionalOptions, + postWriteAction, + responseHandling, + resultSerializer, + messageEncoderSettings, + _server.ServerApi, + roundTripTime); + + return ExecuteProtocol(operationContext, protocol, session); + } + + public Task<TResult> CommandAsync<TResult>( + OperationContext operationContext, + ICoreSession session, + ReadPreference readPreference, + DatabaseNamespace databaseNamespace, + BsonDocument command, + IEnumerable<BatchableCommandMessageSection> commandPayloads, + IElementNameValidator commandValidator, + BsonDocument additionalOptions, + Action<IMessageEncoderPostProcessor> postWriteAction, + CommandResponseHandling responseHandling, + IBsonSerializer<TResult> resultSerializer, + MessageEncoderSettings messageEncoderSettings) + { + var roundTripTime = TimeSpan.Zero; + if (_server is ISelectedServer selectedServer) + { + roundTripTime = selectedServer.DescriptionWhenSelected.AverageRoundTripTime; + } + + var protocol = new CommandWireProtocol<TResult>( + CreateClusterClockAdvancingCoreSession(session), + readPreference, + databaseNamespace, + command, + commandPayloads, + commandValidator, + additionalOptions, + postWriteAction, + responseHandling, + resultSerializer, + messageEncoderSettings, + _server.ServerApi, + roundTripTime); + + return ExecuteProtocolAsync(operationContext, protocol, session); + } + + public void Dispose() + { + if (_state.TryChange(ChannelState.Initial, 
ChannelState.Disposed)) + { + if (_decrementOperationsCount) + { + _server.DecrementOutstandingOperationsCount(); + } + + _connection.Dispose(); + } + } + + private ICoreSession CreateClusterClockAdvancingCoreSession(ICoreSession session) + { + return new ClusterClockAdvancingCoreSession(session, _server.ClusterClock); + } + + private TResult ExecuteProtocol<TResult>(OperationContext operationContext, IWireProtocol<TResult> protocol, ICoreSession session) + { + try + { + return protocol.Execute(operationContext, _connection); + } + catch (Exception ex) + { + MarkSessionDirtyIfNeeded(session, ex); + _server.HandleChannelException(_connection, ex); + throw; + } + } + + private async Task<TResult> ExecuteProtocolAsync<TResult>(OperationContext operationContext, IWireProtocol<TResult> protocol, ICoreSession session) + { + try + { + return await protocol.ExecuteAsync(operationContext, _connection).ConfigureAwait(false); + } + catch (Exception ex) + { + MarkSessionDirtyIfNeeded(session, ex); + _server.HandleChannelException(_connection, ex); + throw; + } + } + + public IChannelHandle Fork() + { + ThrowIfDisposed(); + + return new ServerChannel(_server, _connection.Fork(), false); + } + + private void MarkSessionDirtyIfNeeded(ICoreSession session, Exception ex) + { + if (ex is MongoConnectionException) + { + session.MarkDirty(); + } + } + + private void ThrowIfDisposed() + { + if (_state.Value == ChannelState.Disposed) + { + throw new ObjectDisposedException(GetType().Name); + } + } + + // nested types + private static class ChannelState + { + public const int Initial = 0; + public const int Disposed = 1; + } + } +} diff --git a/src/MongoDB.Driver/Core/Servers/ServerMonitor.cs b/src/MongoDB.Driver/Core/Servers/ServerMonitor.cs index ca043166746..a70572a70e0 100644 --- a/src/MongoDB.Driver/Core/Servers/ServerMonitor.cs +++ b/src/MongoDB.Driver/Core/Servers/ServerMonitor.cs @@ -29,6 +29,8 @@ namespace MongoDB.Driver.Core.Servers { internal sealed class ServerMonitor : 
IServerMonitor { + private static readonly TimeSpan __minHeartbeatInterval = TimeSpan.FromMilliseconds(500); + private readonly ServerDescription _baseDescription; private volatile IConnection _connection; private readonly IConnectionFactory _connectionFactory; @@ -73,6 +75,7 @@ public ServerMonitor( serverId, endPoint, Ensure.IsNotNull(serverMonitorSettings, nameof(serverMonitorSettings)).HeartbeatInterval, + Ensure.IsNotNull(serverMonitorSettings, nameof(serverMonitorSettings)).ConnectTimeout, serverApi, loggerFactory?.CreateLogger<RoundTripTimeMonitor>()), serverApi, @@ -206,7 +209,7 @@ public void RequestHeartbeat() } // private methods - private IConnection InitializeConnection(CancellationToken cancellationToken) // called setUpConnection in spec + private IConnection InitializeConnection(OperationContext operationContext) // called setUpConnection in spec { var connection = _connectionFactory.CreateConnection(_serverId, _endPoint); _eventLoggerSdam.LogAndPublish(new ServerHeartbeatStartedEvent(connection.ConnectionId, false)); @@ -214,9 +217,10 @@ private IConnection InitializeConnection(CancellationToken cancellationToken) // var stopwatch = Stopwatch.StartNew(); try { + using var openOperationContext = operationContext.WithTimeout(_serverMonitorSettings.ConnectTimeout); // if we are cancelling, it's because the server has // been shut down and we really don't need to wait. 
- connection.Open(cancellationToken); + connection.Open(openOperationContext); _eventLoggerSdam.LogAndPublish(new ServerHeartbeatSucceededEvent(connection.ConnectionId, stopwatch.Elapsed, false, connection.Description.HelloResult.Wrapped)); } @@ -241,7 +245,6 @@ private CommandWireProtocol<BsonDocument> InitializeHelloProtocol(IConnection co var commandResponseHandling = CommandResponseHandling.Return; if (IsUsingStreamingProtocol(connection.Description.HelloResult)) { - connection.SetReadTimeout(_serverMonitorSettings.ConnectTimeout + _serverMonitorSettings.HeartbeatInterval); commandResponseHandling = CommandResponseHandling.ExhaustAllowed; var veryLargeHeartbeatInterval = TimeSpan.FromDays(1); // the server doesn't support Infinite value, so we set just a big enough value @@ -277,18 +280,24 @@ private bool IsRunningInFaaS() private bool IsUsingStreamingProtocol(HelloResult helloResult) => _isStreamingEnabled && helloResult?.TopologyVersion != null; private HelloResult GetHelloResult( + OperationContext operationContext, IConnection connection, - CommandWireProtocol<BsonDocument> helloProtocol, - CancellationToken cancellationToken) + CommandWireProtocol<BsonDocument> helloProtocol) { - cancellationToken.ThrowIfCancellationRequested(); + var timeout = _serverMonitorSettings.HeartbeatTimeout; + if (IsUsingStreamingProtocol(connection.Description.HelloResult)) + { + timeout = _serverMonitorSettings.ConnectTimeout + _serverMonitorSettings.HeartbeatInterval; + } + operationContext.ThrowIfTimedOutOrCanceled(); _eventLoggerSdam.LogAndPublish(new ServerHeartbeatStartedEvent(connection.ConnectionId, IsUsingStreamingProtocol(connection.Description.HelloResult))); var stopwatch = Stopwatch.StartNew(); try { - var helloResult = HelloHelper.GetResult(connection, helloProtocol, cancellationToken); + using var getHelloOperationContext = operationContext.WithTimeout(timeout); + var helloResult = HelloHelper.GetResult(getHelloOperationContext, connection, helloProtocol); 
stopwatch.Stop(); // RTT check if using polling monitoring @@ -312,6 +321,7 @@ private HelloResult GetHelloResult( private void Heartbeat(CancellationToken cancellationToken) { + using var operationContext = new OperationContext(null, cancellationToken); CommandWireProtocol<BsonDocument> helloProtocol = null; bool processAnother = true; while (processAnother && !cancellationToken.IsCancellationRequested) @@ -327,9 +337,10 @@ private void Heartbeat(CancellationToken cancellationToken) { connection = _connection; } + if (connection == null) { - var initializedConnection = InitializeConnection(cancellationToken); + var initializedConnection = InitializeConnection(operationContext); lock (_lock) { if (_state.Value == State.Disposed) @@ -351,7 +362,8 @@ private void Heartbeat(CancellationToken cancellationToken) { helloProtocol = InitializeHelloProtocol(connection, previousDescription?.HelloOk ?? false); } - heartbeatHelloResult = GetHelloResult(connection, helloProtocol, cancellationToken); + + heartbeatHelloResult = GetHelloResult(operationContext, connection, helloProtocol); } } catch (OperationCanceledException) when (cancellationToken.IsCancellationRequested) @@ -497,7 +509,7 @@ private void MonitorServer(CancellationToken monitorCancellationToken) HeartbeatDelay newHeartbeatDelay; lock (_lock) { - newHeartbeatDelay = new HeartbeatDelay(metronome.GetNextTickDelay(), _serverMonitorSettings.MinHeartbeatInterval); + newHeartbeatDelay = new HeartbeatDelay(metronome.GetNextTickDelay(), __minHeartbeatInterval); _heartbeatDelay?.Dispose(); _heartbeatDelay = newHeartbeatDelay; diff --git a/src/MongoDB.Driver/Core/Servers/ServerMonitorSettings.cs b/src/MongoDB.Driver/Core/Servers/ServerMonitorSettings.cs index 051b40837cb..1d3cf74fee0 100644 --- a/src/MongoDB.Driver/Core/Servers/ServerMonitorSettings.cs +++ b/src/MongoDB.Driver/Core/Servers/ServerMonitorSettings.cs @@ -14,28 +14,12 @@ */ using System; -using System.Net; namespace MongoDB.Driver.Core.Servers { - internal 
class ServerMonitorSettings - { - private readonly TimeSpan _connectTimeout; - private readonly TimeSpan _heartbeatInterval; - private readonly TimeSpan _minHeartbeatInterval; - private readonly ServerMonitoringMode _serverMonitoringMode; - - public ServerMonitorSettings(TimeSpan connectTimeout, TimeSpan heartbeatInterval, Optional<TimeSpan> minHeartbeatInterval = default, Optional<ServerMonitoringMode> serverMonitoringMode = default) - { - _connectTimeout = connectTimeout; - _heartbeatInterval = heartbeatInterval; - _minHeartbeatInterval = minHeartbeatInterval.WithDefault(TimeSpan.FromMilliseconds(500)); - _serverMonitoringMode = serverMonitoringMode.WithDefault(ServerMonitoringMode.Auto); - } - - public TimeSpan ConnectTimeout => _connectTimeout; - public TimeSpan HeartbeatInterval => _heartbeatInterval; - public TimeSpan MinHeartbeatInterval => _minHeartbeatInterval; - public ServerMonitoringMode ServerMonitoringMode => _serverMonitoringMode; - } + internal sealed record ServerMonitorSettings( + TimeSpan ConnectTimeout, + TimeSpan HeartbeatInterval, + TimeSpan HeartbeatTimeout, + ServerMonitoringMode ServerMonitoringMode = ServerMonitoringMode.Auto); } diff --git a/src/MongoDB.Driver/Core/TransactionOptions.cs b/src/MongoDB.Driver/Core/TransactionOptions.cs index 9003b0829d4..c4305a95ad3 100644 --- a/src/MongoDB.Driver/Core/TransactionOptions.cs +++ b/src/MongoDB.Driver/Core/TransactionOptions.cs @@ -1,4 +1,4 @@ -/* Copyright 2018-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,6 +14,7 @@ */ using System; +using MongoDB.Driver.Core.Misc; namespace MongoDB.Driver { @@ -26,6 +27,7 @@ public class TransactionOptions private readonly TimeSpan? _maxCommitTime; private readonly ReadConcern _readConcern; private readonly ReadPreference _readPreference; + private readonly TimeSpan? 
_timeout; private readonly WriteConcern _writeConcern; // public constructors @@ -41,7 +43,27 @@ public TransactionOptions( Optional<ReadPreference> readPreference = default(Optional<ReadPreference>), Optional<WriteConcern> writeConcern = default(Optional<WriteConcern>), Optional<TimeSpan?> maxCommitTime = default(Optional<TimeSpan?>)) + : this(null, readConcern, readPreference, writeConcern, maxCommitTime) { + } + + /// <summary> + /// Initializes a new instance of the <see cref="TransactionOptions" /> class. + /// </summary> + /// <param name="timeout">The per operation timeout</param> + /// <param name="readConcern">The read concern.</param> + /// <param name="readPreference">The read preference.</param> + /// <param name="writeConcern">The write concern.</param> + /// <param name="maxCommitTime">The max commit time.</param> + // TODO: CSOT: Make it public when CSOT will be ready for GA + internal TransactionOptions( + TimeSpan? timeout, + Optional<ReadConcern> readConcern = default(Optional<ReadConcern>), + Optional<ReadPreference> readPreference = default(Optional<ReadPreference>), + Optional<WriteConcern> writeConcern = default(Optional<WriteConcern>), + Optional<TimeSpan?> maxCommitTime = default(Optional<TimeSpan?>)) + { + _timeout = Ensure.IsNullOrValidTimeout(timeout, nameof(timeout)); _readConcern = readConcern.WithDefault(null); _readPreference = readPreference.WithDefault(null); _writeConcern = writeConcern.WithDefault(null); @@ -73,6 +95,12 @@ public TransactionOptions( /// </value> public ReadPreference ReadPreference => _readPreference; + /// <summary> + /// Gets the per operation timeout. + /// </summary> + // TODO: CSOT: Make it public when CSOT will be ready for GA + internal TimeSpan? Timeout => _timeout; + /// <summary> /// Gets the write concern. 
/// </summary> diff --git a/src/MongoDB.Driver/Core/WireProtocol/CommandMessageFieldEncryptor.cs b/src/MongoDB.Driver/Core/WireProtocol/CommandMessageFieldEncryptor.cs index 3e8fc8a3bc3..4b4239fb533 100644 --- a/src/MongoDB.Driver/Core/WireProtocol/CommandMessageFieldEncryptor.cs +++ b/src/MongoDB.Driver/Core/WireProtocol/CommandMessageFieldEncryptor.cs @@ -155,7 +155,7 @@ private CommandRequestMessage CreateEncryptedRequestMessage(CommandRequestMessag unencryptedCommandMessage.ResponseTo, encryptedSections, unencryptedCommandMessage.MoreToCome); - return new CommandRequestMessage(encryptedCommandMessage, unencryptedRequestMessage.ShouldBeSent); + return new CommandRequestMessage(encryptedCommandMessage); } private byte[] GetUnencryptedCommandBytes(CommandRequestMessage unencryptedRequestMessage) diff --git a/src/MongoDB.Driver/Core/WireProtocol/CommandUsingCommandMessageWireProtocol.cs b/src/MongoDB.Driver/Core/WireProtocol/CommandUsingCommandMessageWireProtocol.cs index 9f88a3a7001..4b7af6b7209 100644 --- a/src/MongoDB.Driver/Core/WireProtocol/CommandUsingCommandMessageWireProtocol.cs +++ b/src/MongoDB.Driver/Core/WireProtocol/CommandUsingCommandMessageWireProtocol.cs @@ -49,6 +49,7 @@ internal sealed class CommandUsingCommandMessageWireProtocol<TCommandResult> : I private readonly CommandResponseHandling _responseHandling; private readonly IBsonSerializer<TCommandResult> _resultSerializer; private readonly ServerApi _serverApi; + private readonly TimeSpan _roundTripTime; private readonly ICoreSession _session; // streamable fields private bool _moreToCome = false; // MoreToCome from the previous response @@ -67,7 +68,8 @@ public CommandUsingCommandMessageWireProtocol( IBsonSerializer<TCommandResult> resultSerializer, MessageEncoderSettings messageEncoderSettings, Action<IMessageEncoderPostProcessor> postWriteAction, - ServerApi serverApi) + ServerApi serverApi, + TimeSpan roundTripTime) { if (responseHandling != CommandResponseHandling.Return && responseHandling 
!= CommandResponseHandling.NoResponseExpected && @@ -88,6 +90,7 @@ public CommandUsingCommandMessageWireProtocol( _messageEncoderSettings = messageEncoderSettings; _postWriteAction = postWriteAction; // can be null _serverApi = serverApi; // can be null + _roundTripTime = roundTripTime; if (messageEncoderSettings != null) { @@ -100,7 +103,7 @@ public CommandUsingCommandMessageWireProtocol( public bool MoreToCome => _moreToCome; // public methods - public TCommandResult Execute(IConnection connection, CancellationToken cancellationToken) + public TCommandResult Execute(OperationContext operationContext, IConnection connection) { try { @@ -113,19 +116,20 @@ public TCommandResult Execute(IConnection connection, CancellationToken cancella } else { - message = CreateCommandMessage(connection.Description); - message = AutoEncryptFieldsIfNecessary(message, connection, cancellationToken); + message = CreateCommandMessage(operationContext, connection.Description); + // TODO: CSOT: Propagate operationContext into Encryption + message = AutoEncryptFieldsIfNecessary(message, connection, operationContext.CancellationToken); responseTo = message.WrappedMessage.RequestId; } try { - return SendMessageAndProcessResponse(message, responseTo, connection, cancellationToken); + return SendMessageAndProcessResponse(operationContext, message, responseTo, connection); } catch (MongoCommandException commandException) when (RetryabilityHelper.IsReauthenticationRequested(commandException, _command)) { - connection.Reauthenticate(cancellationToken); - return SendMessageAndProcessResponse(message, responseTo, connection, cancellationToken); + connection.Reauthenticate(operationContext); + return SendMessageAndProcessResponse(operationContext, message, responseTo, connection); } } catch (Exception exception) @@ -137,7 +141,7 @@ public TCommandResult Execute(IConnection connection, CancellationToken cancella } } - public async Task<TCommandResult> ExecuteAsync(IConnection connection, 
CancellationToken cancellationToken) + public async Task<TCommandResult> ExecuteAsync(OperationContext operationContext, IConnection connection) { try { @@ -150,19 +154,20 @@ public async Task<TCommandResult> ExecuteAsync(IConnection connection, Cancellat } else { - message = CreateCommandMessage(connection.Description); - message = await AutoEncryptFieldsIfNecessaryAsync(message, connection, cancellationToken).ConfigureAwait(false); + message = CreateCommandMessage(operationContext, connection.Description); + // TODO: CSOT: Propagate operationContext into Encryption + message = await AutoEncryptFieldsIfNecessaryAsync(message, connection, operationContext.CancellationToken).ConfigureAwait(false); responseTo = message.WrappedMessage.RequestId; } try { - return await SendMessageAndProcessResponseAsync(message, responseTo, connection, cancellationToken).ConfigureAwait(false); + return await SendMessageAndProcessResponseAsync(operationContext, message, responseTo, connection).ConfigureAwait(false); } catch (MongoCommandException commandException) when (RetryabilityHelper.IsReauthenticationRequested(commandException, _command)) { - await connection.ReauthenticateAsync(cancellationToken).ConfigureAwait(false); - return await SendMessageAndProcessResponseAsync(message, responseTo, connection, cancellationToken).ConfigureAwait(false); + await connection.ReauthenticateAsync(operationContext).ConfigureAwait(false); + return await SendMessageAndProcessResponseAsync(operationContext, message, responseTo, connection).ConfigureAwait(false); } } catch (Exception exception) @@ -253,11 +258,11 @@ private async Task<CommandRequestMessage> AutoEncryptFieldsIfNecessaryAsync(Comm } } - private CommandRequestMessage CreateCommandMessage(ConnectionDescription connectionDescription) + private CommandRequestMessage CreateCommandMessage(OperationContext operationContext, ConnectionDescription connectionDescription) { var requestId = RequestMessage.GetNextRequestId(); var responseTo = 0; - 
var sections = CreateSections(connectionDescription); + var sections = CreateSections(operationContext, connectionDescription); var moreToComeRequest = _responseHandling == CommandResponseHandling.NoResponseExpected; @@ -266,14 +271,13 @@ private CommandRequestMessage CreateCommandMessage(ConnectionDescription connect PostWriteAction = _postWriteAction, ExhaustAllowed = _responseHandling == CommandResponseHandling.ExhaustAllowed, }; - var shouldBeSent = (Func<bool>)(() => true); - return new CommandRequestMessage(wrappedMessage, shouldBeSent); + return new CommandRequestMessage(wrappedMessage); } - private IEnumerable<CommandMessageSection> CreateSections(ConnectionDescription connectionDescription) + private IEnumerable<CommandMessageSection> CreateSections(OperationContext operationContext, ConnectionDescription connectionDescription) { - var type0Section = CreateType0Section(connectionDescription); + var type0Section = CreateType0Section(operationContext, connectionDescription); if (_commandPayloads == null) { return new[] { type0Section }; @@ -284,7 +288,7 @@ private IEnumerable<CommandMessageSection> CreateSections(ConnectionDescription } } - private Type0CommandMessageSection<BsonDocument> CreateType0Section(ConnectionDescription connectionDescription) + private Type0CommandMessageSection<BsonDocument> CreateType0Section(OperationContext operationContext, ConnectionDescription connectionDescription) { var extraElements = new List<BsonElement>(); @@ -370,6 +374,24 @@ private Type0CommandMessageSection<BsonDocument> CreateType0Section(ConnectionDe } } + if (operationContext.IsRootContextTimeoutConfigured() && _roundTripTime > TimeSpan.Zero) + { + var serverTimeout = operationContext.RemainingTimeout; + if (serverTimeout != Timeout.InfiniteTimeSpan) + { + serverTimeout -= _roundTripTime; + // Server expects maxTimeMS as an integer, we should truncate it to give server a chance to reply with Timeout. 
+ // Do not want to use MaxTimeHelper here, because it has different logic (rounds up, allow zero value and throw ArgumentException on negative values instead of TimeoutException). + var maxtimeMs = (int)serverTimeout.TotalMilliseconds; + if (maxtimeMs <= 0) + { + throw new TimeoutException(); + } + + AddIfNotAlreadyAdded("maxTimeMS", maxtimeMs); + } + } + var elementAppendingSerializer = new ElementAppendingSerializer<BsonDocument>(BsonDocumentSerializer.Instance, extraElements); return new Type0CommandMessageSection<BsonDocument>(_command, elementAppendingSerializer); @@ -527,14 +549,15 @@ private void SaveResponseInfo(CommandResponseMessage response) _moreToCome = response.WrappedMessage.MoreToCome; } - private TCommandResult SendMessageAndProcessResponse(CommandRequestMessage message, int responseTo, IConnection connection, CancellationToken cancellationToken) + private TCommandResult SendMessageAndProcessResponse(OperationContext operationContext, CommandRequestMessage message, int responseTo, IConnection connection) { var responseExpected = true; if (message != null) { try { - connection.SendMessage(message, _messageEncoderSettings, cancellationToken); + ThrowIfRemainingTimeoutLessThenRoundTripTime(operationContext); + connection.SendMessage(operationContext, message, _messageEncoderSettings); } finally { @@ -550,8 +573,9 @@ private TCommandResult SendMessageAndProcessResponse(CommandRequestMessage messa if (responseExpected) { var encoderSelector = new CommandResponseMessageEncoderSelector(); - var response = (CommandResponseMessage)connection.ReceiveMessage(responseTo, encoderSelector, _messageEncoderSettings, cancellationToken); - response = AutoDecryptFieldsIfNecessary(response, cancellationToken); + var response = (CommandResponseMessage)connection.ReceiveMessage(operationContext, responseTo, encoderSelector, _messageEncoderSettings); + // TODO: CSOT: Propagate operationContext into Encryption + response = AutoDecryptFieldsIfNecessary(response, 
operationContext.CancellationToken); var result = ProcessResponse(connection.ConnectionId, response.WrappedMessage); SaveResponseInfo(response); return result; @@ -562,14 +586,15 @@ private TCommandResult SendMessageAndProcessResponse(CommandRequestMessage messa } } - private async Task<TCommandResult> SendMessageAndProcessResponseAsync(CommandRequestMessage message, int responseTo, IConnection connection, CancellationToken cancellationToken) + private async Task<TCommandResult> SendMessageAndProcessResponseAsync(OperationContext operationContext, CommandRequestMessage message, int responseTo, IConnection connection) { var responseExpected = true; if (message != null) { try { - await connection.SendMessageAsync(message, _messageEncoderSettings, cancellationToken).ConfigureAwait(false); + ThrowIfRemainingTimeoutLessThenRoundTripTime(operationContext); + await connection.SendMessageAsync(operationContext, message, _messageEncoderSettings).ConfigureAwait(false); } finally { @@ -584,8 +609,9 @@ private async Task<TCommandResult> SendMessageAndProcessResponseAsync(CommandReq if (responseExpected) { var encoderSelector = new CommandResponseMessageEncoderSelector(); - var response = (CommandResponseMessage)await connection.ReceiveMessageAsync(responseTo, encoderSelector, _messageEncoderSettings, cancellationToken).ConfigureAwait(false); - response = await AutoDecryptFieldsIfNecessaryAsync(response, cancellationToken).ConfigureAwait(false); + var response = (CommandResponseMessage)await connection.ReceiveMessageAsync(operationContext, responseTo, encoderSelector, _messageEncoderSettings).ConfigureAwait(false); + // TODO: CSOT: Propagate operationContext into Encryption + response = await AutoDecryptFieldsIfNecessaryAsync(response, operationContext.CancellationToken).ConfigureAwait(false); var result = ProcessResponse(connection.ConnectionId, response.WrappedMessage); SaveResponseInfo(response); return result; @@ -609,6 +635,18 @@ private bool 
ShouldAddTransientTransactionError(MongoException exception) return false; } + private void ThrowIfRemainingTimeoutLessThenRoundTripTime(OperationContext operationContext) + { + if (operationContext.RemainingTimeout == Timeout.InfiniteTimeSpan || + _roundTripTime == TimeSpan.Zero || + operationContext.RemainingTimeout > _roundTripTime) + { + return; + } + + throw new TimeoutException(); + } + private MongoException WrapNotSupportedRetryableWriteException(MongoCommandException exception) { const string friendlyErrorMessage = diff --git a/src/MongoDB.Driver/Core/WireProtocol/CommandUsingQueryMessageWireProtocol.cs b/src/MongoDB.Driver/Core/WireProtocol/CommandUsingQueryMessageWireProtocol.cs index 6d18ada9747..ecfb53f0f70 100644 --- a/src/MongoDB.Driver/Core/WireProtocol/CommandUsingQueryMessageWireProtocol.cs +++ b/src/MongoDB.Driver/Core/WireProtocol/CommandUsingQueryMessageWireProtocol.cs @@ -18,7 +18,6 @@ using System.Linq; using System.Reflection; using System.Text; -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Bson.IO; @@ -116,11 +115,11 @@ private QueryMessage CreateMessage(ConnectionDescription connectionDescription, #pragma warning restore 618 } - public TCommandResult Execute(IConnection connection, CancellationToken cancellationToken) + public TCommandResult Execute(OperationContext operationContext, IConnection connection) { bool messageContainsSessionId; var message = CreateMessage(connection.Description, out messageContainsSessionId); - connection.SendMessage(message, _messageEncoderSettings, cancellationToken); + connection.SendMessage(operationContext, message, _messageEncoderSettings); if (messageContainsSessionId) { _session.WasUsed(); @@ -129,20 +128,20 @@ public TCommandResult Execute(IConnection connection, CancellationToken cancella switch (message.ResponseHandling) { case CommandResponseHandling.Ignore: - IgnoreResponse(connection, message, cancellationToken); + IgnoreResponse(operationContext, 
connection, message); return default(TCommandResult); default: var encoderSelector = new ReplyMessageEncoderSelector<RawBsonDocument>(RawBsonDocumentSerializer.Instance); - var reply = connection.ReceiveMessage(message.RequestId, encoderSelector, _messageEncoderSettings, cancellationToken); + var reply = connection.ReceiveMessage(operationContext, message.RequestId, encoderSelector, _messageEncoderSettings); return ProcessReply(connection.ConnectionId, (ReplyMessage<RawBsonDocument>)reply); } } - public async Task<TCommandResult> ExecuteAsync(IConnection connection, CancellationToken cancellationToken) + public async Task<TCommandResult> ExecuteAsync(OperationContext operationContext, IConnection connection) { bool messageContainsSessionId; var message = CreateMessage(connection.Description, out messageContainsSessionId); - await connection.SendMessageAsync(message, _messageEncoderSettings, cancellationToken).ConfigureAwait(false); + await connection.SendMessageAsync(operationContext, message, _messageEncoderSettings).ConfigureAwait(false); if (messageContainsSessionId) { _session.WasUsed(); @@ -151,11 +150,11 @@ public async Task<TCommandResult> ExecuteAsync(IConnection connection, Cancellat switch (message.ResponseHandling) { case CommandResponseHandling.Ignore: - IgnoreResponse(connection, message, cancellationToken); + IgnoreResponse(operationContext, connection, message); return default(TCommandResult); default: var encoderSelector = new ReplyMessageEncoderSelector<RawBsonDocument>(RawBsonDocumentSerializer.Instance); - var reply = await connection.ReceiveMessageAsync(message.RequestId, encoderSelector, _messageEncoderSettings, cancellationToken).ConfigureAwait(false); + var reply = await connection.ReceiveMessageAsync(operationContext, message.RequestId, encoderSelector, _messageEncoderSettings).ConfigureAwait(false); return ProcessReply(connection.ConnectionId, (ReplyMessage<RawBsonDocument>)reply); } } @@ -230,10 +229,10 @@ private IBsonSerializer 
CreateSizeLimitingPayloadSerializer(Type1CommandMessageS return (IBsonSerializer)constructorInfo.Invoke(new object[] { itemSerializer, itemElementNameValidator, maxBatchCount, maxItemSize, maxBatchSize }); } - private void IgnoreResponse(IConnection connection, QueryMessage message, CancellationToken cancellationToken) + private void IgnoreResponse(OperationContext operationContext, IConnection connection, QueryMessage message) { var encoderSelector = new ReplyMessageEncoderSelector<IgnoredReply>(IgnoredReplySerializer.Instance); - connection.ReceiveMessageAsync(message.RequestId, encoderSelector, _messageEncoderSettings, cancellationToken).IgnoreExceptions(); + connection.ReceiveMessageAsync(operationContext, message.RequestId, encoderSelector, _messageEncoderSettings).IgnoreExceptions(); } [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Usage", "CA2202:Do not dispose objects multiple times")] diff --git a/src/MongoDB.Driver/Core/WireProtocol/CommandWireProtocol.cs b/src/MongoDB.Driver/Core/WireProtocol/CommandWireProtocol.cs index 3844f13aec0..3871fdddca8 100644 --- a/src/MongoDB.Driver/Core/WireProtocol/CommandWireProtocol.cs +++ b/src/MongoDB.Driver/Core/WireProtocol/CommandWireProtocol.cs @@ -16,7 +16,6 @@ using System; using System.Collections.Generic; using System.Linq; -using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; using MongoDB.Bson.IO; @@ -45,6 +44,7 @@ internal sealed class CommandWireProtocol<TCommandResult> : IWireProtocol<TComma private readonly CommandResponseHandling _responseHandling; private readonly IBsonSerializer<TCommandResult> _resultSerializer; private readonly ServerApi _serverApi; + private readonly TimeSpan _roundTripTime; private readonly ICoreSession _session; // constructors @@ -86,7 +86,8 @@ public CommandWireProtocol( commandResponseHandling, resultSerializer, messageEncoderSettings, - serverApi) + serverApi, + roundTripTime: TimeSpan.Zero) { } @@ -102,7 +103,8 @@ public CommandWireProtocol( 
CommandResponseHandling responseHandling, IBsonSerializer<TCommandResult> resultSerializer, MessageEncoderSettings messageEncoderSettings, - ServerApi serverApi) + ServerApi serverApi, + TimeSpan roundTripTime) { if (responseHandling != CommandResponseHandling.Return && responseHandling != CommandResponseHandling.NoResponseExpected && @@ -123,22 +125,23 @@ public CommandWireProtocol( _messageEncoderSettings = messageEncoderSettings; _postWriteAction = postWriteAction; // can be null _serverApi = serverApi; // can be null + _roundTripTime = roundTripTime; } // public properties public bool MoreToCome => _cachedWireProtocol?.MoreToCome ?? false; // public methods - public TCommandResult Execute(IConnection connection, CancellationToken cancellationToken) + public TCommandResult Execute(OperationContext operationContext, IConnection connection) { var supportedProtocol = CreateSupportedWireProtocol(connection); - return supportedProtocol.Execute(connection, cancellationToken); + return supportedProtocol.Execute(operationContext, connection); } - public Task<TCommandResult> ExecuteAsync(IConnection connection, CancellationToken cancellationToken) + public Task<TCommandResult> ExecuteAsync(OperationContext operationContext, IConnection connection) { var supportedProtocol = CreateSupportedWireProtocol(connection); - return supportedProtocol.ExecuteAsync(connection, cancellationToken); + return supportedProtocol.ExecuteAsync(operationContext, connection); } // private methods @@ -156,7 +159,8 @@ private IWireProtocol<TCommandResult> CreateCommandUsingCommandMessageWireProtoc _resultSerializer, _messageEncoderSettings, _postWriteAction, - _serverApi); + _serverApi, + _roundTripTime); } private IWireProtocol<TCommandResult> CreateCommandUsingQueryMessageWireProtocol() @@ -205,7 +209,7 @@ private IWireProtocol<TCommandResult> CreateSupportedWireProtocol(IConnection co } else { - // The driver doesn't support servers less than 4.0. 
However it's still useful to support OP_QUERY for initial handshake. + // The driver doesn't support servers less than 4.2. However it's still useful to support OP_QUERY for initial handshake. // For pre-3.6 servers, it will allow throwing unsupported wire protocol exception on the driver side. // If we only supported OP_MSG, we would throw a general server error about closing connection without actual reason of why it happened return _cachedWireProtocol = CreateCommandUsingQueryMessageWireProtocol(); diff --git a/src/MongoDB.Driver/Core/WireProtocol/IWireProtocol.cs b/src/MongoDB.Driver/Core/WireProtocol/IWireProtocol.cs index 025e7dd5acd..dee26e7dd87 100644 --- a/src/MongoDB.Driver/Core/WireProtocol/IWireProtocol.cs +++ b/src/MongoDB.Driver/Core/WireProtocol/IWireProtocol.cs @@ -13,7 +13,6 @@ * limitations under the License. */ -using System.Threading; using System.Threading.Tasks; using MongoDB.Driver.Core.Connections; @@ -22,14 +21,14 @@ namespace MongoDB.Driver.Core.WireProtocol internal interface IWireProtocol { bool MoreToCome { get; } - void Execute(IConnection connection, CancellationToken cancellationToken = default(CancellationToken)); - Task ExecuteAsync(IConnection connection, CancellationToken cancellationToken = default(CancellationToken)); + void Execute(OperationContext operationContext, IConnection connection); + Task ExecuteAsync(OperationContext operationContext, IConnection connection); } internal interface IWireProtocol<TResult> { bool MoreToCome { get; } - TResult Execute(IConnection connection, CancellationToken cancellationToken = default(CancellationToken)); - Task<TResult> ExecuteAsync(IConnection connection, CancellationToken cancellationToken = default(CancellationToken)); + TResult Execute(OperationContext operationContext, IConnection connection); + Task<TResult> ExecuteAsync(OperationContext operationContext, IConnection connection); } } diff --git a/src/MongoDB.Driver/Core/WireProtocol/Messages/CommandMessageSection.cs 
b/src/MongoDB.Driver/Core/WireProtocol/Messages/CommandMessageSection.cs index 0cfbf749696..384ff5425f6 100644 --- a/src/MongoDB.Driver/Core/WireProtocol/Messages/CommandMessageSection.cs +++ b/src/MongoDB.Driver/Core/WireProtocol/Messages/CommandMessageSection.cs @@ -133,7 +133,7 @@ internal sealed class ClientBulkWriteOpsCommandMessageSection : BatchableCommand { public ClientBulkWriteOpsCommandMessageSection( IBatchableSource<BulkWriteModel> operations, - Dictionary<int, BsonValue> idsMap, + Dictionary<int, object> idsMap, int? maxBatchCount, int? maxDocumentSize, RenderArgs<BsonDocument> renderArgs) @@ -144,7 +144,7 @@ public ClientBulkWriteOpsCommandMessageSection( RenderArgs = renderArgs; } - public Dictionary<int, BsonValue> IdsMap { get; } + public Dictionary<int, object> IdsMap { get; } public new IBatchableSource<BulkWriteModel> Documents { get; } public RenderArgs<BsonDocument> RenderArgs { get; } } diff --git a/src/MongoDB.Driver/Core/WireProtocol/Messages/CommandRequestMessage.cs b/src/MongoDB.Driver/Core/WireProtocol/Messages/CommandRequestMessage.cs index 98c2a4fb6a5..d488eeb3e8e 100644 --- a/src/MongoDB.Driver/Core/WireProtocol/Messages/CommandRequestMessage.cs +++ b/src/MongoDB.Driver/Core/WireProtocol/Messages/CommandRequestMessage.cs @@ -25,8 +25,8 @@ internal sealed class CommandRequestMessage : RequestMessage private readonly CommandMessage _wrappedMessage; // constructors - public CommandRequestMessage(CommandMessage wrappedMessage, Func<bool> shouldBeSent) - : base(Ensure.IsNotNull(wrappedMessage, nameof(wrappedMessage)).RequestId, shouldBeSent) + public CommandRequestMessage(CommandMessage wrappedMessage) + : base(Ensure.IsNotNull(wrappedMessage, nameof(wrappedMessage)).RequestId) { _wrappedMessage = Ensure.IsNotNull(wrappedMessage, nameof(wrappedMessage)); } diff --git a/src/MongoDB.Driver/Core/WireProtocol/Messages/Encoders/BinaryEncoders/ClientBulkWriteOpsSectionFormatter.cs 
b/src/MongoDB.Driver/Core/WireProtocol/Messages/Encoders/BinaryEncoders/ClientBulkWriteOpsSectionFormatter.cs index 8fce04fb521..353f45dab5e 100644 --- a/src/MongoDB.Driver/Core/WireProtocol/Messages/Encoders/BinaryEncoders/ClientBulkWriteOpsSectionFormatter.cs +++ b/src/MongoDB.Driver/Core/WireProtocol/Messages/Encoders/BinaryEncoders/ClientBulkWriteOpsSectionFormatter.cs @@ -32,7 +32,7 @@ internal sealed class ClientBulkWriteOpsSectionFormatter : ICommandMessageSectio private MemoryStream _nsInfoMemoryStream; private BsonBinaryWriter _nsInfoWriter; private IBsonSerializerRegistry _serializerRegistry; - private Dictionary<int, BsonValue> _idsMap; + private Dictionary<int, object> _idsMap; private int _currentIndex; public ClientBulkWriteOpsSectionFormatter(long? maxSize) @@ -150,7 +150,7 @@ public void RenderInsertOne<TDocument>(RenderArgs<BsonDocument> renderArgs, Bson WriteStartModel(serializationContext, "insert", model); var documentSerializer = _serializerRegistry.GetSerializer<TDocument>(); var documentId = documentSerializer.SetDocumentIdIfMissing(null, model.Document); - _idsMap[_currentIndex] = BsonValue.Create(documentId); + _idsMap[_currentIndex] = documentId; serializationContext.Writer.WriteName("document"); documentSerializer.Serialize(serializationContext, model.Document); WriteEndModel(serializationContext); diff --git a/src/MongoDB.Driver/Core/WireProtocol/Messages/Encoders/BinaryEncoders/CommandRequestMessageBinaryEncoder.cs b/src/MongoDB.Driver/Core/WireProtocol/Messages/Encoders/BinaryEncoders/CommandRequestMessageBinaryEncoder.cs index a73e4ddcf0d..563d2e26ca0 100644 --- a/src/MongoDB.Driver/Core/WireProtocol/Messages/Encoders/BinaryEncoders/CommandRequestMessageBinaryEncoder.cs +++ b/src/MongoDB.Driver/Core/WireProtocol/Messages/Encoders/BinaryEncoders/CommandRequestMessageBinaryEncoder.cs @@ -32,7 +32,7 @@ public CommandRequestMessageBinaryEncoder(CommandMessageBinaryEncoder wrappedEnc public CommandRequestMessage ReadMessage() { var 
wrappedMessage = (CommandMessage)_wrappedEncoder.ReadMessage(); - return new CommandRequestMessage(wrappedMessage, null); + return new CommandRequestMessage(wrappedMessage); } public void WriteMessage(CommandRequestMessage message) diff --git a/src/MongoDB.Driver/Core/WireProtocol/Messages/Encoders/JsonEncoders/CommandRequestMessageJsonEncoder.cs b/src/MongoDB.Driver/Core/WireProtocol/Messages/Encoders/JsonEncoders/CommandRequestMessageJsonEncoder.cs index 3d720858926..4ff6ab02b5d 100644 --- a/src/MongoDB.Driver/Core/WireProtocol/Messages/Encoders/JsonEncoders/CommandRequestMessageJsonEncoder.cs +++ b/src/MongoDB.Driver/Core/WireProtocol/Messages/Encoders/JsonEncoders/CommandRequestMessageJsonEncoder.cs @@ -32,7 +32,7 @@ public CommandRequestMessageJsonEncoder(CommandMessageJsonEncoder wrappedEncoder public CommandRequestMessage ReadMessage() { var wrappedMessage = (CommandMessage)_wrappedEncoder.ReadMessage(); - return new CommandRequestMessage(wrappedMessage, null); + return new CommandRequestMessage(wrappedMessage); } public void WriteMessage(CommandRequestMessage message) diff --git a/src/MongoDB.Driver/Core/WireProtocol/Messages/QueryMessage.cs b/src/MongoDB.Driver/Core/WireProtocol/Messages/QueryMessage.cs index 0753233425e..feb0b0ffcf5 100644 --- a/src/MongoDB.Driver/Core/WireProtocol/Messages/QueryMessage.cs +++ b/src/MongoDB.Driver/Core/WireProtocol/Messages/QueryMessage.cs @@ -52,8 +52,7 @@ public QueryMessage( bool partialOk, bool noCursorTimeout, bool tailableCursor, - bool awaitData, - Func<bool> shouldBeSent = null) + bool awaitData) #pragma warning disable 618 : this( requestId, @@ -68,8 +67,7 @@ public QueryMessage( noCursorTimeout, oplogReplay: false, tailableCursor, - awaitData, - shouldBeSent) + awaitData) #pragma warning restore 618 { } @@ -88,9 +86,8 @@ public QueryMessage( bool noCursorTimeout, bool oplogReplay, // obsolete: OplogReplay is ignored by server versions 4.4.0 and newer bool tailableCursor, - bool awaitData, - Func<bool> 
shouldBeSent = null) - : base(requestId, shouldBeSent) + bool awaitData) + : base(requestId) { _collectionNamespace = Ensure.IsNotNull(collectionNamespace, nameof(collectionNamespace)); _query = Ensure.IsNotNull(query, nameof(query)); diff --git a/src/MongoDB.Driver/Core/WireProtocol/Messages/RequestMessage.cs b/src/MongoDB.Driver/Core/WireProtocol/Messages/RequestMessage.cs index 95189ad8d7b..a1e0d7bbd70 100644 --- a/src/MongoDB.Driver/Core/WireProtocol/Messages/RequestMessage.cs +++ b/src/MongoDB.Driver/Core/WireProtocol/Messages/RequestMessage.cs @@ -13,7 +13,6 @@ * limitations under the License. */ -using System; using System.Threading; namespace MongoDB.Driver.Core.WireProtocol.Messages @@ -39,14 +38,12 @@ public static int GetNextRequestId() // fields private readonly int _requestId; - private readonly Func<bool> _shouldBeSent; private bool _wasSent; // constructors - protected RequestMessage(int requestId, Func<bool> shouldBeSent = null) + protected RequestMessage(int requestId) { _requestId = requestId; - _shouldBeSent = shouldBeSent; } // properties @@ -55,11 +52,6 @@ public int RequestId get { return _requestId; } } - public Func<bool> ShouldBeSent - { - get { return _shouldBeSent; } - } - public bool WasSent { get { return _wasSent; } diff --git a/src/MongoDB.Driver/Core/WireProtocol/QueryWireProtocol.cs b/src/MongoDB.Driver/Core/WireProtocol/QueryWireProtocol.cs deleted file mode 100644 index 405fb21608f..00000000000 --- a/src/MongoDB.Driver/Core/WireProtocol/QueryWireProtocol.cs +++ /dev/null @@ -1,182 +0,0 @@ -/* Copyright 2010-present MongoDB Inc. -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. 
-* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ - -using System; -using System.Threading; -using System.Threading.Tasks; -using MongoDB.Bson; -using MongoDB.Bson.IO; -using MongoDB.Bson.Serialization; -using MongoDB.Driver.Core.Connections; -using MongoDB.Driver.Core.Misc; -using MongoDB.Driver.Core.WireProtocol.Messages; -using MongoDB.Driver.Core.WireProtocol.Messages.Encoders; - -namespace MongoDB.Driver.Core.WireProtocol -{ - internal sealed class QueryWireProtocol<TDocument> : IWireProtocol<CursorBatch<TDocument>> - { - // fields - private readonly bool _awaitData; - private readonly int _batchSize; - private readonly CollectionNamespace _collectionNamespace; - private readonly MessageEncoderSettings _messageEncoderSettings; - private readonly BsonDocument _fields; - private readonly bool _noCursorTimeout; - private readonly bool _oplogReplay; - private readonly bool _partialOk; - private readonly BsonDocument _query; - private readonly IElementNameValidator _queryValidator; - private readonly IBsonSerializer<TDocument> _serializer; - private readonly int _skip; - private readonly bool _secondaryOk; - private readonly bool _tailableCursor; - - // constructors - public QueryWireProtocol( - CollectionNamespace collectionNamespace, - BsonDocument query, - BsonDocument fields, - IElementNameValidator queryValidator, - int skip, - int batchSize, - bool secondaryOk, - bool partialOk, - bool noCursorTimeout, - bool tailableCursor, - bool awaitData, - IBsonSerializer<TDocument> serializer, - MessageEncoderSettings messageEncoderSettings) -#pragma warning disable 618 
- : this( - collectionNamespace, - query, - fields, - queryValidator, - skip, - batchSize, - secondaryOk, - partialOk, - noCursorTimeout, - oplogReplay: false, - tailableCursor, - awaitData, - serializer, - messageEncoderSettings) -#pragma warning restore 618 - { - } - - [Obsolete("Use a constructor that does not have an oplogReplay parameter instead.")] - public QueryWireProtocol( - CollectionNamespace collectionNamespace, - BsonDocument query, - BsonDocument fields, - IElementNameValidator queryValidator, - int skip, - int batchSize, - bool secondaryOk, - bool partialOk, - bool noCursorTimeout, - bool oplogReplay, // obsolete: OplogReplay is ignored by server versions 4.4.0 and newer - bool tailableCursor, - bool awaitData, - IBsonSerializer<TDocument> serializer, - MessageEncoderSettings messageEncoderSettings) - { - _collectionNamespace = Ensure.IsNotNull(collectionNamespace, nameof(collectionNamespace)); - _query = Ensure.IsNotNull(query, nameof(query)); - _fields = fields; // can be null - _queryValidator = Ensure.IsNotNull(queryValidator, nameof(queryValidator)); - _skip = Ensure.IsGreaterThanOrEqualToZero(skip, nameof(skip)); - _batchSize = batchSize; // can be negative - _secondaryOk = secondaryOk; - _partialOk = partialOk; - _noCursorTimeout = noCursorTimeout; - _oplogReplay = oplogReplay; - _tailableCursor = tailableCursor; - _awaitData = awaitData; - _serializer = Ensure.IsNotNull(serializer, nameof(serializer)); - _messageEncoderSettings = messageEncoderSettings; - } - - // public properties - public bool MoreToCome => false; - - // methods - private QueryMessage CreateMessage() - { -#pragma warning disable 618 - return new QueryMessage( - RequestMessage.GetNextRequestId(), - _collectionNamespace, - _query, - _fields, - _queryValidator, - _skip, - _batchSize, - _secondaryOk, - _partialOk, - _noCursorTimeout, - _oplogReplay, - _tailableCursor, - _awaitData); -#pragma warning restore 618 - } - - public CursorBatch<TDocument> Execute(IConnection 
connection, CancellationToken cancellationToken) - { - var message = CreateMessage(); - connection.SendMessage(message, _messageEncoderSettings, cancellationToken); - var encoderSelector = new ReplyMessageEncoderSelector<TDocument>(_serializer); - var reply = connection.ReceiveMessage(message.RequestId, encoderSelector, _messageEncoderSettings, cancellationToken); - return ProcessReply(connection.ConnectionId, (ReplyMessage<TDocument>)reply); - } - - public async Task<CursorBatch<TDocument>> ExecuteAsync(IConnection connection, CancellationToken cancellationToken) - { - var message = CreateMessage(); - await connection.SendMessageAsync(message, _messageEncoderSettings, cancellationToken).ConfigureAwait(false); - var encoderSelector = new ReplyMessageEncoderSelector<TDocument>(_serializer); - var reply = await connection.ReceiveMessageAsync(message.RequestId, encoderSelector, _messageEncoderSettings, cancellationToken).ConfigureAwait(false); - return ProcessReply(connection.ConnectionId, (ReplyMessage<TDocument>)reply); - } - - private CursorBatch<TDocument> ProcessReply(ConnectionId connectionId, ReplyMessage<TDocument> reply) - { - if (reply.QueryFailure) - { - var response = reply.QueryFailureDocument; - - var notPrimaryOrNodeIsRecoveringException = ExceptionMapper.MapNotPrimaryOrNodeIsRecovering(connectionId, _query, response, "$err"); - if (notPrimaryOrNodeIsRecoveringException != null) - { - throw notPrimaryOrNodeIsRecoveringException; - } - - var mappedException = ExceptionMapper.Map(connectionId, response); - if (mappedException != null) - { - throw mappedException; - } - - var message = string.Format("QueryFailure flag was true (response was {0}).", response.ToJson()); - throw new MongoQueryException(connectionId, message, _query, response); - } - - return new CursorBatch<TDocument>(reply.CursorId, reply.Documents); - } - } -} diff --git a/src/MongoDB.Driver/Core/WireProtocol/WriteWireProtocolBase.cs 
b/src/MongoDB.Driver/Core/WireProtocol/WriteWireProtocolBase.cs deleted file mode 100644 index 83efcc179ec..00000000000 --- a/src/MongoDB.Driver/Core/WireProtocol/WriteWireProtocolBase.cs +++ /dev/null @@ -1,185 +0,0 @@ -/* Copyright 2010-present MongoDB Inc. -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ - -using System; -using System.Collections.Generic; -using System.Linq; -using System.Threading; -using System.Threading.Tasks; -using MongoDB.Bson; -using MongoDB.Bson.IO; -using MongoDB.Bson.Serialization.Serializers; -using MongoDB.Driver.Core.Connections; -using MongoDB.Driver.Core.Misc; -using MongoDB.Driver.Core.WireProtocol.Messages; -using MongoDB.Driver.Core.WireProtocol.Messages.Encoders; - -namespace MongoDB.Driver.Core.WireProtocol -{ - internal abstract class WriteWireProtocolBase : IWireProtocol<WriteConcernResult> - { - // fields - private readonly CollectionNamespace _collectionNamespace; - private readonly MessageEncoderSettings _messageEncoderSettings; - private readonly Func<bool> _shouldSendGetLastError; - private readonly WriteConcern _writeConcern; - - // constructors - protected WriteWireProtocolBase( - CollectionNamespace collectionNamespace, - MessageEncoderSettings messageEncoderSettings, - WriteConcern writeConcern, - Func<bool> shouldSendGetLastError = null) - { - _collectionNamespace = Ensure.IsNotNull(collectionNamespace, nameof(collectionNamespace)); - _messageEncoderSettings = 
messageEncoderSettings; - _writeConcern = Ensure.IsNotNull(writeConcern, nameof(writeConcern)); - _shouldSendGetLastError = shouldSendGetLastError; - } - - // properties - protected CollectionNamespace CollectionNamespace - { - get { return _collectionNamespace; } - } - - protected WriteConcern WriteConcern - { - get { return _writeConcern; } - } - - // public properties - public bool MoreToCome => false; - - // methods - private BsonDocument CreateGetLastErrorCommand() - { - var command = _writeConcern.ToBsonDocument(); - command.InsertAt(0, new BsonElement("getLastError", 1)); - return command; - } - - private QueryMessage CreateGetLastErrorMessage(BsonDocument getLastErrorCommand) - { -#pragma warning disable 618 - return new QueryMessage( - RequestMessage.GetNextRequestId(), - _collectionNamespace.DatabaseNamespace.CommandCollection, - getLastErrorCommand, - null, - NoOpElementNameValidator.Instance, - 0, - -1, - true, - false, - false, - false, - false, - false, - _shouldSendGetLastError); -#pragma warning restore 618 - } - - private List<RequestMessage> CreateMessages(IConnection connection, out QueryMessage getLastErrorMessage) - { - var messages = new List<RequestMessage>(); - - var writeMessage = CreateWriteMessage(connection); - messages.Add(writeMessage); - - getLastErrorMessage = null; - if (_writeConcern.IsAcknowledged) - { - var getLastErrorCommand = CreateGetLastErrorCommand(); - getLastErrorMessage = CreateGetLastErrorMessage(getLastErrorCommand); - messages.Add(getLastErrorMessage); - } - - return messages; - } - - protected abstract RequestMessage CreateWriteMessage(IConnection connection); - - public WriteConcernResult Execute(IConnection connection, CancellationToken cancellationToken) - { - QueryMessage getLastErrorMessage; - var messages = CreateMessages(connection, out getLastErrorMessage); - - connection.SendMessages(messages, _messageEncoderSettings, cancellationToken); - if (getLastErrorMessage != null && getLastErrorMessage.WasSent) - { - 
var encoderSelector = new ReplyMessageEncoderSelector<BsonDocument>(BsonDocumentSerializer.Instance); - var reply = connection.ReceiveMessage(getLastErrorMessage.RequestId, encoderSelector, _messageEncoderSettings, cancellationToken); - return ProcessReply(connection.ConnectionId, getLastErrorMessage.Query, (ReplyMessage<BsonDocument>)reply); - } - else - { - return null; - } - } - - public async Task<WriteConcernResult> ExecuteAsync(IConnection connection, CancellationToken cancellationToken) - { - QueryMessage getLastErrorMessage; - var messages = CreateMessages(connection, out getLastErrorMessage); - - await connection.SendMessagesAsync(messages, _messageEncoderSettings, cancellationToken).ConfigureAwait(false); - if (getLastErrorMessage != null && getLastErrorMessage.WasSent) - { - var encoderSelector = new ReplyMessageEncoderSelector<BsonDocument>(BsonDocumentSerializer.Instance); - var reply = await connection.ReceiveMessageAsync(getLastErrorMessage.RequestId, encoderSelector, _messageEncoderSettings, cancellationToken).ConfigureAwait(false); - return ProcessReply(connection.ConnectionId, getLastErrorMessage.Query, (ReplyMessage<BsonDocument>)reply); - } - else - { - return null; - } - } - - private WriteConcernResult ProcessReply(ConnectionId connectionId, BsonDocument getLastErrorCommand, ReplyMessage<BsonDocument> reply) - { - if (reply.NumberReturned == 0) - { - throw new MongoCommandException(connectionId, "GetLastError reply had no documents.", getLastErrorCommand); - } - if (reply.NumberReturned > 1) - { - throw new MongoCommandException(connectionId, "GetLastError reply had more than one document.", getLastErrorCommand); - } - if (reply.QueryFailure) - { - throw new MongoCommandException(connectionId, "GetLastError reply had QueryFailure flag set.", getLastErrorCommand, reply.QueryFailureDocument); - } - - var response = reply.Documents.Single(); - - var notPrimaryOrNodeIsRecoveringException = 
ExceptionMapper.MapNotPrimaryOrNodeIsRecovering(connectionId, getLastErrorCommand, response, "err"); - if (notPrimaryOrNodeIsRecoveringException != null) - { - throw notPrimaryOrNodeIsRecoveringException; - } - - var writeConcernResult = new WriteConcernResult(response); - - var mappedException = ExceptionMapper.Map(connectionId, writeConcernResult); - if (mappedException != null) - { - throw mappedException; - } - - return writeConcernResult; - } - } -} diff --git a/src/MongoDB.Driver/CountOptions.cs b/src/MongoDB.Driver/CountOptions.cs index 2a85963cda7..e33bbb44908 100644 --- a/src/MongoDB.Driver/CountOptions.cs +++ b/src/MongoDB.Driver/CountOptions.cs @@ -31,6 +31,7 @@ public sealed class CountOptions private long? _limit; private TimeSpan? _maxTime; private long? _skip; + private TimeSpan? _timeout; // properties /// <summary> @@ -86,5 +87,15 @@ public long? Skip get { return _skip; } set { _skip = value; } } + + /// <summary> + /// Gets or sets the operation timeout. + /// </summary> + // TODO: CSOT: Make it public when CSOT will be ready for GA + internal TimeSpan? Timeout + { + get => _timeout; + set => _timeout = Ensure.IsNullOrValidTimeout(value, nameof(Timeout)); + } } } diff --git a/src/MongoDB.Driver/CreateCollectionOptions.cs b/src/MongoDB.Driver/CreateCollectionOptions.cs index 9e0e19e5a44..a91a13cf1fa 100644 --- a/src/MongoDB.Driver/CreateCollectionOptions.cs +++ b/src/MongoDB.Driver/CreateCollectionOptions.cs @@ -16,6 +16,7 @@ using System; using MongoDB.Bson; using MongoDB.Bson.Serialization; +using MongoDB.Driver.Core.Misc; namespace MongoDB.Driver { @@ -35,6 +36,7 @@ public class CreateCollectionOptions private long? _maxSize; private bool? _noPadding; private BsonDocument _storageEngine; + private TimeSpan? _timeout; private TimeSeriesOptions _timeSeriesOptions; private bool? _usePowerOf2Sizes; private IBsonSerializerRegistry _serializerRegistry; @@ -120,6 +122,7 @@ public long? 
MaxSize /// <summary> /// Gets or sets whether padding should not be used. /// </summary> + [Obsolete("This option was removed in server version 4.2. As such, this property will be removed in a later release.")] public bool? NoPadding { get { return _noPadding; } @@ -144,6 +147,16 @@ public BsonDocument StorageEngine set { _storageEngine = value; } } + /// <summary> + /// Gets or sets the operation timeout. + /// </summary> + // TODO: CSOT: Make it public when CSOT will be ready for GA + internal TimeSpan? Timeout + { + get => _timeout; + set => _timeout = Ensure.IsNullOrValidTimeout(value, nameof(Timeout)); + } + /// <summary> /// Gets or sets the <see cref="TimeSeriesOptions"/> to use when creating a time series collection. /// </summary> @@ -156,6 +169,7 @@ public TimeSeriesOptions TimeSeriesOptions /// <summary> /// Gets or sets a value indicating whether to use power of 2 sizes. /// </summary> + [Obsolete("This option was removed in server version 4.2. As such, this property will be removed in a later release.")] public bool? 
UsePowerOf2Sizes { get { return _usePowerOf2Sizes; } @@ -201,11 +215,9 @@ public virtual CreateCollectionOptions Clone() => _indexOptionDefaults = _indexOptionDefaults, _maxDocuments = _maxDocuments, _maxSize = _maxSize, - _noPadding = _noPadding, _serializerRegistry = _serializerRegistry, _storageEngine = _storageEngine, _timeSeriesOptions = _timeSeriesOptions, - _usePowerOf2Sizes = _usePowerOf2Sizes, _validationAction = _validationAction, _validationLevel = _validationLevel }; @@ -243,11 +255,9 @@ internal static CreateCollectionOptions<TDocument> CoercedFrom(CreateCollectionO IndexOptionDefaults = options.IndexOptionDefaults, MaxDocuments = options.MaxDocuments, MaxSize = options.MaxSize, - NoPadding = options.NoPadding, SerializerRegistry = options.SerializerRegistry, StorageEngine = options.StorageEngine, TimeSeriesOptions = options.TimeSeriesOptions, - UsePowerOf2Sizes = options.UsePowerOf2Sizes, ValidationAction = options.ValidationAction, ValidationLevel = options.ValidationLevel }; @@ -308,11 +318,9 @@ public override CreateCollectionOptions Clone() => IndexOptionDefaults = base.IndexOptionDefaults, MaxDocuments = base.MaxDocuments, MaxSize = base.MaxSize, - NoPadding = base.NoPadding, SerializerRegistry = base.SerializerRegistry, StorageEngine = base.StorageEngine, TimeSeriesOptions = base.TimeSeriesOptions, - UsePowerOf2Sizes = base.UsePowerOf2Sizes, ValidationAction = base.ValidationAction, ValidationLevel = base.ValidationLevel, diff --git a/src/MongoDB.Driver/CreateManyIndexesOptions.cs b/src/MongoDB.Driver/CreateManyIndexesOptions.cs index 7122d65847b..9cf604953da 100644 --- a/src/MongoDB.Driver/CreateManyIndexesOptions.cs +++ b/src/MongoDB.Driver/CreateManyIndexesOptions.cs @@ -1,4 +1,4 @@ -/* Copyright 2018-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -28,6 +28,7 @@ public class CreateManyIndexesOptions private BsonValue _comment; private CreateIndexCommitQuorum _commitQuorum; private TimeSpan? _maxTime; + private TimeSpan? _timeout; // public properties /// <summary> @@ -59,5 +60,15 @@ public TimeSpan? MaxTime get { return _maxTime; } set { _maxTime = Ensure.IsNullOrInfiniteOrGreaterThanOrEqualToZero(value, nameof(value)); } } + + /// <summary> + /// Gets or sets the operation timeout. + /// </summary> + // TODO: CSOT: Make it public when CSOT will be ready for GA + internal TimeSpan? Timeout + { + get => _timeout; + set => _timeout = Ensure.IsNullOrValidTimeout(value, nameof(Timeout)); + } } } diff --git a/src/MongoDB.Driver/CreateOneIndexOptions.cs b/src/MongoDB.Driver/CreateOneIndexOptions.cs index 06ab3bf76d7..5ffdb48118f 100644 --- a/src/MongoDB.Driver/CreateOneIndexOptions.cs +++ b/src/MongoDB.Driver/CreateOneIndexOptions.cs @@ -1,4 +1,4 @@ -/* Copyright 2018-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -26,6 +26,7 @@ public class CreateOneIndexOptions // private fields private CreateIndexCommitQuorum _commitQuorum; private TimeSpan? _maxTime; + private TimeSpan? _timeout; // public properties /// <summary> @@ -47,5 +48,15 @@ public TimeSpan? MaxTime get { return _maxTime; } set { _maxTime = Ensure.IsNullOrInfiniteOrGreaterThanOrEqualToZero(value, nameof(value)); } } + + /// <summary> + /// Gets or sets the operation timeout. + /// </summary> + // TODO: CSOT: Make it public when CSOT will be ready for GA + internal TimeSpan? 
Timeout + { + get => _timeout; + set => _timeout = Ensure.IsNullOrValidTimeout(value, nameof(Timeout)); + } } } diff --git a/src/MongoDB.Driver/CreateViewOptions.cs b/src/MongoDB.Driver/CreateViewOptions.cs index 1226e714841..5120f2efd72 100644 --- a/src/MongoDB.Driver/CreateViewOptions.cs +++ b/src/MongoDB.Driver/CreateViewOptions.cs @@ -1,4 +1,4 @@ -/* Copyright 2016-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,7 +13,9 @@ * limitations under the License. */ +using System; using MongoDB.Bson.Serialization; +using MongoDB.Driver.Core.Misc; namespace MongoDB.Driver { @@ -27,6 +29,7 @@ public class CreateViewOptions<TDocument> private Collation _collation; private IBsonSerializer<TDocument> _documentSerializer; private IBsonSerializerRegistry _serializerRegistry; + private TimeSpan? _timeout; // properties /// <summary> @@ -64,5 +67,15 @@ public IBsonSerializerRegistry SerializerRegistry get { return _serializerRegistry; } set { _serializerRegistry = value; } } + + /// <summary> + /// Gets or sets the operation timeout. + /// </summary> + // TODO: CSOT: Make it public when CSOT will be ready for GA + internal TimeSpan? Timeout + { + get => _timeout; + set => _timeout = Ensure.IsNullOrValidTimeout(value, nameof(Timeout)); + } } } diff --git a/src/MongoDB.Driver/DeleteOptions.cs b/src/MongoDB.Driver/DeleteOptions.cs index e8e31e98758..e87e76b9e48 100644 --- a/src/MongoDB.Driver/DeleteOptions.cs +++ b/src/MongoDB.Driver/DeleteOptions.cs @@ -13,7 +13,9 @@ * limitations under the License. */ +using System; using MongoDB.Bson; +using MongoDB.Driver.Core.Misc; namespace MongoDB.Driver { @@ -27,6 +29,7 @@ public sealed class DeleteOptions private BsonValue _comment; private BsonValue _hint; private BsonDocument _let; + private TimeSpan? 
_timeout; // properties /// <summary> @@ -64,5 +67,15 @@ public BsonDocument Let get { return _let; } set { _let = value; } } + + /// <summary> + /// Gets or sets the operation timeout. + /// </summary> + // TODO: CSOT: Make it public when CSOT will be ready for GA + internal TimeSpan? Timeout + { + get => _timeout; + set => _timeout = Ensure.IsNullOrValidTimeout(value, nameof(Timeout)); + } } } diff --git a/src/MongoDB.Driver/DistinctOptions.cs b/src/MongoDB.Driver/DistinctOptions.cs index 5aee87e814e..5d6b947e78e 100644 --- a/src/MongoDB.Driver/DistinctOptions.cs +++ b/src/MongoDB.Driver/DistinctOptions.cs @@ -28,6 +28,7 @@ public sealed class DistinctOptions private Collation _collation; private BsonValue _comment; private TimeSpan? _maxTime; + private TimeSpan? _timeout; // properties /// <summary> @@ -56,5 +57,15 @@ public TimeSpan? MaxTime get { return _maxTime; } set { _maxTime = Ensure.IsNullOrInfiniteOrGreaterThanOrEqualToZero(value, nameof(value)); } } + + /// <summary> + /// Gets or sets the operation timeout. + /// </summary> + // TODO: CSOT: Make it public when CSOT will be ready for GA + internal TimeSpan? Timeout + { + get => _timeout; + set => _timeout = Ensure.IsNullOrValidTimeout(value, nameof(Timeout)); + } } } diff --git a/src/MongoDB.Driver/DropCollectionOptions.cs b/src/MongoDB.Driver/DropCollectionOptions.cs index 8c3e13bb483..441a865ae12 100644 --- a/src/MongoDB.Driver/DropCollectionOptions.cs +++ b/src/MongoDB.Driver/DropCollectionOptions.cs @@ -13,7 +13,9 @@ * limitations under the License. */ +using System; using MongoDB.Bson; +using MongoDB.Driver.Core.Misc; namespace MongoDB.Driver { @@ -23,6 +25,7 @@ namespace MongoDB.Driver public class DropCollectionOptions { private BsonDocument _encryptedFields; + private TimeSpan? _timeout; /// <summary> /// Gets or sets encrypted fields. 
@@ -32,5 +35,15 @@ public BsonDocument EncryptedFields get { return _encryptedFields; } set { _encryptedFields = value; } } + + /// <summary> + /// Gets or sets the operation timeout. + /// </summary> + // TODO: CSOT: Make it public when CSOT will be ready for GA + internal TimeSpan? Timeout + { + get => _timeout; + set => _timeout = Ensure.IsNullOrValidTimeout(value, nameof(Timeout)); + } } } diff --git a/src/MongoDB.Driver/DropIndexOptions.cs b/src/MongoDB.Driver/DropIndexOptions.cs index d345fc0cb48..02ce9f531a8 100644 --- a/src/MongoDB.Driver/DropIndexOptions.cs +++ b/src/MongoDB.Driver/DropIndexOptions.cs @@ -1,4 +1,4 @@ -/* Copyright 2018-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -26,6 +26,7 @@ public class DropIndexOptions { private BsonValue _comment; private TimeSpan? _maxTime; + private TimeSpan? _timeout; /// <summary> /// Gets or sets the comment. @@ -48,5 +49,15 @@ public TimeSpan? MaxTime get { return _maxTime; } set { _maxTime = Ensure.IsNullOrInfiniteOrGreaterThanOrEqualToZero(value, nameof(value)); } } + + /// <summary> + /// Gets or sets the operation timeout. + /// </summary> + // TODO: CSOT: Make it public when CSOT will be ready for GA + internal TimeSpan? Timeout + { + get => _timeout; + set => _timeout = Ensure.IsNullOrValidTimeout(value, nameof(Timeout)); + } } } diff --git a/src/MongoDB.Driver/EstimatedDocumentCountOptions.cs b/src/MongoDB.Driver/EstimatedDocumentCountOptions.cs index 2789601d1da..984e850930c 100644 --- a/src/MongoDB.Driver/EstimatedDocumentCountOptions.cs +++ b/src/MongoDB.Driver/EstimatedDocumentCountOptions.cs @@ -1,4 +1,4 @@ -/* Copyright 2018-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -27,6 +27,7 @@ public sealed class EstimatedDocumentCountOptions // private fields private BsonValue _comment; private TimeSpan? _maxTime; + private TimeSpan? _timeout; // public properties /// <summary> @@ -46,5 +47,15 @@ public TimeSpan? MaxTime get { return _maxTime; } set { _maxTime = Ensure.IsNullOrInfiniteOrGreaterThanOrEqualToZero(value, nameof(value)); } } + + /// <summary> + /// Gets or sets the operation timeout. + /// </summary> + // TODO: CSOT: Make it public when CSOT will be ready for GA + internal TimeSpan? Timeout + { + get => _timeout; + set => _timeout = Ensure.IsNullOrValidTimeout(value, nameof(Timeout)); + } } } diff --git a/src/MongoDB.Driver/ExternalEvidence.cs b/src/MongoDB.Driver/ExternalEvidence.cs index ce1b0c9aeef..ab714bcfe14 100644 --- a/src/MongoDB.Driver/ExternalEvidence.cs +++ b/src/MongoDB.Driver/ExternalEvidence.cs @@ -21,7 +21,7 @@ namespace MongoDB.Driver { /// <summary> - /// Evidence of a MongoIdentity via an external mechanism. For example, on windows this may + /// Evidence of a MongoIdentity via an external mechanism. For example, on windows this may /// be the current process' user or, on linux, via kinit. /// </summary> public sealed class ExternalEvidence : MongoIdentityEvidence @@ -48,7 +48,7 @@ public override bool Equals(object obj) /// Returns a hash code for this instance. /// </summary> /// <returns> - /// A hash code for this instance, suitable for use in hashing algorithms and data structures like a hash table. + /// A hash code for this instance, suitable for use in hashing algorithms and data structures like a hash table. 
/// </returns> public override int GetHashCode() { diff --git a/src/MongoDB.Driver/FilteredMongoCollectionBase.cs b/src/MongoDB.Driver/FilteredMongoCollectionBase.cs index 65d95d2f7b5..17a7a3e1901 100644 --- a/src/MongoDB.Driver/FilteredMongoCollectionBase.cs +++ b/src/MongoDB.Driver/FilteredMongoCollectionBase.cs @@ -304,22 +304,22 @@ public override Task<IAsyncCursor<TItem>> DistinctManyAsync<TItem>(IClientSessio public override TProjection FindOneAndUpdate<TProjection>(FilterDefinition<TDocument> filter, UpdateDefinition<TDocument> update, FindOneAndUpdateOptions<TDocument, TProjection> options = null, CancellationToken cancellationToken = default(CancellationToken)) { - return _wrappedCollection.FindOneAndUpdate(CombineFilters(filter), update, options, cancellationToken); + return _wrappedCollection.FindOneAndUpdate(CombineFilters(filter), AdjustUpdateDefinition(update, options.IsUpsert), options, cancellationToken); } public override TProjection FindOneAndUpdate<TProjection>(IClientSessionHandle session, FilterDefinition<TDocument> filter, UpdateDefinition<TDocument> update, FindOneAndUpdateOptions<TDocument, TProjection> options = null, CancellationToken cancellationToken = default(CancellationToken)) { - return _wrappedCollection.FindOneAndUpdate(session, CombineFilters(filter), update, options, cancellationToken); + return _wrappedCollection.FindOneAndUpdate(session, CombineFilters(filter), AdjustUpdateDefinition(update, options.IsUpsert), options, cancellationToken); } public override Task<TProjection> FindOneAndUpdateAsync<TProjection>(FilterDefinition<TDocument> filter, UpdateDefinition<TDocument> update, FindOneAndUpdateOptions<TDocument, TProjection> options = null, CancellationToken cancellationToken = default(CancellationToken)) { - return _wrappedCollection.FindOneAndUpdateAsync(CombineFilters(filter), update, options, cancellationToken); + return _wrappedCollection.FindOneAndUpdateAsync(CombineFilters(filter), AdjustUpdateDefinition(update, 
options.IsUpsert), options, cancellationToken); } public override Task<TProjection> FindOneAndUpdateAsync<TProjection>(IClientSessionHandle session, FilterDefinition<TDocument> filter, UpdateDefinition<TDocument> update, FindOneAndUpdateOptions<TDocument, TProjection> options = null, CancellationToken cancellationToken = default(CancellationToken)) { - return _wrappedCollection.FindOneAndUpdateAsync(session, CombineFilters(filter), update, options, cancellationToken); + return _wrappedCollection.FindOneAndUpdateAsync(session, CombineFilters(filter), AdjustUpdateDefinition(update, options.IsUpsert), options, cancellationToken); } [Obsolete("Use Aggregation pipeline instead.")] diff --git a/src/MongoDB.Driver/FindFluent.cs b/src/MongoDB.Driver/FindFluent.cs index cc25e841b29..939aec7da71 100644 --- a/src/MongoDB.Driver/FindFluent.cs +++ b/src/MongoDB.Driver/FindFluent.cs @@ -144,6 +144,7 @@ public override IFindFluent<TDocument, TNewProjection> Project<TNewProjection>(P ShowRecordId = _options.ShowRecordId, Skip = _options.Skip, Sort = _options.Sort, + Timeout = _options.Timeout, TranslationOptions = _options.TranslationOptions }; return new FindFluent<TDocument, TNewProjection>(_session, _collection, _filter, newOptions); @@ -274,7 +275,8 @@ private CountOptions CreateCountOptions() Hint = _options.Hint, Limit = _options.Limit, MaxTime = _options.MaxTime, - Skip = _options.Skip + Skip = _options.Skip, + Timeout = _options.Timeout }; } diff --git a/src/MongoDB.Driver/FindOneAndDeleteOptions.cs b/src/MongoDB.Driver/FindOneAndDeleteOptions.cs index 7cb6f5317fa..c1377215dd3 100644 --- a/src/MongoDB.Driver/FindOneAndDeleteOptions.cs +++ b/src/MongoDB.Driver/FindOneAndDeleteOptions.cs @@ -34,6 +34,7 @@ public class FindOneAndDeleteOptions<TDocument, TProjection> private TimeSpan? _maxTime; private ProjectionDefinition<TDocument, TProjection> _projection; private SortDefinition<TDocument> _sort; + private TimeSpan? 
_timeout; // properties /// <summary> @@ -98,6 +99,16 @@ public SortDefinition<TDocument> Sort get { return _sort; } set { _sort = value; } } + + /// <summary> + /// Gets or sets the operation timeout. + /// </summary> + // TODO: CSOT: Make it public when CSOT will be ready for GA + internal TimeSpan? Timeout + { + get => _timeout; + set => _timeout = Ensure.IsNullOrValidTimeout(value, nameof(Timeout)); + } } /// <summary> diff --git a/src/MongoDB.Driver/FindOneAndReplaceOptions.cs b/src/MongoDB.Driver/FindOneAndReplaceOptions.cs index bca48a2e621..2501609b799 100644 --- a/src/MongoDB.Driver/FindOneAndReplaceOptions.cs +++ b/src/MongoDB.Driver/FindOneAndReplaceOptions.cs @@ -37,6 +37,7 @@ public class FindOneAndReplaceOptions<TDocument, TProjection> private ProjectionDefinition<TDocument, TProjection> _projection; private ReturnDocument _returnDocument; private SortDefinition<TDocument> _sort; + private TimeSpan? _timeout; // constructors /// <summary> @@ -137,6 +138,16 @@ public SortDefinition<TDocument> Sort get { return _sort; } set { _sort = value; } } + + /// <summary> + /// Gets or sets the operation timeout. + /// </summary> + // TODO: CSOT: Make it public when CSOT will be ready for GA + internal TimeSpan? Timeout + { + get => _timeout; + set => _timeout = Ensure.IsNullOrValidTimeout(value, nameof(Timeout)); + } } /// <summary> diff --git a/src/MongoDB.Driver/FindOneAndUpdateOptions.cs b/src/MongoDB.Driver/FindOneAndUpdateOptions.cs index 683156671da..7945a9a19bd 100644 --- a/src/MongoDB.Driver/FindOneAndUpdateOptions.cs +++ b/src/MongoDB.Driver/FindOneAndUpdateOptions.cs @@ -39,6 +39,7 @@ public class FindOneAndUpdateOptions<TDocument, TProjection> private ProjectionDefinition<TDocument, TProjection> _projection; private ReturnDocument _returnDocument; private SortDefinition<TDocument> _sort; + private TimeSpan? 
_timeout; // constructors /// <summary> @@ -151,6 +152,16 @@ public SortDefinition<TDocument> Sort get { return _sort; } set { _sort = value; } } + + /// <summary> + /// Gets or sets the operation timeout. + /// </summary> + // TODO: CSOT: Make it public when CSOT will be ready for GA + internal TimeSpan? Timeout + { + get => _timeout; + set => _timeout = Ensure.IsNullOrValidTimeout(value, nameof(Timeout)); + } } /// <summary> diff --git a/src/MongoDB.Driver/FindOptions.cs b/src/MongoDB.Driver/FindOptions.cs index 3ceb5cdfdaa..3c4e85ee86f 100644 --- a/src/MongoDB.Driver/FindOptions.cs +++ b/src/MongoDB.Driver/FindOptions.cs @@ -1,4 +1,4 @@ -/* Copyright 2015-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -41,6 +41,7 @@ public abstract class FindOptionsBase private bool? _oplogReplay; private bool? _returnKey; private bool? _showRecordId; + private TimeSpan? _timeout; private ExpressionTranslationOptions _translationOptions; // constructors @@ -198,6 +199,16 @@ public bool? ShowRecordId set { _showRecordId = value; } } + /// <summary> + /// Gets or sets the operation timeout. + /// </summary> + // TODO: CSOT: Make it public when CSOT will be ready for GA + internal TimeSpan? Timeout + { + get => _timeout; + set => _timeout = Ensure.IsNullOrValidTimeout(value, nameof(Timeout)); + } + /// <summary> /// Gets or sets the translation options. /// </summary> diff --git a/src/MongoDB.Driver/GeoJsonObjectModel/GeoJsonCoordinates.cs b/src/MongoDB.Driver/GeoJsonObjectModel/GeoJsonCoordinates.cs index b20ebe7e4a2..41c42424e23 100644 --- a/src/MongoDB.Driver/GeoJsonObjectModel/GeoJsonCoordinates.cs +++ b/src/MongoDB.Driver/GeoJsonObjectModel/GeoJsonCoordinates.cs @@ -92,7 +92,7 @@ public override bool Equals(object obj) /// Returns a hash code for this instance. 
/// </summary> /// <returns> - /// A hash code for this instance, suitable for use in hashing algorithms and data structures like a hash table. + /// A hash code for this instance, suitable for use in hashing algorithms and data structures like a hash table. /// </returns> public override int GetHashCode() { diff --git a/src/MongoDB.Driver/GridFS/GridFSBucket.cs b/src/MongoDB.Driver/GridFS/GridFSBucket.cs index 262939e4f85..7e65af290a3 100644 --- a/src/MongoDB.Driver/GridFS/GridFSBucket.cs +++ b/src/MongoDB.Driver/GridFS/GridFSBucket.cs @@ -83,13 +83,15 @@ public ImmutableGridFSBucketOptions Options public void Delete(TFileId id, CancellationToken cancellationToken = default(CancellationToken)) { Ensure.IsNotNull((object)id, nameof(id)); - using (var binding = GetSingleServerReadWriteBinding(cancellationToken)) + // TODO: CSOT implement proper way to obtain the operationContext + var operationContext = new OperationContext(null, cancellationToken); + using (var binding = GetSingleServerReadWriteBinding(operationContext)) { var filesCollectionDeleteOperation = CreateDeleteFileOperation(id); - var filesCollectionDeleteResult = filesCollectionDeleteOperation.Execute(binding, cancellationToken); + var filesCollectionDeleteResult = filesCollectionDeleteOperation.Execute(operationContext, binding); var chunksDeleteOperation = CreateDeleteChunksOperation(id); - chunksDeleteOperation.Execute(binding, cancellationToken); + chunksDeleteOperation.Execute(operationContext, binding); if (filesCollectionDeleteResult.DeletedCount == 0) { @@ -102,13 +104,15 @@ public ImmutableGridFSBucketOptions Options public async Task DeleteAsync(TFileId id, CancellationToken cancellationToken = default(CancellationToken)) { Ensure.IsNotNull((object)id, nameof(id)); - using (var binding = await GetSingleServerReadWriteBindingAsync(cancellationToken).ConfigureAwait(false)) + // TODO: CSOT implement proper way to obtain the operationContext + var operationContext = new OperationContext(null, 
cancellationToken); + using (var binding = await GetSingleServerReadWriteBindingAsync(operationContext).ConfigureAwait(false)) { var filesCollectionDeleteOperation = CreateDeleteFileOperation(id); - var filesCollectionDeleteResult = await filesCollectionDeleteOperation.ExecuteAsync(binding, cancellationToken).ConfigureAwait(false); + var filesCollectionDeleteResult = await filesCollectionDeleteOperation.ExecuteAsync(operationContext, binding).ConfigureAwait(false); var chunksDeleteOperation = CreateDeleteChunksOperation(id); - await chunksDeleteOperation.ExecuteAsync(binding, cancellationToken).ConfigureAwait(false); + await chunksDeleteOperation.ExecuteAsync(operationContext, binding).ConfigureAwait(false); if (filesCollectionDeleteResult.DeletedCount == 0) { @@ -121,10 +125,12 @@ public ImmutableGridFSBucketOptions Options public byte[] DownloadAsBytes(TFileId id, GridFSDownloadOptions options = null, CancellationToken cancellationToken = default(CancellationToken)) { Ensure.IsNotNull((object)id, nameof(id)); + // TODO: CSOT implement proper way to obtain the operationContext + var operationContext = new OperationContext(null, cancellationToken); options = options ?? new GridFSDownloadOptions(); - using (var binding = GetSingleServerReadBinding(cancellationToken)) + using (var binding = GetSingleServerReadBinding(operationContext)) { - var fileInfo = GetFileInfo(binding, id, cancellationToken); + var fileInfo = GetFileInfo(operationContext, binding, id); return DownloadAsBytesHelper(binding, fileInfo, options, cancellationToken); } } @@ -133,10 +139,12 @@ public ImmutableGridFSBucketOptions Options public async Task<byte[]> DownloadAsBytesAsync(TFileId id, GridFSDownloadOptions options = null, CancellationToken cancellationToken = default(CancellationToken)) { Ensure.IsNotNull((object)id, nameof(id)); + // TODO: CSOT implement proper way to obtain the operationContext + var operationContext = new OperationContext(null, cancellationToken); options = options ?? 
new GridFSDownloadOptions(); - using (var binding = await GetSingleServerReadBindingAsync(cancellationToken).ConfigureAwait(false)) + using (var binding = await GetSingleServerReadBindingAsync(operationContext).ConfigureAwait(false)) { - var fileInfo = await GetFileInfoAsync(binding, id, cancellationToken).ConfigureAwait(false); + var fileInfo = await GetFileInfoAsync(operationContext, binding, id).ConfigureAwait(false); return await DownloadAsBytesHelperAsync(binding, fileInfo, options, cancellationToken).ConfigureAwait(false); } } @@ -145,11 +153,13 @@ public ImmutableGridFSBucketOptions Options public byte[] DownloadAsBytesByName(string filename, GridFSDownloadByNameOptions options = null, CancellationToken cancellationToken = default(CancellationToken)) { Ensure.IsNotNull(filename, nameof(filename)); + // TODO: CSOT implement proper way to obtain the operationContext + var operationContext = new OperationContext(null, cancellationToken); options = options ?? new GridFSDownloadByNameOptions(); - using (var binding = GetSingleServerReadBinding(cancellationToken)) + using (var binding = GetSingleServerReadBinding(operationContext)) { - var fileInfo = GetFileInfoByName(binding, filename, options.Revision, cancellationToken); + var fileInfo = GetFileInfoByName(operationContext, binding, filename, options.Revision); return DownloadAsBytesHelper(binding, fileInfo, options, cancellationToken); } } @@ -158,11 +168,13 @@ public ImmutableGridFSBucketOptions Options public async Task<byte[]> DownloadAsBytesByNameAsync(string filename, GridFSDownloadByNameOptions options = null, CancellationToken cancellationToken = default(CancellationToken)) { Ensure.IsNotNull(filename, nameof(filename)); + // TODO: CSOT implement proper way to obtain the operationContext + var operationContext = new OperationContext(null, cancellationToken); options = options ?? 
new GridFSDownloadByNameOptions(); - using (var binding = await GetSingleServerReadBindingAsync(cancellationToken).ConfigureAwait(false)) + using (var binding = await GetSingleServerReadBindingAsync(operationContext).ConfigureAwait(false)) { - var fileInfo = await GetFileInfoByNameAsync(binding, filename, options.Revision, cancellationToken).ConfigureAwait(false); + var fileInfo = await GetFileInfoByNameAsync(operationContext, binding, filename, options.Revision).ConfigureAwait(false); return await DownloadAsBytesHelperAsync(binding, fileInfo, options, cancellationToken).ConfigureAwait(false); } } @@ -172,10 +184,12 @@ public ImmutableGridFSBucketOptions Options { Ensure.IsNotNull((object)id, nameof(id)); Ensure.IsNotNull(destination, nameof(destination)); + // TODO: CSOT implement proper way to obtain the operationContext + var operationContext = new OperationContext(null, cancellationToken); options = options ?? new GridFSDownloadOptions(); - using (var binding = GetSingleServerReadBinding(cancellationToken)) + using (var binding = GetSingleServerReadBinding(operationContext)) { - var fileInfo = GetFileInfo(binding, id, cancellationToken); + var fileInfo = GetFileInfo(operationContext, binding, id); DownloadToStreamHelper(binding, fileInfo, destination, options, cancellationToken); } } @@ -185,10 +199,12 @@ public ImmutableGridFSBucketOptions Options { Ensure.IsNotNull((object)id, nameof(id)); Ensure.IsNotNull(destination, nameof(destination)); + // TODO: CSOT implement proper way to obtain the operationContext + var operationContext = new OperationContext(null, cancellationToken); options = options ?? 
new GridFSDownloadOptions(); - using (var binding = await GetSingleServerReadBindingAsync(cancellationToken).ConfigureAwait(false)) + using (var binding = await GetSingleServerReadBindingAsync(operationContext).ConfigureAwait(false)) { - var fileInfo = await GetFileInfoAsync(binding, id, cancellationToken).ConfigureAwait(false); + var fileInfo = await GetFileInfoAsync(operationContext, binding, id).ConfigureAwait(false); await DownloadToStreamHelperAsync(binding, fileInfo, destination, options, cancellationToken).ConfigureAwait(false); } } @@ -198,11 +214,13 @@ public ImmutableGridFSBucketOptions Options { Ensure.IsNotNull(filename, nameof(filename)); Ensure.IsNotNull(destination, nameof(destination)); + // TODO: CSOT implement proper way to obtain the operationContext + var operationContext = new OperationContext(null, cancellationToken); options = options ?? new GridFSDownloadByNameOptions(); - using (var binding = GetSingleServerReadBinding(cancellationToken)) + using (var binding = GetSingleServerReadBinding(operationContext)) { - var fileInfo = GetFileInfoByName(binding, filename, options.Revision, cancellationToken); + var fileInfo = GetFileInfoByName(operationContext, binding, filename, options.Revision); DownloadToStreamHelper(binding, fileInfo, destination, options, cancellationToken); } } @@ -212,11 +230,13 @@ public ImmutableGridFSBucketOptions Options { Ensure.IsNotNull(filename, nameof(filename)); Ensure.IsNotNull(destination, nameof(destination)); + // TODO: CSOT implement proper way to obtain the operationContext + var operationContext = new OperationContext(null, cancellationToken); options = options ?? 
new GridFSDownloadByNameOptions(); - using (var binding = await GetSingleServerReadBindingAsync(cancellationToken).ConfigureAwait(false)) + using (var binding = await GetSingleServerReadBindingAsync(operationContext).ConfigureAwait(false)) { - var fileInfo = await GetFileInfoByNameAsync(binding, filename, options.Revision, cancellationToken).ConfigureAwait(false); + var fileInfo = await GetFileInfoByNameAsync(operationContext, binding, filename, options.Revision).ConfigureAwait(false); await DownloadToStreamHelperAsync(binding, fileInfo, destination, options, cancellationToken).ConfigureAwait(false); } } @@ -224,34 +244,38 @@ public ImmutableGridFSBucketOptions Options /// <inheritdoc /> public void Drop(CancellationToken cancellationToken = default(CancellationToken)) { + // TODO: CSOT implement proper way to obtain the operationContext + var operationContext = new OperationContext(null, cancellationToken); var filesCollectionNamespace = this.GetFilesCollectionNamespace(); var chunksCollectionNamespace = this.GetChunksCollectionNamespace(); var messageEncoderSettings = this.GetMessageEncoderSettings(); - using (var binding = GetSingleServerReadWriteBinding(cancellationToken)) + using (var binding = GetSingleServerReadWriteBinding(operationContext)) { var filesCollectionDropOperation = CreateDropCollectionOperation(filesCollectionNamespace, messageEncoderSettings); - filesCollectionDropOperation.Execute(binding, cancellationToken); + filesCollectionDropOperation.Execute(operationContext, binding); var chunksCollectionDropOperation = CreateDropCollectionOperation(chunksCollectionNamespace, messageEncoderSettings); - chunksCollectionDropOperation.Execute(binding, cancellationToken); + chunksCollectionDropOperation.Execute(operationContext, binding); } } /// <inheritdoc /> public async Task DropAsync(CancellationToken cancellationToken = default(CancellationToken)) { + // TODO: CSOT implement proper way to obtain the operationContext + var operationContext = new 
OperationContext(null, cancellationToken); var filesCollectionNamespace = this.GetFilesCollectionNamespace(); var chunksCollectionNamespace = this.GetChunksCollectionNamespace(); var messageEncoderSettings = this.GetMessageEncoderSettings(); - using (var binding = await GetSingleServerReadWriteBindingAsync(cancellationToken).ConfigureAwait(false)) + using (var binding = await GetSingleServerReadWriteBindingAsync(operationContext).ConfigureAwait(false)) { var filesCollectionDropOperation = CreateDropCollectionOperation(filesCollectionNamespace, messageEncoderSettings); - await filesCollectionDropOperation.ExecuteAsync(binding, cancellationToken).ConfigureAwait(false); + await filesCollectionDropOperation.ExecuteAsync(operationContext, binding).ConfigureAwait(false); var chunksCollectionDropOperation = CreateDropCollectionOperation(chunksCollectionNamespace, messageEncoderSettings); - await chunksCollectionDropOperation.ExecuteAsync(binding, cancellationToken).ConfigureAwait(false); + await chunksCollectionDropOperation.ExecuteAsync(operationContext, binding).ConfigureAwait(false); } } @@ -259,13 +283,15 @@ public ImmutableGridFSBucketOptions Options public IAsyncCursor<GridFSFileInfo<TFileId>> Find(FilterDefinition<GridFSFileInfo<TFileId>> filter, GridFSFindOptions<TFileId> options = null, CancellationToken cancellationToken = default(CancellationToken)) { Ensure.IsNotNull(filter, nameof(filter)); + // TODO: CSOT implement proper way to obtain the operationContext + var operationContext = new OperationContext(null, cancellationToken); options = options ?? 
new GridFSFindOptions<TFileId>(); var translationOptions = _database.Client.Settings.TranslationOptions; var operation = CreateFindOperation(filter, options, translationOptions); - using (var binding = GetSingleServerReadBinding(cancellationToken)) + using (var binding = GetSingleServerReadBinding(operationContext)) { - return operation.Execute(binding, cancellationToken); + return operation.Execute(operationContext, binding); } } @@ -273,13 +299,15 @@ public ImmutableGridFSBucketOptions Options public async Task<IAsyncCursor<GridFSFileInfo<TFileId>>> FindAsync(FilterDefinition<GridFSFileInfo<TFileId>> filter, GridFSFindOptions<TFileId> options = null, CancellationToken cancellationToken = default(CancellationToken)) { Ensure.IsNotNull(filter, nameof(filter)); + // TODO: CSOT implement proper way to obtain the operationContext + var operationContext = new OperationContext(null, cancellationToken); options = options ?? new GridFSFindOptions<TFileId>(); var translationOptions = _database.Client.Settings.TranslationOptions; var operation = CreateFindOperation(filter, options, translationOptions); - using (var binding = await GetSingleServerReadBindingAsync(cancellationToken).ConfigureAwait(false)) + using (var binding = await GetSingleServerReadBindingAsync(operationContext).ConfigureAwait(false)) { - return await operation.ExecuteAsync(binding, cancellationToken).ConfigureAwait(false); + return await operation.ExecuteAsync(operationContext, binding).ConfigureAwait(false); } } @@ -287,10 +315,12 @@ public ImmutableGridFSBucketOptions Options public GridFSDownloadStream<TFileId> OpenDownloadStream(TFileId id, GridFSDownloadOptions options = null, CancellationToken cancellationToken = default(CancellationToken)) { Ensure.IsNotNull((object)id, nameof(id)); + // TODO: CSOT implement proper way to obtain the operationContext + var operationContext = new OperationContext(null, cancellationToken); options = options ?? 
new GridFSDownloadOptions(); - using (var binding = GetSingleServerReadBinding(cancellationToken)) + using (var binding = GetSingleServerReadBinding(operationContext)) { - var fileInfo = GetFileInfo(binding, id, cancellationToken); + var fileInfo = GetFileInfo(operationContext, binding, id); return CreateDownloadStream(binding.Fork(), fileInfo, options, cancellationToken); } } @@ -299,10 +329,12 @@ public ImmutableGridFSBucketOptions Options public async Task<GridFSDownloadStream<TFileId>> OpenDownloadStreamAsync(TFileId id, GridFSDownloadOptions options = null, CancellationToken cancellationToken = default(CancellationToken)) { Ensure.IsNotNull((object)id, nameof(id)); + // TODO: CSOT implement proper way to obtain the operationContext + var operationContext = new OperationContext(null, cancellationToken); options = options ?? new GridFSDownloadOptions(); - using (var binding = await GetSingleServerReadBindingAsync(cancellationToken).ConfigureAwait(false)) + using (var binding = await GetSingleServerReadBindingAsync(operationContext).ConfigureAwait(false)) { - var fileInfo = await GetFileInfoAsync(binding, id, cancellationToken).ConfigureAwait(false); + var fileInfo = await GetFileInfoAsync(operationContext, binding, id).ConfigureAwait(false); return CreateDownloadStream(binding.Fork(), fileInfo, options, cancellationToken); } } @@ -311,11 +343,13 @@ public ImmutableGridFSBucketOptions Options public GridFSDownloadStream<TFileId> OpenDownloadStreamByName(string filename, GridFSDownloadByNameOptions options = null, CancellationToken cancellationToken = default(CancellationToken)) { Ensure.IsNotNull(filename, nameof(filename)); + // TODO: CSOT implement proper way to obtain the operationContext + var operationContext = new OperationContext(null, cancellationToken); options = options ?? 
new GridFSDownloadByNameOptions(); - using (var binding = GetSingleServerReadBinding(cancellationToken)) + using (var binding = GetSingleServerReadBinding(operationContext)) { - var fileInfo = GetFileInfoByName(binding, filename, options.Revision, cancellationToken); + var fileInfo = GetFileInfoByName(operationContext, binding, filename, options.Revision); return CreateDownloadStream(binding.Fork(), fileInfo, options); } } @@ -324,11 +358,13 @@ public ImmutableGridFSBucketOptions Options public async Task<GridFSDownloadStream<TFileId>> OpenDownloadStreamByNameAsync(string filename, GridFSDownloadByNameOptions options = null, CancellationToken cancellationToken = default(CancellationToken)) { Ensure.IsNotNull(filename, nameof(filename)); + // TODO: CSOT implement proper way to obtain the operationContext + var operationContext = new OperationContext(null, cancellationToken); options = options ?? new GridFSDownloadByNameOptions(); - using (var binding = await GetSingleServerReadBindingAsync(cancellationToken).ConfigureAwait(false)) + using (var binding = await GetSingleServerReadBindingAsync(operationContext).ConfigureAwait(false)) { - var fileInfo = await GetFileInfoByNameAsync(binding, filename, options.Revision, cancellationToken).ConfigureAwait(false); + var fileInfo = await GetFileInfoByNameAsync(operationContext, binding, filename, options.Revision).ConfigureAwait(false); return CreateDownloadStream(binding.Fork(), fileInfo, options); } } @@ -338,11 +374,13 @@ public ImmutableGridFSBucketOptions Options { Ensure.IsNotNull((object)id, nameof(id)); Ensure.IsNotNull(filename, nameof(filename)); + // TODO: CSOT implement proper way to obtain the operationContext + var operationContext = new OperationContext(null, cancellationToken); options = options ?? 
new GridFSUploadOptions(); - using (var binding = GetSingleServerReadWriteBinding(cancellationToken)) + using (var binding = GetSingleServerReadWriteBinding(operationContext)) { - EnsureIndexes(binding, cancellationToken); + EnsureIndexes(operationContext, binding); return CreateUploadStream(binding, id, filename, options); } } @@ -352,11 +390,13 @@ public ImmutableGridFSBucketOptions Options { Ensure.IsNotNull((object)id, nameof(id)); Ensure.IsNotNull(filename, nameof(filename)); + // TODO: CSOT implement proper way to obtain the operationContext + var operationContext = new OperationContext(null, cancellationToken); options = options ?? new GridFSUploadOptions(); - using (var binding = await GetSingleServerReadWriteBindingAsync(cancellationToken).ConfigureAwait(false)) + using (var binding = await GetSingleServerReadWriteBindingAsync(operationContext).ConfigureAwait(false)) { - await EnsureIndexesAsync(binding, cancellationToken).ConfigureAwait(false); + await EnsureIndexesAsync(operationContext, binding).ConfigureAwait(false); return CreateUploadStream(binding, id, filename, options); } } @@ -366,10 +406,12 @@ public ImmutableGridFSBucketOptions Options { Ensure.IsNotNull((object)id, nameof(id)); Ensure.IsNotNull(newFilename, nameof(newFilename)); + // TODO: CSOT implement proper way to obtain the operationContext + var operationContext = new OperationContext(null, cancellationToken); var renameOperation = CreateRenameOperation(id, newFilename); - using (var binding = GetSingleServerReadWriteBinding(cancellationToken)) + using (var binding = GetSingleServerReadWriteBinding(operationContext)) { - var result = renameOperation.Execute(binding, cancellationToken); + var result = renameOperation.Execute(operationContext, binding); if (result.IsModifiedCountAvailable && result.ModifiedCount == 0) { @@ -383,10 +425,12 @@ public ImmutableGridFSBucketOptions Options { Ensure.IsNotNull((object)id, nameof(id)); Ensure.IsNotNull(newFilename, nameof(newFilename)); + // TODO: 
CSOT implement proper way to obtain the operationContext + var operationContext = new OperationContext(null, cancellationToken); var renameOperation = CreateRenameOperation(id, newFilename); - using (var binding = await GetSingleServerReadWriteBindingAsync(cancellationToken).ConfigureAwait(false)) + using (var binding = await GetSingleServerReadWriteBindingAsync(operationContext).ConfigureAwait(false)) { - var result = await renameOperation.ExecuteAsync(binding, cancellationToken).ConfigureAwait(false); + var result = await renameOperation.ExecuteAsync(operationContext, binding).ConfigureAwait(false); if (result.IsModifiedCountAvailable && result.ModifiedCount == 0) { @@ -401,6 +445,8 @@ public ImmutableGridFSBucketOptions Options Ensure.IsNotNull((object)id, nameof(id)); Ensure.IsNotNull(filename, nameof(filename)); Ensure.IsNotNull(source, nameof(source)); + // TODO: CSOT implement proper way to obtain the operationContext + var operationContext = new OperationContext(null, cancellationToken); options = options ?? new GridFSUploadOptions(); using (var sourceStream = new MemoryStream(source)) @@ -415,6 +461,8 @@ public ImmutableGridFSBucketOptions Options Ensure.IsNotNull((object)id, nameof(id)); Ensure.IsNotNull(filename, nameof(filename)); Ensure.IsNotNull(source, nameof(source)); + // TODO: CSOT implement proper way to obtain the operationContext + var operationContext = new OperationContext(null, cancellationToken); options = options ?? 
new GridFSUploadOptions(); using (var sourceStream = new MemoryStream(source)) @@ -522,28 +570,28 @@ private bool ChunksCollectionIndexesExist(List<BsonDocument> indexes) return IndexExists(indexes, key); } - private bool ChunksCollectionIndexesExist(IReadBindingHandle binding, CancellationToken cancellationToken) + private bool ChunksCollectionIndexesExist(OperationContext operationContext, IReadBindingHandle binding) { - var indexes = ListIndexes(binding, this.GetChunksCollectionNamespace(), cancellationToken); + var indexes = ListIndexes(operationContext, binding, this.GetChunksCollectionNamespace()); return ChunksCollectionIndexesExist(indexes); } - private async Task<bool> ChunksCollectionIndexesExistAsync(IReadBindingHandle binding, CancellationToken cancellationToken) + private async Task<bool> ChunksCollectionIndexesExistAsync(OperationContext operationContext, IReadBindingHandle binding) { - var indexes = await ListIndexesAsync(binding, this.GetChunksCollectionNamespace(), cancellationToken).ConfigureAwait(false); + var indexes = await ListIndexesAsync(operationContext, binding, this.GetChunksCollectionNamespace()).ConfigureAwait(false); return ChunksCollectionIndexesExist(indexes); } - private void CreateChunksCollectionIndexes(IReadWriteBindingHandle binding, CancellationToken cancellationToken) + private void CreateChunksCollectionIndexes(OperationContext operationContext, IReadWriteBindingHandle binding) { var operation = CreateCreateChunksCollectionIndexesOperation(); - operation.Execute(binding, cancellationToken); + operation.Execute(operationContext, binding); } - private async Task CreateChunksCollectionIndexesAsync(IReadWriteBindingHandle binding, CancellationToken cancellationToken) + private async Task CreateChunksCollectionIndexesAsync(OperationContext operationContext, IReadWriteBindingHandle binding) { var operation = CreateCreateChunksCollectionIndexesOperation(); - await operation.ExecuteAsync(binding, 
cancellationToken).ConfigureAwait(false); + await operation.ExecuteAsync(operationContext, binding).ConfigureAwait(false); } internal CreateIndexesOperation CreateCreateChunksCollectionIndexesOperation() @@ -608,16 +656,16 @@ private BulkMixedWriteOperation CreateDeleteFileOperation(TFileId id) this.GetMessageEncoderSettings()); } - private void CreateFilesCollectionIndexes(IReadWriteBindingHandle binding, CancellationToken cancellationToken) + private void CreateFilesCollectionIndexes(OperationContext operationContext, IReadWriteBindingHandle binding) { var operation = CreateCreateFilesCollectionIndexesOperation(); - operation.Execute(binding, cancellationToken); + operation.Execute(operationContext, binding); } - private async Task CreateFilesCollectionIndexesAsync(IReadWriteBindingHandle binding, CancellationToken cancellationToken) + private async Task CreateFilesCollectionIndexesAsync(OperationContext operationContext, IReadWriteBindingHandle binding) { var operation = CreateCreateFilesCollectionIndexesOperation(); - await operation.ExecuteAsync(binding, cancellationToken).ConfigureAwait(false); + await operation.ExecuteAsync(operationContext, binding).ConfigureAwait(false); } private FindOperation<GridFSFileInfo<TFileId>> CreateFindOperation( @@ -810,23 +858,23 @@ private GridFSUploadStream<TFileId> CreateUploadStream(IReadWriteBindingHandle b } } - private void EnsureIndexes(IReadWriteBindingHandle binding, CancellationToken cancellationToken) + private void EnsureIndexes(OperationContext operationContext, IReadWriteBindingHandle binding) { - _ensureIndexesSemaphore.Wait(cancellationToken); + _ensureIndexesSemaphore.Wait(operationContext.RemainingTimeout, operationContext.CancellationToken); try { if (!_ensureIndexesDone) { - var isFilesCollectionEmpty = IsFilesCollectionEmpty(binding, cancellationToken); + var isFilesCollectionEmpty = IsFilesCollectionEmpty(operationContext, binding); if (isFilesCollectionEmpty) { - if (!FilesCollectionIndexesExist(binding, 
cancellationToken)) + if (!FilesCollectionIndexesExist(operationContext, binding)) { - CreateFilesCollectionIndexes(binding, cancellationToken); + CreateFilesCollectionIndexes(operationContext, binding); } - if (!ChunksCollectionIndexesExist(binding, cancellationToken)) + if (!ChunksCollectionIndexesExist(operationContext, binding)) { - CreateChunksCollectionIndexes(binding, cancellationToken); + CreateChunksCollectionIndexes(operationContext, binding); } } @@ -839,23 +887,23 @@ private void EnsureIndexes(IReadWriteBindingHandle binding, CancellationToken ca } } - private async Task EnsureIndexesAsync(IReadWriteBindingHandle binding, CancellationToken cancellationToken) + private async Task EnsureIndexesAsync(OperationContext operationContext, IReadWriteBindingHandle binding) { - await _ensureIndexesSemaphore.WaitAsync(cancellationToken).ConfigureAwait(false); + await _ensureIndexesSemaphore.WaitAsync(operationContext.RemainingTimeout, operationContext.CancellationToken).ConfigureAwait(false); try { if (!_ensureIndexesDone) { - var isFilesCollectionEmpty = await IsFilesCollectionEmptyAsync(binding, cancellationToken).ConfigureAwait(false); + var isFilesCollectionEmpty = await IsFilesCollectionEmptyAsync(operationContext, binding).ConfigureAwait(false); if (isFilesCollectionEmpty) { - if (!(await FilesCollectionIndexesExistAsync(binding, cancellationToken).ConfigureAwait(false))) + if (!(await FilesCollectionIndexesExistAsync(operationContext, binding).ConfigureAwait(false))) { - await CreateFilesCollectionIndexesAsync(binding, cancellationToken).ConfigureAwait(false); + await CreateFilesCollectionIndexesAsync(operationContext, binding).ConfigureAwait(false); } - if (!(await ChunksCollectionIndexesExistAsync(binding, cancellationToken).ConfigureAwait(false))) + if (!(await ChunksCollectionIndexesExistAsync(operationContext, binding).ConfigureAwait(false))) { - await CreateChunksCollectionIndexesAsync(binding, cancellationToken).ConfigureAwait(false); + await 
CreateChunksCollectionIndexesAsync(operationContext, binding).ConfigureAwait(false); } } @@ -874,24 +922,25 @@ private bool FilesCollectionIndexesExist(List<BsonDocument> indexes) return IndexExists(indexes, key); } - private bool FilesCollectionIndexesExist(IReadBindingHandle binding, CancellationToken cancellationToken) + private bool FilesCollectionIndexesExist(OperationContext operationContext, IReadBindingHandle binding) { - var indexes = ListIndexes(binding, this.GetFilesCollectionNamespace(), cancellationToken); + var indexes = ListIndexes(operationContext, binding, this.GetFilesCollectionNamespace()); return FilesCollectionIndexesExist(indexes); } - private async Task<bool> FilesCollectionIndexesExistAsync(IReadBindingHandle binding, CancellationToken cancellationToken) + private async Task<bool> FilesCollectionIndexesExistAsync(OperationContext operationContext, IReadBindingHandle binding) { - var indexes = await ListIndexesAsync(binding, this.GetFilesCollectionNamespace(), cancellationToken).ConfigureAwait(false); + var indexes = await ListIndexesAsync(operationContext, binding, this.GetFilesCollectionNamespace()).ConfigureAwait(false); return FilesCollectionIndexesExist(indexes); } - private GridFSFileInfo<TFileId> GetFileInfo(IReadBindingHandle binding, TFileId id, CancellationToken cancellationToken) + private GridFSFileInfo<TFileId> GetFileInfo(OperationContext operationContext, IReadBindingHandle binding, TFileId id) { var operation = CreateGetFileInfoOperation(id); - using (var cursor = operation.Execute(binding, cancellationToken)) + using (var cursor = operation.Execute(operationContext, binding)) { - var fileInfo = cursor.FirstOrDefault(cancellationToken); + // TODO: CSOT add a way to propagate cancellationContext into cursor methods. 
+ var fileInfo = cursor.FirstOrDefault(operationContext.CancellationToken); if (fileInfo == null) { throw new GridFSFileNotFoundException(_idSerializationInfo.SerializeValue(id)); @@ -900,12 +949,13 @@ private GridFSFileInfo<TFileId> GetFileInfo(IReadBindingHandle binding, TFileId } } - private async Task<GridFSFileInfo<TFileId>> GetFileInfoAsync(IReadBindingHandle binding, TFileId id, CancellationToken cancellationToken) + private async Task<GridFSFileInfo<TFileId>> GetFileInfoAsync(OperationContext operationContext, IReadBindingHandle binding, TFileId id) { var operation = CreateGetFileInfoOperation(id); - using (var cursor = await operation.ExecuteAsync(binding, cancellationToken).ConfigureAwait(false)) + using (var cursor = await operation.ExecuteAsync(operationContext, binding).ConfigureAwait(false)) { - var fileInfo = await cursor.FirstOrDefaultAsync(cancellationToken).ConfigureAwait(false); + // TODO: CSOT add a way to propagate cancellationContext into cursor methods. + var fileInfo = await cursor.FirstOrDefaultAsync(operationContext.CancellationToken).ConfigureAwait(false); if (fileInfo == null) { throw new GridFSFileNotFoundException(_idSerializationInfo.SerializeValue(id)); @@ -914,12 +964,13 @@ private async Task<GridFSFileInfo<TFileId>> GetFileInfoAsync(IReadBindingHandle } } - private GridFSFileInfo<TFileId> GetFileInfoByName(IReadBindingHandle binding, string filename, int revision, CancellationToken cancellationToken) + private GridFSFileInfo<TFileId> GetFileInfoByName(OperationContext operationContext, IReadBindingHandle binding, string filename, int revision) { var operation = CreateGetFileInfoByNameOperation(filename, revision); - using (var cursor = operation.Execute(binding, cancellationToken)) + using (var cursor = operation.Execute(operationContext, binding)) { - var fileInfo = cursor.FirstOrDefault(cancellationToken); + // TODO: CSOT add a way to propagate cancellationContext into cursor methods. 
+ var fileInfo = cursor.FirstOrDefault(operationContext.CancellationToken); if (fileInfo == null) { throw new GridFSFileNotFoundException(filename, revision); @@ -928,12 +979,13 @@ private GridFSFileInfo<TFileId> GetFileInfoByName(IReadBindingHandle binding, st } } - private async Task<GridFSFileInfo<TFileId>> GetFileInfoByNameAsync(IReadBindingHandle binding, string filename, int revision, CancellationToken cancellationToken) + private async Task<GridFSFileInfo<TFileId>> GetFileInfoByNameAsync(OperationContext operationContext, IReadBindingHandle binding, string filename, int revision) { var operation = CreateGetFileInfoByNameOperation(filename, revision); - using (var cursor = await operation.ExecuteAsync(binding, cancellationToken).ConfigureAwait(false)) + using (var cursor = await operation.ExecuteAsync(operationContext, binding).ConfigureAwait(false)) { - var fileInfo = await cursor.FirstOrDefaultAsync(cancellationToken).ConfigureAwait(false); + // TODO: CSOT add a way to propagate cancellationContext into cursor methods. + var fileInfo = await cursor.FirstOrDefaultAsync(operationContext.CancellationToken).ConfigureAwait(false); if (fileInfo == null) { throw new GridFSFileNotFoundException(filename, revision); @@ -947,36 +999,36 @@ private ReadConcern GetReadConcern() return _options.ReadConcern ?? _database.Settings.ReadConcern; } - private IReadBindingHandle GetSingleServerReadBinding(CancellationToken cancellationToken) + private IReadBindingHandle GetSingleServerReadBinding(OperationContext operationContext) { var readPreference = _options.ReadPreference ?? 
_database.Settings.ReadPreference; var selector = new ReadPreferenceServerSelector(readPreference); - var server = _cluster.SelectServer(selector, cancellationToken); + var server = _cluster.SelectServer(operationContext, selector); var binding = new SingleServerReadBinding(server, readPreference, NoCoreSession.NewHandle()); return new ReadBindingHandle(binding); } - private async Task<IReadBindingHandle> GetSingleServerReadBindingAsync(CancellationToken cancellationToken) + private async Task<IReadBindingHandle> GetSingleServerReadBindingAsync(OperationContext operationContext) { var readPreference = _options.ReadPreference ?? _database.Settings.ReadPreference; var selector = new ReadPreferenceServerSelector(readPreference); - var server = await _cluster.SelectServerAsync(selector, cancellationToken).ConfigureAwait(false); + var server = await _cluster.SelectServerAsync(operationContext, selector).ConfigureAwait(false); var binding = new SingleServerReadBinding(server, readPreference, NoCoreSession.NewHandle()); return new ReadBindingHandle(binding); } - private IReadWriteBindingHandle GetSingleServerReadWriteBinding(CancellationToken cancellationToken) + private IReadWriteBindingHandle GetSingleServerReadWriteBinding(OperationContext operationContext) { var selector = WritableServerSelector.Instance; - var server = _cluster.SelectServer(selector, cancellationToken); + var server = _cluster.SelectServer(operationContext, selector); var binding = new SingleServerReadWriteBinding(server, NoCoreSession.NewHandle()); return new ReadWriteBindingHandle(binding); } - private async Task<IReadWriteBindingHandle> GetSingleServerReadWriteBindingAsync(CancellationToken cancellationToken) + private async Task<IReadWriteBindingHandle> GetSingleServerReadWriteBindingAsync(OperationContext operationContext) { var selector = WritableServerSelector.Instance; - var server = await _cluster.SelectServerAsync(selector, cancellationToken).ConfigureAwait(false); + var server = await 
_cluster.SelectServerAsync(operationContext, selector).ConfigureAwait(false); var binding = new SingleServerReadWriteBinding(server, NoCoreSession.NewHandle()); return new ReadWriteBindingHandle(binding); } @@ -993,37 +1045,39 @@ private bool IndexExists(List<BsonDocument> indexes, BsonDocument key) return false; } - private bool IsFilesCollectionEmpty(IReadWriteBindingHandle binding, CancellationToken cancellationToken) + private bool IsFilesCollectionEmpty(OperationContext operationContext, IReadWriteBindingHandle binding) { var operation = CreateIsFilesCollectionEmptyOperation(); - using (var cursor = operation.Execute(binding, cancellationToken)) + using (var cursor = operation.Execute(operationContext, binding)) { - var firstOrDefault = cursor.FirstOrDefault(cancellationToken); + // TODO: CSOT add a way to propagate cancellationContext into cursor methods. + var firstOrDefault = cursor.FirstOrDefault(operationContext.CancellationToken); return firstOrDefault == null; } } - private async Task<bool> IsFilesCollectionEmptyAsync(IReadWriteBindingHandle binding, CancellationToken cancellationToken) + private async Task<bool> IsFilesCollectionEmptyAsync(OperationContext operationContext, IReadWriteBindingHandle binding) { var operation = CreateIsFilesCollectionEmptyOperation(); - using (var cursor = await operation.ExecuteAsync(binding, cancellationToken).ConfigureAwait(false)) + using (var cursor = await operation.ExecuteAsync(operationContext, binding).ConfigureAwait(false)) { - var firstOrDefault = await cursor.FirstOrDefaultAsync(cancellationToken).ConfigureAwait(false); + // TODO: CSOT add a way to propagate cancellationContext into cursor methods. 
+ var firstOrDefault = await cursor.FirstOrDefaultAsync(operationContext.CancellationToken).ConfigureAwait(false); return firstOrDefault == null; } } - private List<BsonDocument> ListIndexes(IReadBinding binding, CollectionNamespace collectionNamespace, CancellationToken cancellationToken) + private List<BsonDocument> ListIndexes(OperationContext operationContext, IReadBinding binding, CollectionNamespace collectionNamespace) { var operation = CreateListIndexesOperation(collectionNamespace); - return operation.Execute(binding, cancellationToken).ToList(); + return operation.Execute(operationContext, binding).ToList(); } - private async Task<List<BsonDocument>> ListIndexesAsync(IReadBinding binding, CollectionNamespace collectionNamespace, CancellationToken cancellationToken) + private async Task<List<BsonDocument>> ListIndexesAsync(OperationContext operationContext, IReadBinding binding, CollectionNamespace collectionNamespace) { var operation = CreateListIndexesOperation(collectionNamespace); - var cursor = await operation.ExecuteAsync(binding, cancellationToken).ConfigureAwait(false); - return await cursor.ToListAsync(cancellationToken).ConfigureAwait(false); + var cursor = await operation.ExecuteAsync(operationContext, binding).ConfigureAwait(false); + return await cursor.ToListAsync(operationContext.CancellationToken).ConfigureAwait(false); } } } diff --git a/src/MongoDB.Driver/GridFS/GridFSForwardOnlyDownloadStream.cs b/src/MongoDB.Driver/GridFS/GridFSForwardOnlyDownloadStream.cs index 0f9c09b96fe..d3c400afd76 100644 --- a/src/MongoDB.Driver/GridFS/GridFSForwardOnlyDownloadStream.cs +++ b/src/MongoDB.Driver/GridFS/GridFSForwardOnlyDownloadStream.cs @@ -196,14 +196,18 @@ private FindOperation<BsonDocument> CreateFirstBatchOperation() private void GetFirstBatch(CancellationToken cancellationToken) { var operation = CreateFirstBatchOperation(); - _cursor = operation.Execute(Binding, cancellationToken); + // TODO: CSOT implement proper way to obtain the 
operationContext + var operationContext = new OperationContext(null, cancellationToken); + _cursor = operation.Execute(operationContext, Binding); GetNextBatch(cancellationToken); } private async Task GetFirstBatchAsync(CancellationToken cancellationToken) { var operation = CreateFirstBatchOperation(); - _cursor = await operation.ExecuteAsync(Binding, cancellationToken).ConfigureAwait(false); + // TODO: CSOT implement proper way to obtain the operationContext + var operationContext = new OperationContext(null, cancellationToken); + _cursor = await operation.ExecuteAsync(operationContext, Binding).ConfigureAwait(false); await GetNextBatchAsync(cancellationToken).ConfigureAwait(false); } diff --git a/src/MongoDB.Driver/GridFS/GridFSForwardOnlyUploadStream.cs b/src/MongoDB.Driver/GridFS/GridFSForwardOnlyUploadStream.cs index 388b60e1fe8..9b21d21e476 100644 --- a/src/MongoDB.Driver/GridFS/GridFSForwardOnlyUploadStream.cs +++ b/src/MongoDB.Driver/GridFS/GridFSForwardOnlyUploadStream.cs @@ -122,7 +122,9 @@ public override long Position _aborted = true; var operation = CreateAbortOperation(); - operation.Execute(_binding, cancellationToken); + // TODO: CSOT implement proper way to obtain the operationContext + var operationContext = new OperationContext(null, cancellationToken); + operation.Execute(operationContext, _binding); } public override async Task AbortAsync(CancellationToken cancellationToken = default(CancellationToken)) @@ -135,7 +137,9 @@ public override long Position _aborted = true; var operation = CreateAbortOperation(); - await operation.ExecuteAsync(_binding, cancellationToken).ConfigureAwait(false); + // TODO: CSOT implement proper way to obtain the operationContext + var operationContext = new OperationContext(null, cancellationToken); + await operation.ExecuteAsync(operationContext, _binding).ConfigureAwait(false); } public override void Close(CancellationToken cancellationToken) diff --git a/src/MongoDB.Driver/GridFS/GridFSSeekableDownloadStream.cs 
b/src/MongoDB.Driver/GridFS/GridFSSeekableDownloadStream.cs index afbe4a0b6b6..921bbac4232 100644 --- a/src/MongoDB.Driver/GridFS/GridFSSeekableDownloadStream.cs +++ b/src/MongoDB.Driver/GridFS/GridFSSeekableDownloadStream.cs @@ -16,8 +16,6 @@ using System; using System.Collections.Generic; using System.IO; -using System.Linq; -using System.Text; using System.Threading; using System.Threading.Tasks; using MongoDB.Bson; @@ -174,7 +172,9 @@ private FindOperation<BsonDocument> CreateGetChunkOperation(long n) private void GetChunk(long n, CancellationToken cancellationToken) { var operation = CreateGetChunkOperation(n); - using (var cursor = operation.Execute(Binding, cancellationToken)) + // TODO: CSOT implement proper way to obtain the operationContext + var operationContext = new OperationContext(null, cancellationToken); + using (var cursor = operation.Execute(operationContext, Binding)) { var documents = cursor.ToList(); _chunk = GetChunkHelper(n, documents); @@ -185,7 +185,9 @@ private void GetChunk(long n, CancellationToken cancellationToken) private async Task GetChunkAsync(long n, CancellationToken cancellationToken) { var operation = CreateGetChunkOperation(n); - using (var cursor = await operation.ExecuteAsync(Binding, cancellationToken).ConfigureAwait(false)) + // TODO: CSOT implement proper way to obtain the operationContext + var operationContext = new OperationContext(null, cancellationToken); + using (var cursor = await operation.ExecuteAsync(operationContext, Binding).ConfigureAwait(false)) { var documents = await cursor.ToListAsync().ConfigureAwait(false); _chunk = GetChunkHelper(n, documents); diff --git a/src/MongoDB.Driver/GridFS/StreamExtensions.cs b/src/MongoDB.Driver/GridFS/StreamExtensions.cs deleted file mode 100644 index 6f2d8242cc5..00000000000 --- a/src/MongoDB.Driver/GridFS/StreamExtensions.cs +++ /dev/null @@ -1,52 +0,0 @@ -/* Copyright 2015-present MongoDB Inc. 
-* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ - -using System.IO; -using System.Threading; -using System.Threading.Tasks; - -namespace MongoDB.Driver.GridFS -{ - internal static class StreamExtensions - { - public static void ReadBytes(this Stream stream, byte[] destination, int offset, int count, CancellationToken cancellationToken) - { - while (count > 0) - { - var bytesRead = stream.Read(destination, offset, count); // TODO: honor cancellationToken? - if (bytesRead == 0) - { - throw new EndOfStreamException(); - } - offset += bytesRead; - count -= bytesRead; - } - } - - public static async Task ReadBytesAsync(this Stream stream, byte[] destination, int offset, int count, CancellationToken cancellationToken) - { - while (count > 0) - { - var bytesRead = await stream.ReadAsync(destination, offset, count, cancellationToken).ConfigureAwait(false); - if (bytesRead == 0) - { - throw new EndOfStreamException(); - } - offset += bytesRead; - count -= bytesRead; - } - } - } -} diff --git a/src/MongoDB.Driver/IClientSessionExtensions.cs b/src/MongoDB.Driver/IClientSessionExtensions.cs new file mode 100644 index 00000000000..1c16957a8db --- /dev/null +++ b/src/MongoDB.Driver/IClientSessionExtensions.cs @@ -0,0 +1,81 @@ +/* Copyright 2010-present MongoDB Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +using System.Threading; +using System.Threading.Tasks; + +namespace MongoDB.Driver +{ + // TODO: CSOT: Make it public when CSOT will be ready for GA + internal static class IClientSessionExtensions + { + // TODO: Merge these extension methods in IClientSession interface on major release + public static void AbortTransaction(this IClientSession session, AbortTransactionOptions options, CancellationToken cancellationToken = default) + { + if (options?.Timeout == null || session.Options.DefaultTransactionOptions?.Timeout == options.Timeout) + { + session.AbortTransaction(cancellationToken); + return; + } + + ((IClientSessionInternal)session).AbortTransaction(options, cancellationToken); + } + + public static Task AbortTransactionAsync(this IClientSession session, AbortTransactionOptions options, CancellationToken cancellationToken = default) + { + if (options?.Timeout == null || session.Options.DefaultTransactionOptions?.Timeout == options.Timeout) + { + return session.AbortTransactionAsync(cancellationToken); + } + + return ((IClientSessionInternal)session).AbortTransactionAsync(options, cancellationToken); + } + + public static void CommitTransaction(this IClientSession session, CommitTransactionOptions options, CancellationToken cancellationToken = default) + { + if (options?.Timeout == null || session.Options.DefaultTransactionOptions?.Timeout == options.Timeout) + { + session.CommitTransaction(cancellationToken); + return; + } + + ((IClientSessionInternal)session).CommitTransaction(options, 
cancellationToken); + } + + public static Task CommitTransactionAsync(this IClientSession session, CommitTransactionOptions options, CancellationToken cancellationToken = default) + { + if (options?.Timeout == null || session.Options.DefaultTransactionOptions?.Timeout == options.Timeout) + { + return session.CommitTransactionAsync(cancellationToken); + } + + return ((IClientSessionInternal)session).CommitTransactionAsync(options, cancellationToken); + } + + internal static ReadPreference GetEffectiveReadPreference(this IClientSession session, ReadPreference defaultReadPreference) + { + if (session.IsInTransaction) + { + var transactionReadPreference = session.WrappedCoreSession.CurrentTransaction.TransactionOptions?.ReadPreference; + if (transactionReadPreference != null) + { + return transactionReadPreference; + } + } + + return defaultReadPreference ?? ReadPreference.Primary; + } + } +} diff --git a/src/MongoDB.Driver/IClientSessionInternal.cs b/src/MongoDB.Driver/IClientSessionInternal.cs new file mode 100644 index 00000000000..4107b7b811c --- /dev/null +++ b/src/MongoDB.Driver/IClientSessionInternal.cs @@ -0,0 +1,28 @@ +/* Copyright 2010-present MongoDB Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +using System.Threading; +using System.Threading.Tasks; + +namespace MongoDB.Driver; + +// TODO: Merge this interface into ICoreSession on major release +internal interface IClientSessionInternal +{ + void AbortTransaction(AbortTransactionOptions options, CancellationToken cancellationToken = default); + Task AbortTransactionAsync(AbortTransactionOptions options, CancellationToken cancellationToken = default); + void CommitTransaction(CommitTransactionOptions options, CancellationToken cancellationToken = default); + Task CommitTransactionAsync(CommitTransactionOptions options, CancellationToken cancellationToken = default); +} diff --git a/src/MongoDB.Driver/IInheritableMongoClientSettings.cs b/src/MongoDB.Driver/IInheritableMongoClientSettings.cs index 04eb99990f3..2e54fda9d4c 100644 --- a/src/MongoDB.Driver/IInheritableMongoClientSettings.cs +++ b/src/MongoDB.Driver/IInheritableMongoClientSettings.cs @@ -15,7 +15,6 @@ using System; using System.Text; -using MongoDB.Bson; namespace MongoDB.Driver { @@ -24,6 +23,7 @@ internal interface IInheritableMongoClientSettings ReadConcern ReadConcern { get; } UTF8Encoding ReadEncoding { get; } ReadPreference ReadPreference { get; } + TimeSpan? 
Timeout { get; } WriteConcern WriteConcern { get; } UTF8Encoding WriteEncoding { get; } } diff --git a/src/MongoDB.Driver/IMongoCollectionExtensions.cs b/src/MongoDB.Driver/IMongoCollectionExtensions.cs index fe5277a10d3..29c44e2d427 100644 --- a/src/MongoDB.Driver/IMongoCollectionExtensions.cs +++ b/src/MongoDB.Driver/IMongoCollectionExtensions.cs @@ -1200,6 +1200,7 @@ private static IFindFluent<TDocument, TDocument> FindHelper<TDocument>(IClientSe #pragma warning restore 618 ReturnKey = options.ReturnKey, ShowRecordId = options.ShowRecordId, + Timeout = options.Timeout, TranslationOptions = options.TranslationOptions }; } diff --git a/src/MongoDB.Driver/IMongoIndexManager.cs b/src/MongoDB.Driver/IMongoIndexManager.cs index 08e1c04e2a1..3321723f226 100644 --- a/src/MongoDB.Driver/IMongoIndexManager.cs +++ b/src/MongoDB.Driver/IMongoIndexManager.cs @@ -166,7 +166,7 @@ string CreateOne( /// <summary> /// Creates an index. /// </summary> - /// <param name="keys">The keys.</param> + /// <param name="keys">The keys.</param> /// <param name="options">The create index request options.</param> /// <param name="cancellationToken">The cancellation token.</param> /// <returns> diff --git a/src/MongoDB.Driver/IOperationExecutor.cs b/src/MongoDB.Driver/IOperationExecutor.cs index 1af86d6ff62..6dfd8e741c5 100644 --- a/src/MongoDB.Driver/IOperationExecutor.cs +++ b/src/MongoDB.Driver/IOperationExecutor.cs @@ -14,22 +14,39 @@ */ using System; -using System.Threading; using System.Threading.Tasks; -using MongoDB.Driver.Core.Bindings; using MongoDB.Driver.Core.Operations; namespace MongoDB.Driver { - internal interface IOperationExecutor + internal interface IOperationExecutor : IDisposable { - TResult ExecuteReadOperation<TResult>(IReadBinding binding, IReadOperation<TResult> operation, CancellationToken cancellationToken); - Task<TResult> ExecuteReadOperationAsync<TResult>(IReadBinding binding, IReadOperation<TResult> operation, CancellationToken cancellationToken); + TResult 
ExecuteReadOperation<TResult>( + OperationContext operationContext, + IClientSessionHandle session, + IReadOperation<TResult> operation, + ReadPreference readPreference, + bool allowChannelPinning); - TResult ExecuteWriteOperation<TResult>(IWriteBinding binding, IWriteOperation<TResult> operation, CancellationToken cancellationToken); - Task<TResult> ExecuteWriteOperationAsync<TResult>(IWriteBinding binding, IWriteOperation<TResult> operation, CancellationToken cancellationToken); + Task<TResult> ExecuteReadOperationAsync<TResult>( + OperationContext operationContext, + IClientSessionHandle session, + IReadOperation<TResult> operation, + ReadPreference readPreference, + bool allowChannelPinning); - IClientSessionHandle StartImplicitSession(CancellationToken cancellationToken); - Task<IClientSessionHandle> StartImplicitSessionAsync(CancellationToken cancellationToken); + TResult ExecuteWriteOperation<TResult>( + OperationContext operationContext, + IClientSessionHandle session, + IWriteOperation<TResult> operation, + bool allowChannelPinning); + + Task<TResult> ExecuteWriteOperationAsync<TResult>( + OperationContext operationContext, + IClientSessionHandle session, + IWriteOperation<TResult> operation, + bool allowChannelPinning); + + IClientSessionHandle StartImplicitSession(); } } diff --git a/src/MongoDB.Driver/IndexKeysDefinitionBuilder.cs b/src/MongoDB.Driver/IndexKeysDefinitionBuilder.cs index 6e500f4c9c2..1b42c938411 100644 --- a/src/MongoDB.Driver/IndexKeysDefinitionBuilder.cs +++ b/src/MongoDB.Driver/IndexKeysDefinitionBuilder.cs @@ -487,20 +487,7 @@ public override BsonDocument Render(RenderArgs<TDocument> args) { var renderedField = _field.Render(args); - BsonValue value; - switch (_direction) - { - case SortDirection.Ascending: - value = 1; - break; - case SortDirection.Descending: - value = -1; - break; - default: - throw new InvalidOperationException("Unknown value for " + typeof(SortDirection) + "."); - } - - return new 
BsonDocument(renderedField.FieldName, value); + return new BsonDocument(renderedField.FieldName, _direction.Render()); } } diff --git a/src/MongoDB.Driver/InsertManyOptions.cs b/src/MongoDB.Driver/InsertManyOptions.cs index 088fda4723a..0847ec94d42 100644 --- a/src/MongoDB.Driver/InsertManyOptions.cs +++ b/src/MongoDB.Driver/InsertManyOptions.cs @@ -11,9 +11,12 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * + * */ + +using System; using MongoDB.Bson; +using MongoDB.Driver.Core.Misc; namespace MongoDB.Driver { @@ -26,6 +29,7 @@ public sealed class InsertManyOptions private bool? _bypassDocumentValidation; private BsonValue _comment; private bool _isOrdered; + private TimeSpan? _timeout; // constructors /// <summary> @@ -63,5 +67,15 @@ public bool IsOrdered get { return _isOrdered; } set { _isOrdered = value; } } + + /// <summary> + /// Gets or sets the operation timeout. + /// </summary> + // TODO: CSOT: Make it public when CSOT will be ready for GA + internal TimeSpan? Timeout + { + get => _timeout; + set => _timeout = Ensure.IsNullOrValidTimeout(value, nameof(Timeout)); + } } } diff --git a/src/MongoDB.Driver/InsertOneOptions.cs b/src/MongoDB.Driver/InsertOneOptions.cs index 3f2ddf773ba..2faea76ccbc 100644 --- a/src/MongoDB.Driver/InsertOneOptions.cs +++ b/src/MongoDB.Driver/InsertOneOptions.cs @@ -11,9 +11,12 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * + * */ + +using System; using MongoDB.Bson; +using MongoDB.Driver.Core.Misc; namespace MongoDB.Driver { @@ -22,10 +25,10 @@ namespace MongoDB.Driver /// </summary> public sealed class InsertOneOptions { - private BsonValue _comment; - // private fields private bool? _bypassDocumentValidation; + private BsonValue _comment; + private TimeSpan? 
_timeout; // public properties /// <summary> @@ -45,5 +48,15 @@ public BsonValue Comment get { return _comment; } set { _comment = value; } } + + /// <summary> + /// Gets or sets the operation timeout. + /// </summary> + // TODO: CSOT: Make it public when CSOT will be ready for GA + internal TimeSpan? Timeout + { + get => _timeout; + set => _timeout = Ensure.IsNullOrValidTimeout(value, nameof(Timeout)); + } } } diff --git a/src/MongoDB.Driver/Linq/ISetWindowFieldsPartitionExtensions.cs b/src/MongoDB.Driver/Linq/ISetWindowFieldsPartitionExtensions.cs index ca8aade374b..ecba3d5e81e 100644 --- a/src/MongoDB.Driver/Linq/ISetWindowFieldsPartitionExtensions.cs +++ b/src/MongoDB.Driver/Linq/ISetWindowFieldsPartitionExtensions.cs @@ -16,7 +16,6 @@ using System; using System.Collections.Generic; -using MongoDB.Bson; namespace MongoDB.Driver.Linq { @@ -879,6 +878,136 @@ public static TValue Max<TInput, TValue>(this ISetWindowFieldsPartition<TInput> throw new InvalidOperationException("This method is only intended to be used with SetWindowFields."); } + /// <summary> + /// Returns the median of the numeric values. Median ignores non-numeric values. + /// </summary> + /// <typeparam name="TInput">The type of the input documents in the partition.</typeparam> + /// <param name="partition">The partition.</param> + /// <param name="selector">The selector that selects a value from the input document.</param> + /// <param name="window">The window boundaries.</param> + /// <returns>The median of the selected values.</returns> + public static decimal Median<TInput>(this ISetWindowFieldsPartition<TInput> partition, Func<TInput, decimal> selector, SetWindowFieldsWindow window = null) + { + throw new InvalidOperationException("This method is only intended to be used with SetWindowFields."); + } + + /// <summary> + /// Returns the median of the numeric values. Median ignores non-numeric values. 
+ /// </summary> + /// <typeparam name="TInput">The type of the input documents in the partition.</typeparam> + /// <param name="partition">The partition.</param> + /// <param name="selector">The selector that selects a value from the input document.</param> + /// <param name="window">The window boundaries.</param> + /// <returns>The median of the selected values.</returns> + public static decimal? Median<TInput>(this ISetWindowFieldsPartition<TInput> partition, Func<TInput, decimal?> selector, SetWindowFieldsWindow window = null) + { + throw new InvalidOperationException("This method is only intended to be used with SetWindowFields."); + } + + /// <summary> + /// Returns the median of the numeric values. Median ignores non-numeric values. + /// </summary> + /// <typeparam name="TInput">The type of the input documents in the partition.</typeparam> + /// <param name="partition">The partition.</param> + /// <param name="selector">The selector that selects a value from the input document.</param> + /// <param name="window">The window boundaries.</param> + /// <returns>The median of the selected values.</returns> + public static double Median<TInput>(this ISetWindowFieldsPartition<TInput> partition, Func<TInput, double> selector, SetWindowFieldsWindow window = null) + { + throw new InvalidOperationException("This method is only intended to be used with SetWindowFields."); + } + + /// <summary> + /// Returns the median of the numeric values. Median ignores non-numeric values. + /// </summary> + /// <typeparam name="TInput">The type of the input documents in the partition.</typeparam> + /// <param name="partition">The partition.</param> + /// <param name="selector">The selector that selects a value from the input document.</param> + /// <param name="window">The window boundaries.</param> + /// <returns>The median of the selected values.</returns> + public static double? 
Median<TInput>(this ISetWindowFieldsPartition<TInput> partition, Func<TInput, double?> selector, SetWindowFieldsWindow window = null) + { + throw new InvalidOperationException("This method is only intended to be used with SetWindowFields."); + } + + /// <summary> + /// Returns the median of the numeric values. Median ignores non-numeric values. + /// </summary> + /// <typeparam name="TInput">The type of the input documents in the partition.</typeparam> + /// <param name="partition">The partition.</param> + /// <param name="selector">The selector that selects a value from the input document.</param> + /// <param name="window">The window boundaries.</param> + /// <returns>The median of the selected values.</returns> + public static float Median<TInput>(this ISetWindowFieldsPartition<TInput> partition, Func<TInput, float> selector, SetWindowFieldsWindow window = null) + { + throw new InvalidOperationException("This method is only intended to be used with SetWindowFields."); + } + + /// <summary> + /// Returns the median of the numeric values. Median ignores non-numeric values. + /// </summary> + /// <typeparam name="TInput">The type of the input documents in the partition.</typeparam> + /// <param name="partition">The partition.</param> + /// <param name="selector">The selector that selects a value from the input document.</param> + /// <param name="window">The window boundaries.</param> + /// <returns>The median of the selected values.</returns> + public static float? Median<TInput>(this ISetWindowFieldsPartition<TInput> partition, Func<TInput, float?> selector, SetWindowFieldsWindow window = null) + { + throw new InvalidOperationException("This method is only intended to be used with SetWindowFields."); + } + + /// <summary> + /// Returns the median of the numeric values. Median ignores non-numeric values. 
+ /// </summary> + /// <typeparam name="TInput">The type of the input documents in the partition.</typeparam> + /// <param name="partition">The partition.</param> + /// <param name="selector">The selector that selects a value from the input document.</param> + /// <param name="window">The window boundaries.</param> + /// <returns>The median of the selected values.</returns> + public static double Median<TInput>(this ISetWindowFieldsPartition<TInput> partition, Func<TInput, int> selector, SetWindowFieldsWindow window = null) + { + throw new InvalidOperationException("This method is only intended to be used with SetWindowFields."); + } + + /// <summary> + /// Returns the median of the numeric values. Median ignores non-numeric values. + /// </summary> + /// <typeparam name="TInput">The type of the input documents in the partition.</typeparam> + /// <param name="partition">The partition.</param> + /// <param name="selector">The selector that selects a value from the input document.</param> + /// <param name="window">The window boundaries.</param> + /// <returns>The median of the selected values.</returns> + public static double? Median<TInput>(this ISetWindowFieldsPartition<TInput> partition, Func<TInput, int?> selector, SetWindowFieldsWindow window = null) + { + throw new InvalidOperationException("This method is only intended to be used with SetWindowFields."); + } + + /// <summary> + /// Returns the median of the numeric values. Median ignores non-numeric values. 
+ /// </summary> + /// <typeparam name="TInput">The type of the input documents in the partition.</typeparam> + /// <param name="partition">The partition.</param> + /// <param name="selector">The selector that selects a value from the input document.</param> + /// <param name="window">The window boundaries.</param> + /// <returns>The median of the selected values.</returns> + public static double Median<TInput>(this ISetWindowFieldsPartition<TInput> partition, Func<TInput, long> selector, SetWindowFieldsWindow window = null) + { + throw new InvalidOperationException("This method is only intended to be used with SetWindowFields."); + } + + /// <summary> + /// Returns the median of the numeric values. Median ignores non-numeric values. + /// </summary> + /// <typeparam name="TInput">The type of the input documents in the partition.</typeparam> + /// <param name="partition">The partition.</param> + /// <param name="selector">The selector that selects a value from the input document.</param> + /// <param name="window">The window boundaries.</param> + /// <returns>The median of the selected values.</returns> + public static double? Median<TInput>(this ISetWindowFieldsPartition<TInput> partition, Func<TInput, long?> selector, SetWindowFieldsWindow window = null) + { + throw new InvalidOperationException("This method is only intended to be used with SetWindowFields."); + } + /// <summary> /// Returns the minimum value. /// </summary> @@ -893,6 +1022,146 @@ public static TValue Min<TInput, TValue>(this ISetWindowFieldsPartition<TInput> throw new InvalidOperationException("This method is only intended to be used with SetWindowFields."); } + /// <summary> + /// Returns the values at the given percentiles. Percentile ignores non-numeric values. 
+ /// </summary> + /// <typeparam name="TInput">The type of the input documents in the partition.</typeparam> + /// <param name="partition">The partition.</param> + /// <param name="selector">The selector that selects a value from the input document.</param> + /// <param name="percentiles">The percentiles (between 0.0 and 1.0).</param> + /// <param name="window">The window boundaries.</param> + /// <returns>The values at the given percentiles.</returns> + public static decimal[] Percentile<TInput>(this ISetWindowFieldsPartition<TInput> partition, Func<TInput, decimal> selector, IEnumerable<double> percentiles, SetWindowFieldsWindow window = null) + { + throw new InvalidOperationException("This method is only intended to be used with SetWindowFields."); + } + + /// <summary> + /// Returns the values at the given percentiles. Percentile ignores non-numeric values. Percentile returns results in the same order as the given percentiles. + /// </summary> + /// <typeparam name="TInput">The type of the input documents in the partition.</typeparam> + /// <param name="partition">The partition.</param> + /// <param name="selector">The selector that selects a value from the input document.</param> + /// <param name="percentiles">The percentiles (between 0.0 and 1.0).</param> + /// <param name="window">The window boundaries.</param> + /// <returns>The values at the given percentiles.</returns> + public static decimal?[] Percentile<TInput>(this ISetWindowFieldsPartition<TInput> partition, Func<TInput, decimal?> selector, IEnumerable<double> percentiles, SetWindowFieldsWindow window = null) + { + throw new InvalidOperationException("This method is only intended to be used with SetWindowFields."); + } + + /// <summary> + /// Returns the values at the given percentiles. Percentile ignores non-numeric values. Percentile returns results in the same order as the given percentiles. 
+ /// </summary> + /// <typeparam name="TInput">The type of the input documents in the partition.</typeparam> + /// <param name="partition">The partition.</param> + /// <param name="selector">The selector that selects a value from the input document.</param> + /// <param name="percentiles">The percentiles (between 0.0 and 1.0).</param> + /// <param name="window">The window boundaries.</param> + /// <returns>The values at the given percentiles.</returns> + public static double[] Percentile<TInput>(this ISetWindowFieldsPartition<TInput> partition, Func<TInput, double> selector, IEnumerable<double> percentiles, SetWindowFieldsWindow window = null) + { + throw new InvalidOperationException("This method is only intended to be used with SetWindowFields."); + } + + /// <summary> + /// Returns the values at the given percentiles. Percentile ignores non-numeric values. Percentile returns results in the same order as the given percentiles. + /// </summary> + /// <typeparam name="TInput">The type of the input documents in the partition.</typeparam> + /// <param name="partition">The partition.</param> + /// <param name="selector">The selector that selects a value from the input document.</param> + /// <param name="percentiles">The percentiles (between 0.0 and 1.0).</param> + /// <param name="window">The window boundaries.</param> + /// <returns>The values at the given percentiles.</returns> + public static double?[] Percentile<TInput>(this ISetWindowFieldsPartition<TInput> partition, Func<TInput, double?> selector, IEnumerable<double> percentiles, SetWindowFieldsWindow window = null) + { + throw new InvalidOperationException("This method is only intended to be used with SetWindowFields."); + } + + /// <summary> + /// Returns the values at the given percentiles. Percentile ignores non-numeric values. Percentile returns results in the same order as the given percentiles. 
+ /// </summary> + /// <typeparam name="TInput">The type of the input documents in the partition.</typeparam> + /// <param name="partition">The partition.</param> + /// <param name="selector">The selector that selects a value from the input document.</param> + /// <param name="percentiles">The percentiles (between 0.0 and 1.0).</param> + /// <param name="window">The window boundaries.</param> + /// <returns>The values at the given percentiles.</returns> + public static float[] Percentile<TInput>(this ISetWindowFieldsPartition<TInput> partition, Func<TInput, float> selector, IEnumerable<double> percentiles, SetWindowFieldsWindow window = null) + { + throw new InvalidOperationException("This method is only intended to be used with SetWindowFields."); + } + + /// <summary> + /// Returns the values at the given percentiles. Percentile ignores non-numeric values. Percentile returns results in the same order as the given percentiles. + /// </summary> + /// <typeparam name="TInput">The type of the input documents in the partition.</typeparam> + /// <param name="partition">The partition.</param> + /// <param name="selector">The selector that selects a value from the input document.</param> + /// <param name="percentiles">The percentiles (between 0.0 and 1.0).</param> + /// <param name="window">The window boundaries.</param> + /// <returns>The values at the given percentiles.</returns> + public static float?[] Percentile<TInput>(this ISetWindowFieldsPartition<TInput> partition, Func<TInput, float?> selector, IEnumerable<double> percentiles, SetWindowFieldsWindow window = null) + { + throw new InvalidOperationException("This method is only intended to be used with SetWindowFields."); + } + + /// <summary> + /// Returns the values at the given percentiles. Percentile ignores non-numeric values. Percentile returns results in the same order as the given percentiles. 
+ /// </summary> + /// <typeparam name="TInput">The type of the input documents in the partition.</typeparam> + /// <param name="partition">The partition.</param> + /// <param name="selector">The selector that selects a value from the input document.</param> + /// <param name="percentiles">The percentiles (between 0.0 and 1.0).</param> + /// <param name="window">The window boundaries.</param> + /// <returns>The values at the given percentiles.</returns> + public static double[] Percentile<TInput>(this ISetWindowFieldsPartition<TInput> partition, Func<TInput, int> selector, IEnumerable<double> percentiles, SetWindowFieldsWindow window = null) + { + throw new InvalidOperationException("This method is only intended to be used with SetWindowFields."); + } + + /// <summary> + /// Returns the values at the given percentiles. Percentile ignores non-numeric values. Percentile returns results in the same order as the given percentiles. + /// </summary> + /// <typeparam name="TInput">The type of the input documents in the partition.</typeparam> + /// <param name="partition">The partition.</param> + /// <param name="selector">The selector that selects a value from the input document.</param> + /// <param name="percentiles">The percentiles (between 0.0 and 1.0).</param> + /// <param name="window">The window boundaries.</param> + /// <returns>The values at the given percentiles.</returns> + public static double?[] Percentile<TInput>(this ISetWindowFieldsPartition<TInput> partition, Func<TInput, int?> selector, IEnumerable<double> percentiles, SetWindowFieldsWindow window = null) + { + throw new InvalidOperationException("This method is only intended to be used with SetWindowFields."); + } + + /// <summary> + /// Returns the values at the given percentiles. Percentile ignores non-numeric values. Percentile returns results in the same order as the given percentiles. 
+ /// </summary> + /// <typeparam name="TInput">The type of the input documents in the partition.</typeparam> + /// <param name="partition">The partition.</param> + /// <param name="selector">The selector that selects a value from the input document.</param> + /// <param name="percentiles">The percentiles (between 0.0 and 1.0).</param> + /// <param name="window">The window boundaries.</param> + /// <returns>The values at the given percentiles.</returns> + public static double[] Percentile<TInput>(this ISetWindowFieldsPartition<TInput> partition, Func<TInput, long> selector, IEnumerable<double> percentiles, SetWindowFieldsWindow window = null) + { + throw new InvalidOperationException("This method is only intended to be used with SetWindowFields."); + } + + /// <summary> + /// Returns the values at the given percentiles. Percentile ignores non-numeric values. Percentile returns results in the same order as the given percentiles. + /// </summary> + /// <typeparam name="TInput">The type of the input documents in the partition.</typeparam> + /// <param name="partition">The partition.</param> + /// <param name="selector">The selector that selects a value from the input document.</param> + /// <param name="percentiles">The percentiles (between 0.0 and 1.0).</param> + /// <param name="window">The window boundaries.</param> + /// <returns>The values at the given percentiles.</returns> + public static double?[] Percentile<TInput>(this ISetWindowFieldsPartition<TInput> partition, Func<TInput, long?> selector, IEnumerable<double> percentiles, SetWindowFieldsWindow window = null) + { + throw new InvalidOperationException("This method is only intended to be used with SetWindowFields."); + } + /// <summary> /// Returns a sequence of values. 
/// </summary> diff --git a/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/AstEnumExtensions.cs b/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/AstEnumExtensions.cs index 740cc624f94..be6200a004f 100644 --- a/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/AstEnumExtensions.cs +++ b/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/AstEnumExtensions.cs @@ -15,38 +15,13 @@ using System; using MongoDB.Bson; +using MongoDB.Bson.Serialization; namespace MongoDB.Driver.Linq.Linq3Implementation.Ast { internal static class AstEnumExtensions { - public static string Render(this BsonType type) - { - return type switch - { - BsonType.Array => "array", - BsonType.Binary => "binData", - BsonType.Boolean => "bool", - BsonType.DateTime => "date", - BsonType.Decimal128 => "decimal", - BsonType.Document => "object", - BsonType.Double => "double", - BsonType.Int32 => "int", - BsonType.Int64 => "long", - BsonType.JavaScript => "javascript", - BsonType.JavaScriptWithScope => "javascriptWithScope", - BsonType.MaxKey => "maxKey", - BsonType.MinKey => "minKey", - BsonType.Null => "null", - BsonType.ObjectId => "objectId", - BsonType.RegularExpression => "regex", - BsonType.String => "string", - BsonType.Symbol => "symbol", - BsonType.Timestamp => "timestamp", - BsonType.Undefined => "undefined", - _ => throw new ArgumentException($"Unexpected BSON type: {type}.", nameof(type)) - }; - } + public static string Render(this BsonType type) => type.ToServerString(); public static string Render(this ByteOrder byteOrder) { diff --git a/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/AstNodeType.cs b/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/AstNodeType.cs index 2b8ce448dd3..147bd427729 100644 --- a/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/AstNodeType.cs +++ b/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/AstNodeType.cs @@ -93,6 +93,9 @@ internal enum AstNodeType MatchesEverythingFilter, MatchesNothingFilter, MatchStage, + MedianExpression, + MedianAccumulatorExpression, + 
MedianWindowExpression, MergeStage, ModFilterOperation, NaryExpression, @@ -104,6 +107,9 @@ internal enum AstNodeType NullaryWindowExpression, OrFilter, OutStage, + PercentileExpression, + PercentileAccumulatorExpression, + PercentileWindowExpression, PickAccumulatorExpression, PickExpression, Pipeline, diff --git a/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Expressions/AstExpression.cs b/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Expressions/AstExpression.cs index 90512554fc6..bf729df32a9 100644 --- a/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Expressions/AstExpression.cs +++ b/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Expressions/AstExpression.cs @@ -597,6 +597,21 @@ public static AstExpression Max(AstExpression arg1, AstExpression arg2) return new AstNaryExpression(AstNaryOperator.Max, [arg1, arg2]); } + public static AstExpression Median(AstExpression input) + { + return new AstMedianExpression(input); + } + + public static AstAccumulatorExpression MedianAccumulator(AstExpression input) + { + return new AstMedianAccumulatorExpression(input); + } + + public static AstWindowExpression MedianWindowExpression(AstExpression input, AstWindow window) + { + return new AstMedianWindowExpression(input, window); + } + public static AstExpression Min(AstExpression array) { return new AstUnaryExpression(AstUnaryOperator.Min, array); @@ -653,6 +668,21 @@ public static AstExpression Or(params AstExpression[] args) return new AstNaryExpression(AstNaryOperator.Or, flattenedArgs); } + public static AstExpression Percentile(AstExpression input, AstExpression percentiles) + { + return new AstPercentileExpression(input, percentiles); + } + + public static AstAccumulatorExpression PercentileAccumulator(AstExpression input, AstExpression percentiles) + { + return new AstPercentileAccumulatorExpression(input, percentiles); + } + + public static AstWindowExpression PercentileWindowExpression(AstExpression input, AstExpression percentiles, AstWindow window) + { + 
return new AstPercentileWindowExpression(input, percentiles, window); + } + public static AstExpression PickExpression(AstPickOperator @operator, AstExpression source, AstSortFields sortBy, AstVarExpression @as, AstExpression selector, AstExpression n) { return new AstPickExpression(@operator, source, sortBy, @as, selector, n); diff --git a/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Expressions/AstMedianAccumulatorExpression.cs b/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Expressions/AstMedianAccumulatorExpression.cs new file mode 100644 index 00000000000..d7cc2bb085a --- /dev/null +++ b/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Expressions/AstMedianAccumulatorExpression.cs @@ -0,0 +1,63 @@ +/* Copyright 2010-present MongoDB Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +using MongoDB.Bson; +using MongoDB.Driver.Core.Misc; +using MongoDB.Driver.Linq.Linq3Implementation.Ast.Visitors; + +namespace MongoDB.Driver.Linq.Linq3Implementation.Ast.Expressions +{ + internal sealed class AstMedianAccumulatorExpression : AstAccumulatorExpression + { + private readonly AstExpression _input; + + public AstMedianAccumulatorExpression(AstExpression input) + { + _input = Ensure.IsNotNull(input, nameof(input)); + } + + public AstExpression Input => _input; + + public override AstNodeType NodeType => AstNodeType.MedianAccumulatorExpression; + + public override AstNode Accept(AstNodeVisitor visitor) + { + return visitor.VisitMedianAccumulatorExpression(this); + } + + public override BsonValue Render() + { + return new BsonDocument + { + { + "$median", new BsonDocument + { + { "input", _input.Render() }, + { "method", "approximate" } // server requires this parameter but currently only allows this value + } + } + }; + } + + public AstMedianAccumulatorExpression Update(AstExpression input) + { + if (input == _input) + { + return this; + } + return new AstMedianAccumulatorExpression(input); + } + } +} \ No newline at end of file diff --git a/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Expressions/AstMedianExpression.cs b/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Expressions/AstMedianExpression.cs new file mode 100644 index 00000000000..a171bb9fa02 --- /dev/null +++ b/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Expressions/AstMedianExpression.cs @@ -0,0 +1,63 @@ +/* Copyright 2010-present MongoDB Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +using MongoDB.Bson; +using MongoDB.Driver.Core.Misc; +using MongoDB.Driver.Linq.Linq3Implementation.Ast.Visitors; + +namespace MongoDB.Driver.Linq.Linq3Implementation.Ast.Expressions +{ + internal sealed class AstMedianExpression : AstExpression + { + private readonly AstExpression _input; + + public AstMedianExpression(AstExpression input) + { + _input = Ensure.IsNotNull(input, nameof(input)); + } + + public AstExpression Input => _input; + + public override AstNodeType NodeType => AstNodeType.MedianExpression; + + public override AstNode Accept(AstNodeVisitor visitor) + { + return visitor.VisitMedianExpression(this); + } + + public override BsonValue Render() + { + return new BsonDocument + { + { + "$median", new BsonDocument + { + { "input", _input.Render() }, + { "method", "approximate" } // server requires this parameter but currently only allows this value + } + } + }; + } + + public AstMedianExpression Update(AstExpression input) + { + if (input == _input) + { + return this; + } + return new AstMedianExpression(input); + } + } +} \ No newline at end of file diff --git a/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Expressions/AstMedianWindowExpression.cs b/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Expressions/AstMedianWindowExpression.cs new file mode 100644 index 00000000000..fbe71c3f278 --- /dev/null +++ b/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Expressions/AstMedianWindowExpression.cs @@ -0,0 +1,69 @@ +/* Copyright 2010-present MongoDB Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +using MongoDB.Bson; +using MongoDB.Driver.Core.Misc; +using MongoDB.Driver.Linq.Linq3Implementation.Ast.Visitors; + +namespace MongoDB.Driver.Linq.Linq3Implementation.Ast.Expressions +{ + internal sealed class AstMedianWindowExpression : AstWindowExpression + { + private readonly AstExpression _input; + private readonly AstWindow _window; + + public AstMedianWindowExpression(AstExpression input, AstWindow window) + { + _input = Ensure.IsNotNull(input, nameof(input)); + _window = window; + } + + public AstExpression Input => _input; + + public AstWindow Window => _window; + + public override AstNodeType NodeType => AstNodeType.MedianWindowExpression; + + public override AstNode Accept(AstNodeVisitor visitor) + { + return visitor.VisitMedianWindowExpression(this); + } + + public override BsonValue Render() + { + return new BsonDocument + { + { + "$median", new BsonDocument + { + { "input", _input.Render() }, + { "method", "approximate" } // server requires this parameter but currently only allows this value + } + }, + { "window", _window?.Render(), _window != null } + }; + } + + public AstMedianWindowExpression Update(AstExpression input, AstWindow window) + { + if (input == _input && window == _window) + { + return this; + } + + return new AstMedianWindowExpression(input, window); + } + } +} \ No newline at end of file diff --git 
a/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Expressions/AstPercentileAccumulatorExpression.cs b/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Expressions/AstPercentileAccumulatorExpression.cs new file mode 100644 index 00000000000..77cee275402 --- /dev/null +++ b/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Expressions/AstPercentileAccumulatorExpression.cs @@ -0,0 +1,68 @@ +/* Copyright 2010-present MongoDB Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +using MongoDB.Bson; +using MongoDB.Driver.Core.Misc; +using MongoDB.Driver.Linq.Linq3Implementation.Ast.Visitors; + +namespace MongoDB.Driver.Linq.Linq3Implementation.Ast.Expressions +{ + internal sealed class AstPercentileAccumulatorExpression : AstAccumulatorExpression + { + private readonly AstExpression _input; + private readonly AstExpression _percentiles; + + public AstPercentileAccumulatorExpression(AstExpression input, AstExpression percentiles) + { + _input = Ensure.IsNotNull(input, nameof(input)); + _percentiles = Ensure.IsNotNull(percentiles, nameof(percentiles)); + } + + public AstExpression Input => _input; + + public AstExpression Percentiles => _percentiles; + + public override AstNodeType NodeType => AstNodeType.PercentileAccumulatorExpression; + + public override AstNode Accept(AstNodeVisitor visitor) + { + return visitor.VisitPercentileAccumulatorExpression(this); + } + + public override BsonValue Render() + { + return new BsonDocument + { + { + "$percentile", new BsonDocument + { + { "input", _input.Render() }, + { "p", _percentiles.Render() }, + { "method", "approximate" } // server requires this parameter but currently only allows this value + } + } + }; + } + + public AstPercentileAccumulatorExpression Update(AstExpression input, AstExpression percentiles) + { + if (input == _input && percentiles == _percentiles) + { + return this; + } + return new AstPercentileAccumulatorExpression(input, percentiles); + } + } +} \ No newline at end of file diff --git a/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Expressions/AstPercentileExpression.cs b/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Expressions/AstPercentileExpression.cs new file mode 100644 index 00000000000..aab4920143d --- /dev/null +++ b/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Expressions/AstPercentileExpression.cs @@ -0,0 +1,68 @@ +/* Copyright 2010-present MongoDB Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +using MongoDB.Bson; +using MongoDB.Driver.Core.Misc; +using MongoDB.Driver.Linq.Linq3Implementation.Ast.Visitors; + +namespace MongoDB.Driver.Linq.Linq3Implementation.Ast.Expressions +{ + internal sealed class AstPercentileExpression : AstExpression + { + private readonly AstExpression _input; + private readonly AstExpression _percentiles; + + public AstPercentileExpression(AstExpression input, AstExpression percentiles) + { + _input = Ensure.IsNotNull(input, nameof(input)); + _percentiles = Ensure.IsNotNull(percentiles, nameof(percentiles)); + } + + public AstExpression Input => _input; + + public AstExpression Percentiles => _percentiles; + + public override AstNodeType NodeType => AstNodeType.PercentileExpression; + + public override AstNode Accept(AstNodeVisitor visitor) + { + return visitor.VisitPercentileExpression(this); + } + + public override BsonValue Render() + { + return new BsonDocument + { + { + "$percentile", new BsonDocument + { + { "input", _input.Render() }, + { "p", _percentiles.Render() }, + { "method", "approximate" } // server requires this parameter but currently only allows this value + } + } + }; + } + + public AstPercentileExpression Update(AstExpression input, AstExpression percentiles) + { + if (input == _input && percentiles == _percentiles) + { + return this; + } + return new AstPercentileExpression(input, percentiles); + } + } +} \ No 
newline at end of file diff --git a/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Expressions/AstPercentileWindowExpression.cs b/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Expressions/AstPercentileWindowExpression.cs new file mode 100644 index 00000000000..055bcfb8ef3 --- /dev/null +++ b/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Expressions/AstPercentileWindowExpression.cs @@ -0,0 +1,74 @@ +/* Copyright 2010-present MongoDB Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +using MongoDB.Bson; +using MongoDB.Driver.Core.Misc; +using MongoDB.Driver.Linq.Linq3Implementation.Ast.Visitors; + +namespace MongoDB.Driver.Linq.Linq3Implementation.Ast.Expressions +{ + internal sealed class AstPercentileWindowExpression : AstWindowExpression + { + private readonly AstExpression _input; + private readonly AstExpression _percentiles; + private readonly AstWindow _window; + + public AstPercentileWindowExpression(AstExpression input, AstExpression percentiles, AstWindow window) + { + _input = Ensure.IsNotNull(input, nameof(input)); + _percentiles = Ensure.IsNotNull(percentiles, nameof(percentiles)); + _window = window; + } + + public AstExpression Input => _input; + + public AstExpression Percentiles => _percentiles; + + public AstWindow Window => _window; + + public override AstNodeType NodeType => AstNodeType.PercentileWindowExpression; + + public override AstNode Accept(AstNodeVisitor visitor) + { + return visitor.VisitPercentileWindowExpression(this); + } + + public override BsonValue Render() + { + return new BsonDocument + { + { + "$percentile", new BsonDocument + { + { "input", _input.Render() }, + { "p", _percentiles.Render() }, + { "method", "approximate" } // server requires this parameter but currently only allows this value + } + }, + { "window", _window?.Render(), _window != null } + }; + } + + public AstPercentileWindowExpression Update(AstExpression input, AstExpression percentiles, AstWindow window) + { + if (input == _input && percentiles == _percentiles && window == _window) + { + return this; + } + + return new AstPercentileWindowExpression(input, percentiles, window); + } + } +} \ No newline at end of file diff --git a/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Expressions/AstRangeExpression.cs b/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Expressions/AstRangeExpression.cs index 9102691dc0b..cb6f8c97103 100644 --- a/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Expressions/AstRangeExpression.cs +++ 
b/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Expressions/AstRangeExpression.cs @@ -52,7 +52,7 @@ public override BsonValue Render() { args.Add(_step.Render()); } - + return new BsonDocument("$range", args); } diff --git a/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Filters/AstTypeFilterOperation.cs b/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Filters/AstTypeFilterOperation.cs index f44ae90b500..c927f778a63 100644 --- a/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Filters/AstTypeFilterOperation.cs +++ b/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Filters/AstTypeFilterOperation.cs @@ -17,6 +17,7 @@ using System.Collections.Generic; using System.Linq; using MongoDB.Bson; +using MongoDB.Bson.Serialization; using MongoDB.Driver.Core.Misc; using MongoDB.Driver.Linq.Linq3Implementation.Ast.Visitors; using MongoDB.Driver.Linq.Linq3Implementation.Misc; @@ -51,39 +52,11 @@ public override BsonValue Render() if (_types.Count == 1) { var type = _types[0]; - return new BsonDocument("$type", MapBsonTypeToString(type)); + return new BsonDocument("$type", type.ToServerString()); } else { - return new BsonDocument("$type", new BsonArray(_types.Select(type => MapBsonTypeToString(type)))); - } - } - - private string MapBsonTypeToString(BsonType type) - { - switch (type) - { - case BsonType.Array: return "array"; - case BsonType.Binary: return "binData"; - case BsonType.Boolean: return "bool"; - case BsonType.DateTime: return "date"; - case BsonType.Decimal128: return "decimal"; - case BsonType.Document: return "object"; - case BsonType.Double: return "double"; - case BsonType.Int32: return "int"; - case BsonType.Int64: return "long"; - case BsonType.JavaScript: return "javascript"; - case BsonType.JavaScriptWithScope: return "javascriptWithScope"; - case BsonType.MaxKey: return "maxKey"; - case BsonType.MinKey: return "minKey"; - case BsonType.Null: return "null"; - case BsonType.ObjectId: return "objectId"; - case BsonType.RegularExpression: return "regex"; - 
case BsonType.String: return "string"; - case BsonType.Symbol: return "symbol"; - case BsonType.Timestamp: return "timestamp"; - case BsonType.Undefined: return "undefined"; - default: throw new ArgumentException($"Unexpected BSON type: {type}.", nameof(type)); + return new BsonDocument("$type", new BsonArray(_types.Select(type => type.ToServerString()))); } } } diff --git a/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Optimizers/AstGroupingPipelineOptimizer.cs b/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Optimizers/AstGroupingPipelineOptimizer.cs index 5967215ee25..37de0673850 100644 --- a/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Optimizers/AstGroupingPipelineOptimizer.cs +++ b/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Optimizers/AstGroupingPipelineOptimizer.cs @@ -404,32 +404,67 @@ unaryExpression.Arg is AstGetFieldExpression innerMostGetFieldExpression && public override AstNode VisitMapExpression(AstMapExpression node) { // { $map : { input : { $getField : { input : "$$ROOT", field : "_elements" } }, as : "x", in : f(x) } } => { __agg0 : { $push : f(x => element) } } + "$__agg0" - if (node.Input is AstGetFieldExpression mapInputGetFieldExpression && - mapInputGetFieldExpression.FieldName.IsStringConstant("_elements") && - mapInputGetFieldExpression.Input.IsRootVar()) + if (IsMappedElementsField(node, out var rewrittenArg)) { - var rewrittenArg = (AstExpression)AstNodeReplacer.Replace(node.In, (node.As, _element)); var accumulatorExpression = AstExpression.UnaryAccumulator(AstUnaryAccumulatorOperator.Push, rewrittenArg); - var accumulatorFieldName = _accumulators.AddAccumulatorExpression(accumulatorExpression); - return AstExpression.GetField(AstExpression.RootVar, accumulatorFieldName); + return CreateGetAccumulatorFieldExpression(accumulatorExpression); } return base.VisitMapExpression(node); } + public override AstNode VisitMedianExpression(AstMedianExpression node) + { + // { $median : { input: { $getField : { input : "$$ROOT", field : 
"_elements" } }, method: "approximate" } } + // => { __agg0 : { $median : { input: element, method: "approximate" } } } + "$__agg0" + if (IsElementsField(node.Input)) + { + var accumulatorExpression = AstExpression.MedianAccumulator(_element); + return CreateGetAccumulatorFieldExpression(accumulatorExpression); + } + + // { $median : { input: { $map : { input : { $getField : { input : "$$ROOT", field : "_elements" } }, as : "x", in : f(x) } }, method: "approximate" } } + // => { __agg0 : { $median : { input: f(x => element), method: "approximate" } } } + "$__agg0" + if (IsMappedElementsField(node.Input, out var rewrittenArg)) + { + var accumulatorExpression = AstExpression.MedianAccumulator(rewrittenArg); + return CreateGetAccumulatorFieldExpression(accumulatorExpression); + } + + return base.VisitMedianExpression(node); + } + + public override AstNode VisitPercentileExpression(AstPercentileExpression node) + { + // { $percentile : { input: { $getField : { input : "$$ROOT", field : "_elements" } }, p: [...], method: "approximate" } } + // => { __agg0 : { $percentile : { input: element, p: [...], method: "approximate" } } } + "$__agg0" + if (IsElementsField(node.Input)) + { + var accumulatorExpression = AstExpression.PercentileAccumulator(_element, node.Percentiles); + return CreateGetAccumulatorFieldExpression(accumulatorExpression); + } + + // { $percentile : { input: { $map : { input : { $getField : { input : "$$ROOT", field : "_elements" } }, as : "x", in : f(x) } }, p: [...], method: "approximate" } } + // => { __agg0 : { $percentile : { input: f(x => element), p: [...], method: "approximate" } } } + "$__agg0" + if (IsMappedElementsField(node.Input, out var rewrittenArg)) + { + var accumulatorExpression = AstExpression.PercentileAccumulator(rewrittenArg, node.Percentiles); + return CreateGetAccumulatorFieldExpression(accumulatorExpression); + } + + return base.VisitPercentileExpression(node); + } + public override AstNode VisitPickExpression(AstPickExpression 
node) { // { $pickOperator : { source : { $getField : { input : "$$ROOT", field : "_elements" } }, as : "x", sortBy : s, selector : f(x) } } // => { __agg0 : { $pickAccumulatorOperator : { sortBy : s, selector : f(x => element) } } } + "$__agg0" - if (node.Source is AstGetFieldExpression getFieldExpression && - getFieldExpression.Input.IsRootVar() && - getFieldExpression.FieldName.IsStringConstant("_elements")) + if (IsElementsField(node.Source)) { var @operator = node.Operator.ToAccumulatorOperator(); var rewrittenSelector = (AstExpression)AstNodeReplacer.Replace(node.Selector, (node.As, _element)); var accumulatorExpression = new AstPickAccumulatorExpression(@operator, node.SortBy, rewrittenSelector, node.N); - var accumulatorFieldName = _accumulators.AddAccumulatorExpression(accumulatorExpression); - return AstExpression.GetField(AstExpression.RootVar, accumulatorFieldName); + return CreateGetAccumulatorFieldExpression(accumulatorExpression); } return base.VisitPickExpression(node); @@ -437,80 +472,60 @@ public override AstNode VisitPickExpression(AstPickExpression node) public override AstNode VisitUnaryExpression(AstUnaryExpression node) { - if (TryOptimizeSizeOfElements(out var optimizedExpression)) + // { $size : "$_elements" } => { __agg0 : { $sum : 1 } } + "$__agg0" + if (node.Operator == AstUnaryOperator.Size) { - return optimizedExpression; + if (node.Arg is AstGetFieldExpression argGetFieldExpression && + argGetFieldExpression.FieldName.IsStringConstant("_elements")) + { + var accumulatorExpression = AstExpression.UnaryAccumulator(AstUnaryAccumulatorOperator.Sum, 1); + return CreateGetAccumulatorFieldExpression(accumulatorExpression); + } } - if (TryOptimizeAccumulatorOfElements(out optimizedExpression)) + // { $accumulator : { $getField : { input : "$$ROOT", field : "_elements" } } } => { __agg0 : { $accumulator : element } } + "$__agg0" + if (node.Operator.IsAccumulator(out var accumulatorOperator) && IsElementsField(node.Arg)) { - return 
optimizedExpression; + var accumulatorExpression = AstExpression.UnaryAccumulator(accumulatorOperator, _element); + return CreateGetAccumulatorFieldExpression(accumulatorExpression); } - if (TryOptimizeAccumulatorOfMappedElements(out optimizedExpression)) + // { $accumulator : { $map : { input : { $getField : { input : "$$ROOT", field : "_elements" } }, as : "x", in : f(x) } } } + // => { __agg0 : { $accumulator : f(x => element) } } + "$__agg0" + if (node.Operator.IsAccumulator(out accumulatorOperator) && + IsMappedElementsField(node.Arg, out var rewrittenArg)) { - return optimizedExpression; + var accumulatorExpression = AstExpression.UnaryAccumulator(accumulatorOperator, rewrittenArg); + return CreateGetAccumulatorFieldExpression(accumulatorExpression); } return base.VisitUnaryExpression(node); + } - bool TryOptimizeSizeOfElements(out AstExpression optimizedExpression) - { - // { $size : "$_elements" } => { __agg0 : { $sum : 1 } } + "$__agg0" - if (node.Operator == AstUnaryOperator.Size) - { - if (node.Arg is AstGetFieldExpression argGetFieldExpression && - argGetFieldExpression.FieldName.IsStringConstant("_elements")) - { - var accumulatorExpression = AstExpression.UnaryAccumulator(AstUnaryAccumulatorOperator.Sum, 1); - var accumulatorFieldName = _accumulators.AddAccumulatorExpression(accumulatorExpression); - optimizedExpression = AstExpression.GetField(AstExpression.RootVar, accumulatorFieldName); - return true; - } - } - - optimizedExpression = null; - return false; - } + private bool IsElementsField(AstExpression expression) + { + return + expression is AstGetFieldExpression getFieldExpression && + getFieldExpression.FieldName.IsStringConstant("_elements") && + getFieldExpression.Input.IsRootVar(); + } - bool TryOptimizeAccumulatorOfElements(out AstExpression optimizedExpression) + private bool IsMappedElementsField(AstExpression expression, out AstExpression rewrittenArg) + { + if (expression is AstMapExpression mapExpression && 
IsElementsField(mapExpression.Input)) { - // { $accumulator : { $getField : { input : "$$ROOT", field : "_elements" } } } => { __agg0 : { $accumulator : element } } + "$__agg0" - if (node.Operator.IsAccumulator(out var accumulatorOperator) && - node.Arg is AstGetFieldExpression getFieldExpression && - getFieldExpression.FieldName.IsStringConstant("_elements") && - getFieldExpression.Input.IsRootVar()) - { - var accumulatorExpression = AstExpression.UnaryAccumulator(accumulatorOperator, _element); - var accumulatorFieldName = _accumulators.AddAccumulatorExpression(accumulatorExpression); - optimizedExpression = AstExpression.GetField(AstExpression.RootVar, accumulatorFieldName); - return true; - } - - optimizedExpression = null; - return false; - + rewrittenArg = (AstExpression)AstNodeReplacer.Replace(mapExpression.In, (mapExpression.As, _element)); + return true; } - bool TryOptimizeAccumulatorOfMappedElements(out AstExpression optimizedExpression) - { - // { $accumulator : { $map : { input : { $getField : { input : "$$ROOT", field : "_elements" } }, as : "x", in : f(x) } } } => { __agg0 : { $accumulator : f(x => element) } } + "$__agg0" - if (node.Operator.IsAccumulator(out var accumulatorOperator) && - node.Arg is AstMapExpression mapExpression && - mapExpression.Input is AstGetFieldExpression mapInputGetFieldExpression && - mapInputGetFieldExpression.FieldName.IsStringConstant("_elements") && - mapInputGetFieldExpression.Input.IsRootVar()) - { - var rewrittenArg = (AstExpression)AstNodeReplacer.Replace(mapExpression.In, (mapExpression.As, _element)); - var accumulatorExpression = AstExpression.UnaryAccumulator(accumulatorOperator, rewrittenArg); - var accumulatorFieldName = _accumulators.AddAccumulatorExpression(accumulatorExpression); - optimizedExpression = AstExpression.GetField(AstExpression.RootVar, accumulatorFieldName); - return true; - } + rewrittenArg = null; + return false; + } - optimizedExpression = null; - return false; - } + private AstExpression 
CreateGetAccumulatorFieldExpression(AstAccumulatorExpression accumulatorExpression) + { + var fieldName = _accumulators.AddAccumulatorExpression(accumulatorExpression); + return AstExpression.GetField(AstExpression.RootVar, fieldName); } } diff --git a/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Stages/AstMergeStage.cs b/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Stages/AstMergeStage.cs index b3e4e016885..7894d87af11 100644 --- a/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Stages/AstMergeStage.cs +++ b/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Stages/AstMergeStage.cs @@ -105,7 +105,7 @@ public AstMergeStage Update(IEnumerable<AstVarBinding> let) private BsonValue RenderInto() { - return + return _intoDatabase == null ? _intoCollection : new BsonDocument { { "db", _intoDatabase }, { "coll", _intoCollection } }; diff --git a/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Stages/AstReplaceRootStage.cs b/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Stages/AstReplaceRootStage.cs index 2241de2a97f..7a0dbc3d892 100644 --- a/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Stages/AstReplaceRootStage.cs +++ b/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Stages/AstReplaceRootStage.cs @@ -25,7 +25,7 @@ internal sealed class AstReplaceRootStage : AstStage private readonly AstExpression _expression; public AstReplaceRootStage(AstExpression expression) - { + { _expression = Ensure.IsNotNull(expression, nameof(expression)); } diff --git a/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Visitors/AstNodeVisitor.cs b/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Visitors/AstNodeVisitor.cs index 1222d0060e9..83a50bd6f44 100644 --- a/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Visitors/AstNodeVisitor.cs +++ b/src/MongoDB.Driver/Linq/Linq3Implementation/Ast/Visitors/AstNodeVisitor.cs @@ -504,6 +504,21 @@ public virtual AstNode VisitMatchStage(AstMatchStage node) return node.Update(VisitAndConvert(node.Filter)); } + public virtual AstNode 
VisitMedianExpression(AstMedianExpression node) + { + return node.Update(VisitAndConvert(node.Input)); + } + + public virtual AstNode VisitMedianAccumulatorExpression(AstMedianAccumulatorExpression node) + { + return node.Update(VisitAndConvert(node.Input)); + } + + public virtual AstNode VisitMedianWindowExpression(AstMedianWindowExpression node) + { + return node.Update(VisitAndConvert(node.Input), node.Window); + } + public virtual AstNode VisitMergeStage(AstMergeStage node) { return node.Update(VisitAndConvert(node.Let)); @@ -559,6 +574,21 @@ public virtual AstNode VisitOutStage(AstOutStage node) return node; } + public virtual AstNode VisitPercentileExpression(AstPercentileExpression node) + { + return node.Update(VisitAndConvert(node.Input), VisitAndConvert(node.Percentiles)); + } + + public virtual AstNode VisitPercentileAccumulatorExpression(AstPercentileAccumulatorExpression node) + { + return node.Update(VisitAndConvert(node.Input), VisitAndConvert(node.Percentiles)); + } + + public virtual AstNode VisitPercentileWindowExpression(AstPercentileWindowExpression node) + { + return node.Update(VisitAndConvert(node.Input), VisitAndConvert(node.Percentiles), node.Window); + } + public virtual AstNode VisitPickAccumulatorExpression(AstPickAccumulatorExpression node) { return node.Update(node.Operator, node.SortBy, VisitAndConvert(node.Selector), VisitAndConvert(node.N)); diff --git a/src/MongoDB.Driver/Linq/Linq3Implementation/Reflection/MongoEnumerableMethod.cs b/src/MongoDB.Driver/Linq/Linq3Implementation/Reflection/MongoEnumerableMethod.cs index 3773f1f92bf..c10550024c3 100644 --- a/src/MongoDB.Driver/Linq/Linq3Implementation/Reflection/MongoEnumerableMethod.cs +++ b/src/MongoDB.Driver/Linq/Linq3Implementation/Reflection/MongoEnumerableMethod.cs @@ -25,6 +25,46 @@ internal static class MongoEnumerableMethod private static readonly MethodInfo __allElements; private static readonly MethodInfo __allMatchingElements; private static readonly MethodInfo 
__firstMatchingElement; + private static readonly MethodInfo __medianDecimal; + private static readonly MethodInfo __medianDecimalWithSelector; + private static readonly MethodInfo __medianDouble; + private static readonly MethodInfo __medianDoubleWithSelector; + private static readonly MethodInfo __medianInt32; + private static readonly MethodInfo __medianInt32WithSelector; + private static readonly MethodInfo __medianInt64; + private static readonly MethodInfo __medianInt64WithSelector; + private static readonly MethodInfo __medianNullableDecimal; + private static readonly MethodInfo __medianNullableDecimalWithSelector; + private static readonly MethodInfo __medianNullableDouble; + private static readonly MethodInfo __medianNullableDoubleWithSelector; + private static readonly MethodInfo __medianNullableInt32; + private static readonly MethodInfo __medianNullableInt32WithSelector; + private static readonly MethodInfo __medianNullableInt64; + private static readonly MethodInfo __medianNullableInt64WithSelector; + private static readonly MethodInfo __medianNullableSingle; + private static readonly MethodInfo __medianNullableSingleWithSelector; + private static readonly MethodInfo __medianSingle; + private static readonly MethodInfo __medianSingleWithSelector; + private static readonly MethodInfo __percentileDecimal; + private static readonly MethodInfo __percentileDecimalWithSelector; + private static readonly MethodInfo __percentileDouble; + private static readonly MethodInfo __percentileDoubleWithSelector; + private static readonly MethodInfo __percentileInt32; + private static readonly MethodInfo __percentileInt32WithSelector; + private static readonly MethodInfo __percentileInt64; + private static readonly MethodInfo __percentileInt64WithSelector; + private static readonly MethodInfo __percentileNullableDecimal; + private static readonly MethodInfo __percentileNullableDecimalWithSelector; + private static readonly MethodInfo __percentileNullableDouble; + 
private static readonly MethodInfo __percentileNullableDoubleWithSelector; + private static readonly MethodInfo __percentileNullableInt32; + private static readonly MethodInfo __percentileNullableInt32WithSelector; + private static readonly MethodInfo __percentileNullableInt64; + private static readonly MethodInfo __percentileNullableInt64WithSelector; + private static readonly MethodInfo __percentileNullableSingle; + private static readonly MethodInfo __percentileNullableSingleWithSelector; + private static readonly MethodInfo __percentileSingle; + private static readonly MethodInfo __percentileSingleWithSelector; private static readonly MethodInfo __whereWithLimit; // static constructor @@ -33,6 +73,46 @@ static MongoEnumerableMethod() __allElements = ReflectionInfo.Method((IEnumerable<object> source) => source.AllElements()); __allMatchingElements = ReflectionInfo.Method((IEnumerable<object> source, string identifier) => source.AllMatchingElements(identifier)); __firstMatchingElement = ReflectionInfo.Method((IEnumerable<object> source) => source.FirstMatchingElement()); + __medianDecimal = ReflectionInfo.Method((IEnumerable<decimal> source) => source.Median()); + __medianDecimalWithSelector = ReflectionInfo.Method((IEnumerable<object> source, Func<object, decimal> selector) => source.Median(selector)); + __medianDouble = ReflectionInfo.Method((IEnumerable<double> source) => source.Median()); + __medianDoubleWithSelector = ReflectionInfo.Method((IEnumerable<object> source, Func<object, double> selector) => source.Median(selector)); + __medianInt32 = ReflectionInfo.Method((IEnumerable<int> source) => source.Median()); + __medianInt32WithSelector = ReflectionInfo.Method((IEnumerable<object> source, Func<object, int> selector) => source.Median(selector)); + __medianInt64 = ReflectionInfo.Method((IEnumerable<long> source) => source.Median()); + __medianInt64WithSelector = ReflectionInfo.Method((IEnumerable<object> source, Func<object, long> selector) => 
source.Median(selector)); + __medianNullableDecimal = ReflectionInfo.Method((IEnumerable<decimal?> source) => source.Median()); + __medianNullableDecimalWithSelector = ReflectionInfo.Method((IEnumerable<object> source, Func<object, decimal?> selector) => source.Median(selector)); + __medianNullableDouble = ReflectionInfo.Method((IEnumerable<double?> source) => source.Median()); + __medianNullableDoubleWithSelector = ReflectionInfo.Method((IEnumerable<object> source, Func<object, double?> selector) => source.Median(selector)); + __medianNullableInt32 = ReflectionInfo.Method((IEnumerable<int?> source) => source.Median()); + __medianNullableInt32WithSelector = ReflectionInfo.Method((IEnumerable<object> source, Func<object, int?> selector) => source.Median(selector)); + __medianNullableInt64 = ReflectionInfo.Method((IEnumerable<long?> source) => source.Median()); + __medianNullableInt64WithSelector = ReflectionInfo.Method((IEnumerable<object> source, Func<object, long?> selector) => source.Median(selector)); + __medianNullableSingle = ReflectionInfo.Method((IEnumerable<float?> source) => source.Median()); + __medianNullableSingleWithSelector = ReflectionInfo.Method((IEnumerable<object> source, Func<object, float?> selector) => source.Median(selector)); + __medianSingle = ReflectionInfo.Method((IEnumerable<float> source) => source.Median()); + __medianSingleWithSelector = ReflectionInfo.Method((IEnumerable<object> source, Func<object, float> selector) => source.Median(selector)); + __percentileDecimal = ReflectionInfo.Method((IEnumerable<decimal> source, IEnumerable<double> percentiles) => source.Percentile(percentiles)); + __percentileDecimalWithSelector = ReflectionInfo.Method((IEnumerable<object> source, Func<object, decimal> selector, IEnumerable<double> percentiles) => source.Percentile(selector, percentiles)); + __percentileDouble = ReflectionInfo.Method((IEnumerable<double> source, IEnumerable<double> percentiles) => source.Percentile(percentiles)); + 
__percentileDoubleWithSelector = ReflectionInfo.Method((IEnumerable<object> source, Func<object, double> selector, IEnumerable<double> percentiles) => source.Percentile(selector, percentiles)); + __percentileInt32 = ReflectionInfo.Method((IEnumerable<int> source, IEnumerable<double> percentiles) => source.Percentile(percentiles)); + __percentileInt32WithSelector = ReflectionInfo.Method((IEnumerable<object> source, Func<object, int> selector, IEnumerable<double> percentiles) => source.Percentile(selector, percentiles)); + __percentileInt64 = ReflectionInfo.Method((IEnumerable<long> source, IEnumerable<double> percentiles) => source.Percentile(percentiles)); + __percentileInt64WithSelector = ReflectionInfo.Method((IEnumerable<object> source, Func<object, long> selector, IEnumerable<double> percentiles) => source.Percentile(selector, percentiles)); + __percentileNullableDecimal = ReflectionInfo.Method((IEnumerable<decimal?> source, IEnumerable<double> percentiles) => source.Percentile(percentiles)); + __percentileNullableDecimalWithSelector = ReflectionInfo.Method((IEnumerable<object> source, Func<object, decimal?> selector, IEnumerable<double> percentiles) => source.Percentile(selector, percentiles)); + __percentileNullableDouble = ReflectionInfo.Method((IEnumerable<double?> source, IEnumerable<double> percentiles) => source.Percentile(percentiles)); + __percentileNullableDoubleWithSelector = ReflectionInfo.Method((IEnumerable<object> source, Func<object, double?> selector, IEnumerable<double> percentiles) => source.Percentile(selector, percentiles)); + __percentileNullableInt32 = ReflectionInfo.Method((IEnumerable<int?> source, IEnumerable<double> percentiles) => source.Percentile(percentiles)); + __percentileNullableInt32WithSelector = ReflectionInfo.Method((IEnumerable<object> source, Func<object, int?> selector, IEnumerable<double> percentiles) => source.Percentile(selector, percentiles)); + __percentileNullableInt64 = ReflectionInfo.Method((IEnumerable<long?> 
source, IEnumerable<double> percentiles) => source.Percentile(percentiles)); + __percentileNullableInt64WithSelector = ReflectionInfo.Method((IEnumerable<object> source, Func<object, long?> selector, IEnumerable<double> percentiles) => source.Percentile(selector, percentiles)); + __percentileNullableSingle = ReflectionInfo.Method((IEnumerable<float?> source, IEnumerable<double> percentiles) => source.Percentile(percentiles)); + __percentileNullableSingleWithSelector = ReflectionInfo.Method((IEnumerable<object> source, Func<object, float?> selector, IEnumerable<double> percentiles) => source.Percentile(selector, percentiles)); + __percentileSingle = ReflectionInfo.Method((IEnumerable<float> source, IEnumerable<double> percentiles) => source.Percentile(percentiles)); + __percentileSingleWithSelector = ReflectionInfo.Method((IEnumerable<object> source, Func<object, float> selector, IEnumerable<double> percentiles) => source.Percentile(selector, percentiles)); __whereWithLimit = ReflectionInfo.Method((IEnumerable<object> source, Func<object, bool> predicate, int limit) => source.Where(predicate, limit)); } @@ -40,6 +120,46 @@ static MongoEnumerableMethod() public static MethodInfo AllElements => __allElements; public static MethodInfo AllMatchingElements => __allMatchingElements; public static MethodInfo FirstMatchingElement => __firstMatchingElement; + public static MethodInfo MedianDecimal => __medianDecimal; + public static MethodInfo MedianDecimalWithSelector => __medianDecimalWithSelector; + public static MethodInfo MedianDouble => __medianDouble; + public static MethodInfo MedianDoubleWithSelector => __medianDoubleWithSelector; + public static MethodInfo MedianInt32 => __medianInt32; + public static MethodInfo MedianInt32WithSelector => __medianInt32WithSelector; + public static MethodInfo MedianInt64 => __medianInt64; + public static MethodInfo MedianInt64WithSelector => __medianInt64WithSelector; + public static MethodInfo MedianNullableDecimal => 
__medianNullableDecimal; + public static MethodInfo MedianNullableDecimalWithSelector => __medianNullableDecimalWithSelector; + public static MethodInfo MedianNullableDouble => __medianNullableDouble; + public static MethodInfo MedianNullableDoubleWithSelector => __medianNullableDoubleWithSelector; + public static MethodInfo MedianNullableInt32 => __medianNullableInt32; + public static MethodInfo MedianNullableInt32WithSelector => __medianNullableInt32WithSelector; + public static MethodInfo MedianNullableInt64 => __medianNullableInt64; + public static MethodInfo MedianNullableInt64WithSelector => __medianNullableInt64WithSelector; + public static MethodInfo MedianNullableSingle => __medianNullableSingle; + public static MethodInfo MedianNullableSingleWithSelector => __medianNullableSingleWithSelector; + public static MethodInfo MedianSingle => __medianSingle; + public static MethodInfo MedianSingleWithSelector => __medianSingleWithSelector; + public static MethodInfo PercentileDecimal => __percentileDecimal; + public static MethodInfo PercentileDecimalWithSelector => __percentileDecimalWithSelector; + public static MethodInfo PercentileDouble => __percentileDouble; + public static MethodInfo PercentileDoubleWithSelector => __percentileDoubleWithSelector; + public static MethodInfo PercentileInt32 => __percentileInt32; + public static MethodInfo PercentileInt32WithSelector => __percentileInt32WithSelector; + public static MethodInfo PercentileInt64 => __percentileInt64; + public static MethodInfo PercentileInt64WithSelector => __percentileInt64WithSelector; + public static MethodInfo PercentileNullableDecimal => __percentileNullableDecimal; + public static MethodInfo PercentileNullableDecimalWithSelector => __percentileNullableDecimalWithSelector; + public static MethodInfo PercentileNullableDouble => __percentileNullableDouble; + public static MethodInfo PercentileNullableDoubleWithSelector => __percentileNullableDoubleWithSelector; + public static MethodInfo 
PercentileNullableInt32 => __percentileNullableInt32; + public static MethodInfo PercentileNullableInt32WithSelector => __percentileNullableInt32WithSelector; + public static MethodInfo PercentileNullableInt64 => __percentileNullableInt64; + public static MethodInfo PercentileNullableInt64WithSelector => __percentileNullableInt64WithSelector; + public static MethodInfo PercentileNullableSingle => __percentileNullableSingle; + public static MethodInfo PercentileNullableSingleWithSelector => __percentileNullableSingleWithSelector; + public static MethodInfo PercentileSingle => __percentileSingle; + public static MethodInfo PercentileSingleWithSelector => __percentileSingleWithSelector; public static MethodInfo WhereWithLimit => __whereWithLimit; } } diff --git a/src/MongoDB.Driver/Linq/Linq3Implementation/Reflection/WindowMethod.cs b/src/MongoDB.Driver/Linq/Linq3Implementation/Reflection/WindowMethod.cs index 374caf1f787..693e8762269 100644 --- a/src/MongoDB.Driver/Linq/Linq3Implementation/Reflection/WindowMethod.cs +++ b/src/MongoDB.Driver/Linq/Linq3Implementation/Reflection/WindowMethod.cs @@ -14,6 +14,7 @@ */ using System; +using System.Collections.Generic; using System.Reflection; namespace MongoDB.Driver.Linq.Linq3Implementation.Reflection @@ -84,7 +85,27 @@ internal static class WindowMethod private static readonly MethodInfo __last; private static readonly MethodInfo __locf; private static readonly MethodInfo __max; + private static readonly MethodInfo __medianWithDecimal; + private static readonly MethodInfo __medianWithDouble; + private static readonly MethodInfo __medianWithInt32; + private static readonly MethodInfo __medianWithInt64; + private static readonly MethodInfo __medianWithNullableDecimal; + private static readonly MethodInfo __medianWithNullableDouble; + private static readonly MethodInfo __medianWithNullableInt32; + private static readonly MethodInfo __medianWithNullableInt64; + private static readonly MethodInfo __medianWithNullableSingle; + 
private static readonly MethodInfo __medianWithSingle; private static readonly MethodInfo __min; + private static readonly MethodInfo __percentileWithDecimal; + private static readonly MethodInfo __percentileWithDouble; + private static readonly MethodInfo __percentileWithInt32; + private static readonly MethodInfo __percentileWithInt64; + private static readonly MethodInfo __percentileWithNullableDecimal; + private static readonly MethodInfo __percentileWithNullableDouble; + private static readonly MethodInfo __percentileWithNullableInt32; + private static readonly MethodInfo __percentileWithNullableInt64; + private static readonly MethodInfo __percentileWithNullableSingle; + private static readonly MethodInfo __percentileWithSingle; private static readonly MethodInfo __push; private static readonly MethodInfo __rank; private static readonly MethodInfo __shift; @@ -186,7 +207,27 @@ static WindowMethod() __last = ReflectionInfo.Method((ISetWindowFieldsPartition<object> partition, Func<object, object> selector, SetWindowFieldsWindow window) => partition.Last(selector, window)); __locf = ReflectionInfo.Method((ISetWindowFieldsPartition<object> partition, Func<object, object> selector, SetWindowFieldsWindow window) => partition.Locf(selector, window)); __max = ReflectionInfo.Method((ISetWindowFieldsPartition<object> partition, Func<object, object> selector, SetWindowFieldsWindow window) => partition.Max(selector, window)); + __medianWithDecimal = ReflectionInfo.Method((ISetWindowFieldsPartition<object> partition, Func<object, decimal> selector, SetWindowFieldsWindow window) => partition.Median(selector, window)); + __medianWithDouble = ReflectionInfo.Method((ISetWindowFieldsPartition<object> partition, Func<object, double> selector, SetWindowFieldsWindow window) => partition.Median(selector, window)); + __medianWithInt32 = ReflectionInfo.Method((ISetWindowFieldsPartition<object> partition, Func<object, int> selector, SetWindowFieldsWindow window) => 
partition.Median(selector, window)); + __medianWithInt64 = ReflectionInfo.Method((ISetWindowFieldsPartition<object> partition, Func<object, long> selector, SetWindowFieldsWindow window) => partition.Median(selector, window)); + __medianWithNullableDecimal = ReflectionInfo.Method((ISetWindowFieldsPartition<object> partition, Func<object, decimal?> selector, SetWindowFieldsWindow window) => partition.Median(selector, window)); + __medianWithNullableDouble = ReflectionInfo.Method((ISetWindowFieldsPartition<object> partition, Func<object, double?> selector, SetWindowFieldsWindow window) => partition.Median(selector, window)); + __medianWithNullableInt32 = ReflectionInfo.Method((ISetWindowFieldsPartition<object> partition, Func<object, int?> selector, SetWindowFieldsWindow window) => partition.Median(selector, window)); + __medianWithNullableInt64 = ReflectionInfo.Method((ISetWindowFieldsPartition<object> partition, Func<object, long?> selector, SetWindowFieldsWindow window) => partition.Median(selector, window)); + __medianWithNullableSingle = ReflectionInfo.Method((ISetWindowFieldsPartition<object> partition, Func<object, float?> selector, SetWindowFieldsWindow window) => partition.Median(selector, window)); + __medianWithSingle = ReflectionInfo.Method((ISetWindowFieldsPartition<object> partition, Func<object, float> selector, SetWindowFieldsWindow window) => partition.Median(selector, window)); __min = ReflectionInfo.Method((ISetWindowFieldsPartition<object> partition, Func<object, object> selector, SetWindowFieldsWindow window) => partition.Min(selector, window)); + __percentileWithDecimal = ReflectionInfo.Method((ISetWindowFieldsPartition<object> partition, Func<object, decimal> selector, IEnumerable<double> percentiles, SetWindowFieldsWindow window) => partition.Percentile(selector, percentiles, window)); + __percentileWithDouble = ReflectionInfo.Method((ISetWindowFieldsPartition<object> partition, Func<object, double> selector, IEnumerable<double> percentiles, 
SetWindowFieldsWindow window) => partition.Percentile(selector, percentiles, window)); + __percentileWithInt32 = ReflectionInfo.Method((ISetWindowFieldsPartition<object> partition, Func<object, int> selector, IEnumerable<double> percentiles, SetWindowFieldsWindow window) => partition.Percentile(selector, percentiles, window)); + __percentileWithInt64 = ReflectionInfo.Method((ISetWindowFieldsPartition<object> partition, Func<object, long> selector, IEnumerable<double> percentiles, SetWindowFieldsWindow window) => partition.Percentile(selector, percentiles, window)); + __percentileWithNullableDecimal = ReflectionInfo.Method((ISetWindowFieldsPartition<object> partition, Func<object, decimal?> selector, IEnumerable<double> percentiles, SetWindowFieldsWindow window) => partition.Percentile(selector, percentiles, window)); + __percentileWithNullableDouble = ReflectionInfo.Method((ISetWindowFieldsPartition<object> partition, Func<object, double?> selector, IEnumerable<double> percentiles, SetWindowFieldsWindow window) => partition.Percentile(selector, percentiles, window)); + __percentileWithNullableInt32 = ReflectionInfo.Method((ISetWindowFieldsPartition<object> partition, Func<object, int?> selector, IEnumerable<double> percentiles, SetWindowFieldsWindow window) => partition.Percentile(selector, percentiles, window)); + __percentileWithNullableInt64 = ReflectionInfo.Method((ISetWindowFieldsPartition<object> partition, Func<object, long?> selector, IEnumerable<double> percentiles, SetWindowFieldsWindow window) => partition.Percentile(selector, percentiles, window)); + __percentileWithNullableSingle = ReflectionInfo.Method((ISetWindowFieldsPartition<object> partition, Func<object, float?> selector, IEnumerable<double> percentiles, SetWindowFieldsWindow window) => partition.Percentile(selector, percentiles, window)); + __percentileWithSingle = ReflectionInfo.Method((ISetWindowFieldsPartition<object> partition, Func<object, float> selector, IEnumerable<double> percentiles, 
SetWindowFieldsWindow window) => partition.Percentile(selector, percentiles, window)); __push = ReflectionInfo.Method((ISetWindowFieldsPartition<object> partition, Func<object, object> selector, SetWindowFieldsWindow window) => partition.Push(selector, window)); __rank = ReflectionInfo.Method((ISetWindowFieldsPartition<object> partition) => partition.Rank()); __shift = ReflectionInfo.Method((ISetWindowFieldsPartition<object> partition, Func<object, object> selector, int by) => partition.Shift(selector, by)); @@ -287,7 +328,27 @@ static WindowMethod() public static MethodInfo Last => __last; public static MethodInfo Locf => __locf; public static MethodInfo Max => __max; + public static MethodInfo MedianWithDecimal => __medianWithDecimal; + public static MethodInfo MedianWithDouble => __medianWithDouble; + public static MethodInfo MedianWithInt32 => __medianWithInt32; + public static MethodInfo MedianWithInt64 => __medianWithInt64; + public static MethodInfo MedianWithNullableDecimal => __medianWithNullableDecimal; + public static MethodInfo MedianWithNullableDouble => __medianWithNullableDouble; + public static MethodInfo MedianWithNullableInt32 => __medianWithNullableInt32; + public static MethodInfo MedianWithNullableInt64 => __medianWithNullableInt64; + public static MethodInfo MedianWithNullableSingle => __medianWithNullableSingle; + public static MethodInfo MedianWithSingle => __medianWithSingle; public static MethodInfo Min => __min; + public static MethodInfo PercentileWithDecimal => __percentileWithDecimal; + public static MethodInfo PercentileWithDouble => __percentileWithDouble; + public static MethodInfo PercentileWithInt32 => __percentileWithInt32; + public static MethodInfo PercentileWithInt64 => __percentileWithInt64; + public static MethodInfo PercentileWithNullableDecimal => __percentileWithNullableDecimal; + public static MethodInfo PercentileWithNullableDouble => __percentileWithNullableDouble; + public static MethodInfo PercentileWithNullableInt32 
=> __percentileWithNullableInt32; + public static MethodInfo PercentileWithNullableInt64 => __percentileWithNullableInt64; + public static MethodInfo PercentileWithNullableSingle => __percentileWithNullableSingle; + public static MethodInfo PercentileWithSingle => __percentileWithSingle; public static MethodInfo Push => __push; public static MethodInfo Rank => __rank; public static MethodInfo Shift => __shift; diff --git a/src/MongoDB.Driver/Linq/Linq3Implementation/Serializers/EnumUnderlyingTypeSerializer.cs b/src/MongoDB.Driver/Linq/Linq3Implementation/Serializers/EnumUnderlyingTypeSerializer.cs index 577ddef0c82..816e5fc237f 100644 --- a/src/MongoDB.Driver/Linq/Linq3Implementation/Serializers/EnumUnderlyingTypeSerializer.cs +++ b/src/MongoDB.Driver/Linq/Linq3Implementation/Serializers/EnumUnderlyingTypeSerializer.cs @@ -25,11 +25,11 @@ internal interface IEnumUnderlyingTypeSerializer IBsonSerializer EnumSerializer { get; } } - internal class EnumUnderlyingTypeSerializer<TEnum, TEnumUnderlyingType> : StructSerializerBase<TEnumUnderlyingType>, IEnumUnderlyingTypeSerializer - where TEnum : Enum + internal class EnumUnderlyingTypeSerializer<TEnum, TEnumUnderlyingType> : StructSerializerBase<TEnumUnderlyingType>, IEnumUnderlyingTypeSerializer + where TEnum : Enum where TEnumUnderlyingType : struct { - // private fields + // private fields private readonly IBsonSerializer<TEnum> _enumSerializer; // constructors diff --git a/src/MongoDB.Driver/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodCallExpressionToAggregationExpressionTranslator.cs b/src/MongoDB.Driver/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodCallExpressionToAggregationExpressionTranslator.cs index ae0c1f93193..4dbbe32fdd7 100644 --- a/src/MongoDB.Driver/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodCallExpressionToAggregationExpressionTranslator.cs +++ 
b/src/MongoDB.Driver/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodCallExpressionToAggregationExpressionTranslator.cs @@ -64,8 +64,10 @@ public static TranslatedExpression Translate(TranslationContext context, MethodC case "IsNullOrWhiteSpace": return IsNullOrWhiteSpaceMethodToAggregationExpressionTranslator.Translate(context, expression); case "IsSubsetOf": return IsSubsetOfMethodToAggregationExpressionTranslator.Translate(context, expression); case "Locf": return LocfMethodToAggregationExpressionTranslator.Translate(context, expression); + case "Median": return MedianMethodToAggregationExpressionTranslator.Translate(context, expression); case "OfType": return OfTypeMethodToAggregationExpressionTranslator.Translate(context, expression); case "Parse": return ParseMethodToAggregationExpressionTranslator.Translate(context, expression); + case "Percentile": return PercentileMethodToAggregationExpressionTranslator.Translate(context, expression); case "Pow": return PowMethodToAggregationExpressionTranslator.Translate(context, expression); case "Push": return PushMethodToAggregationExpressionTranslator.Translate(context, expression); case "Range": return RangeMethodToAggregationExpressionTranslator.Translate(context, expression); diff --git a/src/MongoDB.Driver/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/MedianMethodToAggregationExpressionTranslator.cs b/src/MongoDB.Driver/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/MedianMethodToAggregationExpressionTranslator.cs new file mode 100644 index 00000000000..0baa8709c1d --- /dev/null +++ b/src/MongoDB.Driver/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/MedianMethodToAggregationExpressionTranslator.cs @@ -0,0 +1,107 @@ +/* Copyright 2010-present MongoDB Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +using System.Linq.Expressions; +using System.Reflection; +using MongoDB.Driver.Linq.Linq3Implementation.Ast.Expressions; +using MongoDB.Driver.Linq.Linq3Implementation.Misc; +using MongoDB.Driver.Linq.Linq3Implementation.Reflection; +using MongoDB.Driver.Linq.Linq3Implementation.Serializers; + +namespace MongoDB.Driver.Linq.Linq3Implementation.Translators.ExpressionToAggregationExpressionTranslators.MethodTranslators +{ + internal class MedianMethodToAggregationExpressionTranslator + { + private static readonly MethodInfo[] __medianMethods = + [ + MongoEnumerableMethod.MedianDecimal, + MongoEnumerableMethod.MedianDecimalWithSelector, + MongoEnumerableMethod.MedianDouble, + MongoEnumerableMethod.MedianDoubleWithSelector, + MongoEnumerableMethod.MedianInt32, + MongoEnumerableMethod.MedianInt32WithSelector, + MongoEnumerableMethod.MedianInt64, + MongoEnumerableMethod.MedianInt64WithSelector, + MongoEnumerableMethod.MedianNullableDecimal, + MongoEnumerableMethod.MedianNullableDecimalWithSelector, + MongoEnumerableMethod.MedianNullableDouble, + MongoEnumerableMethod.MedianNullableDoubleWithSelector, + MongoEnumerableMethod.MedianNullableInt32, + MongoEnumerableMethod.MedianNullableInt32WithSelector, + MongoEnumerableMethod.MedianNullableInt64, + MongoEnumerableMethod.MedianNullableInt64WithSelector, + MongoEnumerableMethod.MedianNullableSingle, + 
MongoEnumerableMethod.MedianNullableSingleWithSelector, + MongoEnumerableMethod.MedianSingle, + MongoEnumerableMethod.MedianSingleWithSelector + ]; + + private static readonly MethodInfo[] __medianWithSelectorMethods = + [ + MongoEnumerableMethod.MedianDecimalWithSelector, + MongoEnumerableMethod.MedianDoubleWithSelector, + MongoEnumerableMethod.MedianInt32WithSelector, + MongoEnumerableMethod.MedianInt64WithSelector, + MongoEnumerableMethod.MedianNullableDecimalWithSelector, + MongoEnumerableMethod.MedianNullableDoubleWithSelector, + MongoEnumerableMethod.MedianNullableInt32WithSelector, + MongoEnumerableMethod.MedianNullableInt64WithSelector, + MongoEnumerableMethod.MedianNullableSingleWithSelector, + MongoEnumerableMethod.MedianSingleWithSelector + ]; + + public static TranslatedExpression Translate(TranslationContext context, MethodCallExpression expression) + { + var method = expression.Method; + var arguments = expression.Arguments; + + if (method.IsOneOf(__medianMethods)) + { + var sourceExpression = arguments[0]; + var sourceTranslation = ExpressionToAggregationExpressionTranslator.TranslateEnumerable(context, sourceExpression); + NestedAsQueryableHelper.EnsureQueryableMethodHasNestedAsQueryableSource(expression, sourceTranslation); + + var inputAst = sourceTranslation.Ast; + + if (method.IsOneOf(__medianWithSelectorMethods)) + { + var sourceItemSerializer = ArraySerializerHelper.GetItemSerializer(sourceTranslation.Serializer); + + var selectorLambda = (LambdaExpression)arguments[1]; + var selectorParameter = selectorLambda.Parameters[0]; + var selectorParameterSymbol = context.CreateSymbol(selectorParameter, sourceItemSerializer); + var selectorContext = context.WithSymbol(selectorParameterSymbol); + var selectorTranslation = ExpressionToAggregationExpressionTranslator.Translate(selectorContext, selectorLambda.Body); + + inputAst = AstExpression.Map( + input: sourceTranslation.Ast, + @as: selectorParameterSymbol.Var, + @in: selectorTranslation.Ast); + } + 
+ var ast = AstExpression.Median(inputAst); + var serializer = StandardSerializers.GetSerializer(expression.Type); + return new TranslatedExpression(expression, ast, serializer); + } + + if (WindowMethodToAggregationExpressionTranslator.CanTranslate(expression)) + { + return WindowMethodToAggregationExpressionTranslator.Translate(context, expression); + } + + throw new ExpressionNotSupportedException(expression); + } + } +} \ No newline at end of file diff --git a/src/MongoDB.Driver/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/PercentileMethodToAggregationExpressionTranslator.cs b/src/MongoDB.Driver/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/PercentileMethodToAggregationExpressionTranslator.cs new file mode 100644 index 00000000000..216d89f1c49 --- /dev/null +++ b/src/MongoDB.Driver/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/PercentileMethodToAggregationExpressionTranslator.cs @@ -0,0 +1,110 @@ +/* Copyright 2010-present MongoDB Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +using System.Linq.Expressions; +using System.Reflection; +using MongoDB.Driver.Linq.Linq3Implementation.Ast.Expressions; +using MongoDB.Driver.Linq.Linq3Implementation.Misc; +using MongoDB.Driver.Linq.Linq3Implementation.Reflection; +using MongoDB.Driver.Linq.Linq3Implementation.Serializers; + +namespace MongoDB.Driver.Linq.Linq3Implementation.Translators.ExpressionToAggregationExpressionTranslators.MethodTranslators +{ + internal class PercentileMethodToAggregationExpressionTranslator + { + private static readonly MethodInfo[] __percentileMethods = + [ + MongoEnumerableMethod.PercentileDecimal, + MongoEnumerableMethod.PercentileDecimalWithSelector, + MongoEnumerableMethod.PercentileDouble, + MongoEnumerableMethod.PercentileDoubleWithSelector, + MongoEnumerableMethod.PercentileInt32, + MongoEnumerableMethod.PercentileInt32WithSelector, + MongoEnumerableMethod.PercentileInt64, + MongoEnumerableMethod.PercentileInt64WithSelector, + MongoEnumerableMethod.PercentileNullableDecimal, + MongoEnumerableMethod.PercentileNullableDecimalWithSelector, + MongoEnumerableMethod.PercentileNullableDouble, + MongoEnumerableMethod.PercentileNullableDoubleWithSelector, + MongoEnumerableMethod.PercentileNullableInt32, + MongoEnumerableMethod.PercentileNullableInt32WithSelector, + MongoEnumerableMethod.PercentileNullableInt64, + MongoEnumerableMethod.PercentileNullableInt64WithSelector, + MongoEnumerableMethod.PercentileNullableSingle, + MongoEnumerableMethod.PercentileNullableSingleWithSelector, + MongoEnumerableMethod.PercentileSingle, + MongoEnumerableMethod.PercentileSingleWithSelector + ]; + + private static readonly MethodInfo[] __percentileWithSelectorMethods = + [ + MongoEnumerableMethod.PercentileDecimalWithSelector, + MongoEnumerableMethod.PercentileDoubleWithSelector, + MongoEnumerableMethod.PercentileInt32WithSelector, + MongoEnumerableMethod.PercentileInt64WithSelector, + MongoEnumerableMethod.PercentileNullableDecimalWithSelector, + 
MongoEnumerableMethod.PercentileNullableDoubleWithSelector, + MongoEnumerableMethod.PercentileNullableInt32WithSelector, + MongoEnumerableMethod.PercentileNullableInt64WithSelector, + MongoEnumerableMethod.PercentileNullableSingleWithSelector, + MongoEnumerableMethod.PercentileSingleWithSelector + ]; + + public static TranslatedExpression Translate(TranslationContext context, MethodCallExpression expression) + { + var method = expression.Method; + var arguments = expression.Arguments; + + if (method.IsOneOf(__percentileMethods)) + { + var sourceExpression = arguments[0]; + var sourceTranslation = ExpressionToAggregationExpressionTranslator.TranslateEnumerable(context, sourceExpression); + NestedAsQueryableHelper.EnsureQueryableMethodHasNestedAsQueryableSource(expression, sourceTranslation); + + var inputAst = sourceTranslation.Ast; + + if (method.IsOneOf(__percentileWithSelectorMethods)) + { + var sourceItemSerializer = ArraySerializerHelper.GetItemSerializer(sourceTranslation.Serializer); + + var selectorLambda = (LambdaExpression)arguments[1]; + var selectorParameter = selectorLambda.Parameters[0]; + var selectorParameterSymbol = context.CreateSymbol(selectorParameter, sourceItemSerializer); + var selectorContext = context.WithSymbol(selectorParameterSymbol); + var selectorTranslation = ExpressionToAggregationExpressionTranslator.Translate(selectorContext, selectorLambda.Body); + + inputAst = AstExpression.Map( + input: sourceTranslation.Ast, + @as: selectorParameterSymbol.Var, + @in: selectorTranslation.Ast); + } + + var percentilesExpression = arguments[arguments.Count - 1]; + var percentilesTranslation = ExpressionToAggregationExpressionTranslator.TranslateEnumerable(context, percentilesExpression); + + var ast = AstExpression.Percentile(inputAst, percentilesTranslation.Ast); + var serializer = StandardSerializers.GetSerializer(expression.Type); + return new TranslatedExpression(expression, ast, serializer); + } + + if 
(WindowMethodToAggregationExpressionTranslator.CanTranslate(expression)) + { + return WindowMethodToAggregationExpressionTranslator.Translate(context, expression); + } + + throw new ExpressionNotSupportedException(expression); + } + } +} \ No newline at end of file diff --git a/src/MongoDB.Driver/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/WindowMethodToAggregationExpressionTranslator.cs b/src/MongoDB.Driver/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/WindowMethodToAggregationExpressionTranslator.cs index 9b54a5fdd18..f45cffc3e49 100644 --- a/src/MongoDB.Driver/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/WindowMethodToAggregationExpressionTranslator.cs +++ b/src/MongoDB.Driver/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/WindowMethodToAggregationExpressionTranslator.cs @@ -94,7 +94,27 @@ internal static class WindowMethodToAggregationExpressionTranslator WindowMethod.Last, WindowMethod.Locf, WindowMethod.Max, + WindowMethod.MedianWithDecimal, + WindowMethod.MedianWithDouble, + WindowMethod.MedianWithInt32, + WindowMethod.MedianWithInt64, + WindowMethod.MedianWithNullableDecimal, + WindowMethod.MedianWithNullableDouble, + WindowMethod.MedianWithNullableInt32, + WindowMethod.MedianWithNullableInt64, + WindowMethod.MedianWithNullableSingle, + WindowMethod.MedianWithSingle, WindowMethod.Min, + WindowMethod.PercentileWithDecimal, + WindowMethod.PercentileWithDouble, + WindowMethod.PercentileWithInt32, + WindowMethod.PercentileWithInt64, + WindowMethod.PercentileWithNullableDecimal, + WindowMethod.PercentileWithNullableDouble, + WindowMethod.PercentileWithNullableInt32, + WindowMethod.PercentileWithNullableInt64, + WindowMethod.PercentileWithNullableSingle, + WindowMethod.PercentileWithSingle, WindowMethod.Push, WindowMethod.Rank, WindowMethod.Shift, 
@@ -253,6 +273,30 @@ internal static class WindowMethodToAggregationExpressionTranslator WindowMethod.ShiftWithDefaultValue }; + private static readonly MethodInfo[] __quantileMethods = + [ + WindowMethod.MedianWithDecimal, + WindowMethod.MedianWithDouble, + WindowMethod.MedianWithInt32, + WindowMethod.MedianWithInt64, + WindowMethod.MedianWithNullableDecimal, + WindowMethod.MedianWithNullableDouble, + WindowMethod.MedianWithNullableInt32, + WindowMethod.MedianWithNullableInt64, + WindowMethod.MedianWithNullableSingle, + WindowMethod.MedianWithSingle, + WindowMethod.PercentileWithDecimal, + WindowMethod.PercentileWithDouble, + WindowMethod.PercentileWithInt32, + WindowMethod.PercentileWithInt64, + WindowMethod.PercentileWithNullableDecimal, + WindowMethod.PercentileWithNullableDouble, + WindowMethod.PercentileWithNullableInt32, + WindowMethod.PercentileWithNullableInt64, + WindowMethod.PercentileWithNullableSingle, + WindowMethod.PercentileWithSingle + ]; + public static bool CanTranslate(MethodCallExpression expression) { return expression.Method.IsOneOf(__windowMethods); @@ -339,6 +383,27 @@ public static TranslatedExpression Translate(TranslationContext context, MethodC return new TranslatedExpression(expression, ast, serializer); } + if (method.IsOneOf(__quantileMethods)) + { + ThrowIfSelectorTranslationIsNull(selectorTranslation); + AstExpression ast; + + if (method.Name == "Percentile") + { + // Get the percentiles parameter + var percentilesExpression = GetArgument<Expression>(parameters, "percentiles", arguments); + var percentilesTranslation = ExpressionToAggregationExpressionTranslator.TranslateEnumerable(context, percentilesExpression); + ast = AstExpression.PercentileWindowExpression(selectorTranslation.Ast, percentilesTranslation.Ast, window); + } + else + { + ast = AstExpression.MedianWindowExpression(selectorTranslation.Ast, window); + } + + var serializer = StandardSerializers.GetSerializer(method.ReturnType); + return new 
TranslatedExpression(expression, ast, serializer); + } + if (method.IsOneOf(__shiftMethods)) { ThrowIfSelectorTranslationIsNull(selectorTranslation); diff --git a/src/MongoDB.Driver/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/NewKeyValuePairExpressionToAggregationExpressionTranslator.cs b/src/MongoDB.Driver/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/NewKeyValuePairExpressionToAggregationExpressionTranslator.cs index 8116c5b55b9..b2388206503 100644 --- a/src/MongoDB.Driver/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/NewKeyValuePairExpressionToAggregationExpressionTranslator.cs +++ b/src/MongoDB.Driver/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/NewKeyValuePairExpressionToAggregationExpressionTranslator.cs @@ -43,24 +43,32 @@ public static TranslatedExpression Translate( var keyTranslation = ExpressionToAggregationExpressionTranslator.Translate(context, keyExpression); var valueTranslation = ExpressionToAggregationExpressionTranslator.Translate(context, valueExpression); + var serializer = CreateResultSerializer(expression.Type, keyTranslation.Serializer, valueTranslation.Serializer, out var keyElementName, out var valueElementName); var ast = AstExpression.ComputedDocument([ - AstExpression.ComputedField("Key", keyTranslation.Ast), - AstExpression.ComputedField("Value", valueTranslation.Ast) + AstExpression.ComputedField(keyElementName, keyTranslation.Ast), + AstExpression.ComputedField(valueElementName, valueTranslation.Ast) ]); - var serializer = CreateResultSerializer(expression.Type, keyTranslation.Serializer, valueTranslation.Serializer); - return new TranslatedExpression(expression, ast, serializer); } - private static IBsonSerializer CreateResultSerializer(Type resultType, IBsonSerializer keySerializer, IBsonSerializer valueSerializer) + private static IBsonSerializer CreateResultSerializer( + Type 
resultType, + IBsonSerializer keySerializer, + IBsonSerializer valueSerializer, + out string keyElementName, + out string valueElementName) { var constructorInfo = resultType.GetConstructor([keySerializer.ValueType, valueSerializer.ValueType]); var classMap = new BsonClassMap(resultType); classMap.MapConstructor(constructorInfo); classMap.AutoMap(); - classMap.GetMemberMap("Key").SetSerializer(keySerializer); - classMap.GetMemberMap("Value").SetSerializer(valueSerializer); + var keyMemberMap = classMap.GetMemberMap("Key"); + keyElementName = keyMemberMap.ElementName; + keyMemberMap.SetSerializer(keySerializer); + var valueMemberMap = classMap.GetMemberMap("Value"); + valueElementName = valueMemberMap.ElementName; + valueMemberMap.SetSerializer(valueSerializer); classMap.Freeze(); // have to use BsonClassMapSerializer here to mimic the MemberInitExpressionToAggregationExpressionTranslator to avoid risking a behavioral breaking change diff --git a/src/MongoDB.Driver/Linq/Linq3Implementation/Translators/ExpressionToExecutableQueryTranslators/AverageMethodToExecutableQueryTranslator.cs b/src/MongoDB.Driver/Linq/Linq3Implementation/Translators/ExpressionToExecutableQueryTranslators/AverageMethodToExecutableQueryTranslator.cs index 957f87809a2..fbcf6542d88 100644 --- a/src/MongoDB.Driver/Linq/Linq3Implementation/Translators/ExpressionToExecutableQueryTranslators/AverageMethodToExecutableQueryTranslator.cs +++ b/src/MongoDB.Driver/Linq/Linq3Implementation/Translators/ExpressionToExecutableQueryTranslators/AverageMethodToExecutableQueryTranslator.cs @@ -36,7 +36,8 @@ internal static class AverageMethodToExecutableQueryTranslator<TOutput> // private static fields private static readonly MethodInfo[] __averageMethods; private static readonly MethodInfo[] __averageWithSelectorMethods; - private static readonly IExecutableQueryFinalizer<TOutput, TOutput> __finalizer = new SingleFinalizer<TOutput>(); + private static readonly IExecutableQueryFinalizer<TOutput, TOutput> 
__singleFinalizer = new SingleFinalizer<TOutput>(); + private static readonly IExecutableQueryFinalizer<TOutput, TOutput> __singleOrDefaultFinalizer = new SingleOrDefaultFinalizer<TOutput>(); // static constructor static AverageMethodToExecutableQueryTranslator() @@ -138,11 +139,11 @@ public static ExecutableQuery<TDocument, TOutput> Translate<TDocument>(MongoQuer IBsonSerializer outputValueSerializer = expression.GetResultType() switch { - Type t when t == typeof(int) => new Int32Serializer(), - Type t when t == typeof(long) => new Int64Serializer(), - Type t when t == typeof(float) => new SingleSerializer(), - Type t when t == typeof(double) => new DoubleSerializer(), - Type t when t == typeof(decimal) => new DecimalSerializer(), + Type t when t == typeof(int) => Int32Serializer.Instance, + Type t when t == typeof(long) => Int64Serializer.Instance, + Type t when t == typeof(float) => SingleSerializer.Instance, + Type t when t == typeof(double) => DoubleSerializer.Instance, + Type t when t == typeof(decimal) => DecimalSerializer.Instance, Type { IsConstructedGenericType: true } t when t.GetGenericTypeDefinition() == typeof(Nullable<>) => (IBsonSerializer)Activator.CreateInstance(typeof(NullableSerializer<>).MakeGenericType(t.GenericTypeArguments[0])), _ => throw new ExpressionNotSupportedException(expression) }; @@ -155,10 +156,14 @@ public static ExecutableQuery<TDocument, TOutput> Translate<TDocument>(MongoQuer AstStage.Project(AstProject.ExcludeId()), outputWrappedValueSerializer); + var returnType = expression.Type; + return ExecutableQuery.Create( provider, pipeline, - __finalizer); + returnType.IsNullable() // Note: numeric types are never reference types + ? 
__singleOrDefaultFinalizer + : __singleFinalizer); } throw new ExpressionNotSupportedException(expression); diff --git a/src/MongoDB.Driver/Linq/Linq3Implementation/Translators/ExpressionToFilterTranslators/MethodTranslators/InjectMethodToFilterTranslator.cs b/src/MongoDB.Driver/Linq/Linq3Implementation/Translators/ExpressionToFilterTranslators/MethodTranslators/InjectMethodToFilterTranslator.cs index 61493819b50..a21ed9d567e 100644 --- a/src/MongoDB.Driver/Linq/Linq3Implementation/Translators/ExpressionToFilterTranslators/MethodTranslators/InjectMethodToFilterTranslator.cs +++ b/src/MongoDB.Driver/Linq/Linq3Implementation/Translators/ExpressionToFilterTranslators/MethodTranslators/InjectMethodToFilterTranslator.cs @@ -13,10 +13,12 @@ * limitations under the License. */ +using System.Linq; using System.Linq.Expressions; using System.Reflection; using MongoDB.Bson; using MongoDB.Bson.Serialization; +using MongoDB.Driver.Linq.Linq3Implementation.Ast.Expressions; using MongoDB.Driver.Linq.Linq3Implementation.Ast.Filters; using MongoDB.Driver.Linq.Linq3Implementation.ExtensionMethods; using MongoDB.Driver.Linq.Linq3Implementation.Misc; @@ -44,12 +46,21 @@ public static AstFilter Translate(TranslationContext context, MethodCallExpressi var filterExpression = arguments[0]; var filterDefinition = filterExpression.GetConstantValue<object>(expression); var filterDefinitionType = filterDefinition.GetType(); // we KNOW it's a FilterDefinition<TDocument> because of the Inject method signature - var documentType = filterDefinitionType.GetGenericArguments()[0]; + var filterDefinitionDocumentType = filterDefinitionType.GetGenericArguments()[0]; + var rootSymbol = context.SymbolTable.Symbols.SingleOrDefault(s => s.Ast.IsRootVar()); + if (rootSymbol == null) + { + throw new ExpressionNotSupportedException(expression, because: "there is no current root symbol"); + } + var documentSerializer = rootSymbol.Serializer; + if (filterDefinitionDocumentType != documentSerializer.ValueType) 
+ { + throw new ExpressionNotSupportedException(expression, because: $"FilterDefinition TDocument type: {filterDefinitionDocumentType} does not match document type {documentSerializer.ValueType} "); + } var serializerRegistry = BsonSerializer.SerializerRegistry; - var documentSerializer = serializerRegistry.GetSerializer(documentType); // TODO: is this the right serializer? - var renderFilterMethod = __renderFilterMethodInfo.MakeGenericMethod(documentType); + var renderFilterMethod = __renderFilterMethodInfo.MakeGenericMethod(filterDefinitionDocumentType); var renderedFilter = (BsonDocument)renderFilterMethod.Invoke(null, new[] { filterDefinition, documentSerializer, serializerRegistry, context.TranslationOptions }); return AstFilter.Raw(renderedFilter); diff --git a/src/MongoDB.Driver/Linq/MongoEnumerable.cs b/src/MongoDB.Driver/Linq/MongoEnumerable.cs index 787da860a4e..eda95082fe0 100644 --- a/src/MongoDB.Driver/Linq/MongoEnumerable.cs +++ b/src/MongoDB.Driver/Linq/MongoEnumerable.cs @@ -16,8 +16,6 @@ using System; using System.Collections.Generic; -using System.Linq; -using System.Runtime.CompilerServices; using MongoDB.Driver.Core.Misc; using MongoDB.Driver.Linq.Linq3Implementation.Misc; @@ -238,6 +236,226 @@ public static IEnumerable<TResult> MaxN<TSource, TKey, TResult>( throw CustomLinqExtensionMethodHelper.CreateNotSupportedException(); } + /// <summary> + /// Computes the median of a sequence of values. + /// </summary> + /// <param name="source">The sequence of values.</param> + /// <returns>The median value.</returns> + public static decimal Median(this IEnumerable<decimal> source) + { + throw CustomLinqExtensionMethodHelper.CreateNotSupportedException(); + } + + /// <summary> + /// Computes the median of a sequence of values. + /// </summary> + /// <param name="source">The sequence of values.</param> + /// <returns>The median value.</returns> + public static decimal? 
Median(this IEnumerable<decimal?> source) + { + throw CustomLinqExtensionMethodHelper.CreateNotSupportedException(); + } + + /// <summary> + /// Computes the median of a sequence of values. + /// </summary> + /// <param name="source">The sequence of values.</param> + /// <returns>The median value.</returns> + public static double Median(this IEnumerable<double> source) + { + throw CustomLinqExtensionMethodHelper.CreateNotSupportedException(); + } + + /// <summary> + /// Computes the median of a sequence of values. + /// </summary> + /// <param name="source">The sequence of values.</param> + /// <returns>The median value.</returns> + public static double? Median(this IEnumerable<double?> source) + { + throw CustomLinqExtensionMethodHelper.CreateNotSupportedException(); + } + + /// <summary> + /// Computes the median of a sequence of values. + /// </summary> + /// <param name="source">The sequence of values.</param> + /// <returns>The median value.</returns> + public static float Median(this IEnumerable<float> source) + { + throw CustomLinqExtensionMethodHelper.CreateNotSupportedException(); + } + + /// <summary> + /// Computes the median of a sequence of values. + /// </summary> + /// <param name="source">The sequence of values.</param> + /// <returns>The median value.</returns> + public static float? Median(this IEnumerable<float?> source) + { + throw CustomLinqExtensionMethodHelper.CreateNotSupportedException(); + } + + /// <summary> + /// Computes the median of a sequence of values. + /// </summary> + /// <param name="source">The sequence of values.</param> + /// <returns>The median value.</returns> + public static double Median(this IEnumerable<int> source) + { + throw CustomLinqExtensionMethodHelper.CreateNotSupportedException(); + } + + /// <summary> + /// Computes the median of a sequence of values. + /// </summary> + /// <param name="source">The sequence of values.</param> + /// <returns>The median value.</returns> + public static double? 
Median(this IEnumerable<int?> source) + { + throw CustomLinqExtensionMethodHelper.CreateNotSupportedException(); + } + + /// <summary> + /// Computes the median of a sequence of values. + /// </summary> + /// <param name="source">The sequence of values.</param> + /// <returns>The median value.</returns> + public static double Median(this IEnumerable<long> source) + { + throw CustomLinqExtensionMethodHelper.CreateNotSupportedException(); + } + + /// <summary> + /// Computes the median of a sequence of values. + /// </summary> + /// <param name="source">The sequence of values.</param> + /// <returns>The median value.</returns> + public static double? Median(this IEnumerable<long?> source) + { + throw CustomLinqExtensionMethodHelper.CreateNotSupportedException(); + } + + /// <summary> + /// Computes the median of a sequence of values. + /// </summary> + /// <typeparam name="TSource">The type of the elements in the source sequence.</typeparam> + /// <param name="source">A sequence of values to calculate the median of.</param> + /// <param name="selector">A transform function to apply to each element.</param> + /// <returns>The median value.</returns> + public static decimal Median<TSource>(this IEnumerable<TSource> source, Func<TSource, decimal> selector) + { + throw CustomLinqExtensionMethodHelper.CreateNotSupportedException(); + } + + /// <summary> + /// Computes the median of a sequence of values. + /// </summary> + /// <typeparam name="TSource">The type of the elements in the source sequence.</typeparam> + /// <param name="source">A sequence of values to calculate the median of.</param> + /// <param name="selector">A transform function to apply to each element.</param> + /// <returns>The median value.</returns> + public static decimal? Median<TSource>(this IEnumerable<TSource> source, Func<TSource, decimal?> selector) + { + throw CustomLinqExtensionMethodHelper.CreateNotSupportedException(); + } + + /// <summary> + /// Computes the median of a sequence of values. 
+ /// </summary> + /// <typeparam name="TSource">The type of the elements in the source sequence.</typeparam> + /// <param name="source">A sequence of values to calculate the median of.</param> + /// <param name="selector">A transform function to apply to each element.</param> + /// <returns>The median value.</returns> + public static double Median<TSource>(this IEnumerable<TSource> source, Func<TSource, double> selector) + { + throw CustomLinqExtensionMethodHelper.CreateNotSupportedException(); + } + + /// <summary> + /// Computes the median of a sequence of values. + /// </summary> + /// <typeparam name="TSource">The type of the elements in the source sequence.</typeparam> + /// <param name="source">A sequence of values to calculate the median of.</param> + /// <param name="selector">A transform function to apply to each element.</param> + /// <returns>The median value.</returns> + public static double? Median<TSource>(this IEnumerable<TSource> source, Func<TSource, double?> selector) + { + throw CustomLinqExtensionMethodHelper.CreateNotSupportedException(); + } + + /// <summary> + /// Computes the median of a sequence of values. + /// </summary> + /// <typeparam name="TSource">The type of the elements in the source sequence.</typeparam> + /// <param name="source">A sequence of values to calculate the median of.</param> + /// <param name="selector">A transform function to apply to each element.</param> + /// <returns>The median value.</returns> + public static float Median<TSource>(this IEnumerable<TSource> source, Func<TSource, float> selector) + { + throw CustomLinqExtensionMethodHelper.CreateNotSupportedException(); + } + + /// <summary> + /// Computes the median of a sequence of values. 
+ /// </summary> + /// <typeparam name="TSource">The type of the elements in the source sequence.</typeparam> + /// <param name="source">A sequence of values to calculate the median of.</param> + /// <param name="selector">A transform function to apply to each element.</param> + /// <returns>The median value.</returns> + public static float? Median<TSource>(this IEnumerable<TSource> source, Func<TSource, float?> selector) + { + throw CustomLinqExtensionMethodHelper.CreateNotSupportedException(); + } + + /// <summary> + /// Computes the median of a sequence of values. + /// </summary> + /// <typeparam name="TSource">The type of the elements in the source sequence.</typeparam> + /// <param name="source">A sequence of values to calculate the median of.</param> + /// <param name="selector">A transform function to apply to each element.</param> + /// <returns>The median value.</returns> + public static double Median<TSource>(this IEnumerable<TSource> source, Func<TSource, int> selector) + { + throw CustomLinqExtensionMethodHelper.CreateNotSupportedException(); + } + + /// <summary> + /// Computes the median of a sequence of values. + /// </summary> + /// <typeparam name="TSource">The type of the elements in the source sequence.</typeparam> + /// <param name="source">A sequence of values to calculate the median of.</param> + /// <param name="selector">A transform function to apply to each element.</param> + /// <returns>The median value.</returns> + public static double? Median<TSource>(this IEnumerable<TSource> source, Func<TSource, int?> selector) + { + throw CustomLinqExtensionMethodHelper.CreateNotSupportedException(); + } + + /// <summary> + /// Computes the median of a sequence of values. 
+ /// </summary> + /// <typeparam name="TSource">The type of the elements in the source sequence.</typeparam> + /// <param name="source">A sequence of values to calculate the median of.</param> + /// <param name="selector">A transform function to apply to each element.</param> + /// <returns>The median value.</returns> + public static double Median<TSource>(this IEnumerable<TSource> source, Func<TSource, long> selector) + { + throw CustomLinqExtensionMethodHelper.CreateNotSupportedException(); + } + + /// <summary> + /// Computes the median of a sequence of values. + /// </summary> + /// <typeparam name="TSource">The type of the elements in the source sequence.</typeparam> + /// <param name="source">A sequence of values to calculate the median of.</param> + /// <param name="selector">A transform function to apply to each element.</param> + /// <returns>The median value.</returns> + public static double? Median<TSource>(this IEnumerable<TSource> source, Func<TSource, long?> selector) + { + throw CustomLinqExtensionMethodHelper.CreateNotSupportedException(); + } + /// <summary> /// Returns the min n results. /// </summary> @@ -275,6 +493,246 @@ public static IEnumerable<TResult> MinN<TSource, TKey, TResult>( throw CustomLinqExtensionMethodHelper.CreateNotSupportedException(); } + /// <summary> + /// Computes multiple percentiles of a sequence of values. + /// </summary> + /// <param name="source">A sequence of values to calculate the percentiles of.</param> + /// <param name="percentiles">The percentiles to compute (each between 0.0 and 1.0).</param> + /// <returns>The percentiles of the sequence of values.</returns> + public static decimal[] Percentile(this IEnumerable<decimal> source, IEnumerable<double> percentiles) + { + throw CustomLinqExtensionMethodHelper.CreateNotSupportedException(); + } + + /// <summary> + /// Computes multiple percentiles of a sequence of values. 
+ /// </summary> + /// <param name="source">A sequence of values to calculate the percentiles of.</param> + /// <param name="percentiles">The percentiles to compute (each between 0.0 and 1.0).</param> + /// <returns>The percentiles of the sequence of values.</returns> + public static decimal?[] Percentile(this IEnumerable<decimal?> source, IEnumerable<double> percentiles) + { + throw CustomLinqExtensionMethodHelper.CreateNotSupportedException(); + } + + /// <summary> + /// Computes multiple percentiles of a sequence of values. + /// </summary> + /// <param name="source">A sequence of values to calculate the percentiles of.</param> + /// <param name="percentiles">The percentiles to compute (each between 0.0 and 1.0).</param> + /// <returns>The percentiles of the sequence of values.</returns> + public static double[] Percentile(this IEnumerable<double> source, IEnumerable<double> percentiles) + { + throw CustomLinqExtensionMethodHelper.CreateNotSupportedException(); + } + + /// <summary> + /// Computes multiple percentiles of a sequence of values. + /// </summary> + /// <param name="source">A sequence of values to calculate the percentiles of.</param> + /// <param name="percentiles">The percentiles to compute (each between 0.0 and 1.0).</param> + /// <returns>The percentiles of the sequence of values.</returns> + public static double?[] Percentile(this IEnumerable<double?> source, IEnumerable<double> percentiles) + { + throw CustomLinqExtensionMethodHelper.CreateNotSupportedException(); + } + + /// <summary> + /// Computes multiple percentiles of a sequence of values. 
+ /// </summary> + /// <param name="source">A sequence of values to calculate the percentiles of.</param> + /// <param name="percentiles">The percentiles to compute (each between 0.0 and 1.0).</param> + /// <returns>The percentiles of the sequence of values.</returns> + public static float[] Percentile(this IEnumerable<float> source, IEnumerable<double> percentiles) + { + throw CustomLinqExtensionMethodHelper.CreateNotSupportedException(); + } + + /// <summary> + /// Computes multiple percentiles of a sequence of values. + /// </summary> + /// <param name="source">A sequence of values to calculate the percentiles of.</param> + /// <param name="percentiles">The percentiles to compute (each between 0.0 and 1.0).</param> + /// <returns>The percentiles of the sequence of values.</returns> + public static float?[] Percentile(this IEnumerable<float?> source, IEnumerable<double> percentiles) + { + throw CustomLinqExtensionMethodHelper.CreateNotSupportedException(); + } + + /// <summary> + /// Computes multiple percentiles of a sequence of values. + /// </summary> + /// <param name="source">A sequence of values to calculate the percentiles of.</param> + /// <param name="percentiles">The percentiles to compute (each between 0.0 and 1.0).</param> + /// <returns>The percentiles of the sequence of values.</returns> + public static double[] Percentile(this IEnumerable<int> source, IEnumerable<double> percentiles) + { + throw CustomLinqExtensionMethodHelper.CreateNotSupportedException(); + } + + /// <summary> + /// Computes multiple percentiles of a sequence of values. 
+ /// </summary> + /// <param name="source">A sequence of values to calculate the percentiles of.</param> + /// <param name="percentiles">The percentiles to compute (each between 0.0 and 1.0).</param> + /// <returns>The percentiles of the sequence of values.</returns> + public static double?[] Percentile(this IEnumerable<int?> source, IEnumerable<double> percentiles) + { + throw CustomLinqExtensionMethodHelper.CreateNotSupportedException(); + } + + /// <summary> + /// Computes multiple percentiles of a sequence of values. + /// </summary> + /// <param name="source">A sequence of values to calculate the percentiles of.</param> + /// <param name="percentiles">The percentiles to compute (each between 0.0 and 1.0).</param> + /// <returns>The percentiles of the sequence of values.</returns> + public static double[] Percentile(this IEnumerable<long> source, IEnumerable<double> percentiles) + { + throw CustomLinqExtensionMethodHelper.CreateNotSupportedException(); + } + + /// <summary> + /// Computes multiple percentiles of a sequence of values. + /// </summary> + /// <param name="source">A sequence of values to calculate the percentiles of.</param> + /// <param name="percentiles">The percentiles to compute (each between 0.0 and 1.0).</param> + /// <returns>The percentiles of the sequence of values.</returns> + public static double?[] Percentile(this IEnumerable<long?> source, IEnumerable<double> percentiles) + { + throw CustomLinqExtensionMethodHelper.CreateNotSupportedException(); + } + + /// <summary> + /// Computes multiple percentiles of a sequence of values. 
+ /// </summary> + /// <typeparam name="TSource">The type of the elements of <paramref name="source" />.</typeparam> + /// <param name="source">A sequence of values to calculate the percentiles of.</param> + /// <param name="selector">A transform function to apply to each element.</param> + /// <param name="percentiles">The percentiles to compute (each between 0.0 and 1.0).</param> + /// <returns>The percentiles of the sequence of values.</returns> + public static decimal[] Percentile<TSource>(this IEnumerable<TSource> source, Func<TSource, decimal> selector, IEnumerable<double> percentiles) + { + throw CustomLinqExtensionMethodHelper.CreateNotSupportedException(); + } + + /// <summary> + /// Computes multiple percentiles of a sequence of values. + /// </summary> + /// <typeparam name="TSource">The type of the elements of <paramref name="source" />.</typeparam> + /// <param name="source">A sequence of values to calculate the percentiles of.</param> + /// <param name="selector">A transform function to apply to each element.</param> + /// <param name="percentiles">The percentiles to compute (each between 0.0 and 1.0).</param> + /// <returns>The percentiles of the sequence of values.</returns> + public static decimal?[] Percentile<TSource>(this IEnumerable<TSource> source, Func<TSource, decimal?> selector, IEnumerable<double> percentiles) + { + throw CustomLinqExtensionMethodHelper.CreateNotSupportedException(); + } + + /// <summary> + /// Computes multiple percentiles of a sequence of values. 
+ /// </summary> + /// <typeparam name="TSource">The type of the elements of <paramref name="source" />.</typeparam> + /// <param name="source">A sequence of values to calculate the percentiles of.</param> + /// <param name="selector">A transform function to apply to each element.</param> + /// <param name="percentiles">The percentiles to compute (each between 0.0 and 1.0).</param> + /// <returns>The percentiles of the sequence of values.</returns> + public static double[] Percentile<TSource>(this IEnumerable<TSource> source, Func<TSource, double> selector, IEnumerable<double> percentiles) + { + throw CustomLinqExtensionMethodHelper.CreateNotSupportedException(); + } + + /// <summary> + /// Computes multiple percentiles of a sequence of values. + /// </summary> + /// <typeparam name="TSource">The type of the elements of <paramref name="source" />.</typeparam> + /// <param name="source">A sequence of values to calculate the percentiles of.</param> + /// <param name="selector">A transform function to apply to each element.</param> + /// <param name="percentiles">The percentiles to compute (each between 0.0 and 1.0).</param> + /// <returns>The percentiles of the sequence of values.</returns> + public static double?[] Percentile<TSource>(this IEnumerable<TSource> source, Func<TSource, double?> selector, IEnumerable<double> percentiles) + { + throw CustomLinqExtensionMethodHelper.CreateNotSupportedException(); + } + + /// <summary> + /// Computes multiple percentiles of a sequence of values. 
+ /// </summary> + /// <typeparam name="TSource">The type of the elements of <paramref name="source" />.</typeparam> + /// <param name="source">A sequence of values to calculate the percentiles of.</param> + /// <param name="selector">A transform function to apply to each element.</param> + /// <param name="percentiles">The percentiles to compute (each between 0.0 and 1.0).</param> + /// <returns>The percentiles of the sequence of values.</returns> + public static float[] Percentile<TSource>(this IEnumerable<TSource> source, Func<TSource, float> selector, IEnumerable<double> percentiles) + { + throw CustomLinqExtensionMethodHelper.CreateNotSupportedException(); + } + + /// <summary> + /// Computes multiple percentiles of a sequence of values. + /// </summary> + /// <typeparam name="TSource">The type of the elements of <paramref name="source" />.</typeparam> + /// <param name="source">A sequence of values to calculate the percentiles of.</param> + /// <param name="selector">A transform function to apply to each element.</param> + /// <param name="percentiles">The percentiles to compute (each between 0.0 and 1.0).</param> + /// <returns>The percentiles of the sequence of values.</returns> + public static float?[] Percentile<TSource>(this IEnumerable<TSource> source, Func<TSource, float?> selector, IEnumerable<double> percentiles) + { + throw CustomLinqExtensionMethodHelper.CreateNotSupportedException(); + } + + /// <summary> + /// Computes multiple percentiles of a sequence of values. 
+ /// </summary> + /// <typeparam name="TSource">The type of the elements of <paramref name="source" />.</typeparam> + /// <param name="source">A sequence of values to calculate the percentiles of.</param> + /// <param name="selector">A transform function to apply to each element.</param> + /// <param name="percentiles">The percentiles to compute (each between 0.0 and 1.0).</param> + /// <returns>The percentiles of the sequence of values.</returns> + public static double[] Percentile<TSource>(this IEnumerable<TSource> source, Func<TSource, int> selector, IEnumerable<double> percentiles) + { + throw CustomLinqExtensionMethodHelper.CreateNotSupportedException(); + } + + /// <summary> + /// Computes multiple percentiles of a sequence of values. + /// </summary> + /// <typeparam name="TSource">The type of the elements of <paramref name="source" />.</typeparam> + /// <param name="source">A sequence of values to calculate the percentiles of.</param> + /// <param name="selector">A transform function to apply to each element.</param> + /// <param name="percentiles">The percentiles to compute (each between 0.0 and 1.0).</param> + /// <returns>The percentiles of the sequence of values.</returns> + public static double?[] Percentile<TSource>(this IEnumerable<TSource> source, Func<TSource, int?> selector, IEnumerable<double> percentiles) + { + throw CustomLinqExtensionMethodHelper.CreateNotSupportedException(); + } + + /// <summary> + /// Computes multiple percentiles of a sequence of values. 
+ /// </summary> + /// <typeparam name="TSource">The type of the elements of <paramref name="source" />.</typeparam> + /// <param name="source">A sequence of values to calculate the percentiles of.</param> + /// <param name="selector">A transform function to apply to each element.</param> + /// <param name="percentiles">The percentiles to compute (each between 0.0 and 1.0).</param> + /// <returns>The percentiles of the sequence of values.</returns> + public static double[] Percentile<TSource>(this IEnumerable<TSource> source, Func<TSource, long> selector, IEnumerable<double> percentiles) + { + throw CustomLinqExtensionMethodHelper.CreateNotSupportedException(); + } + + /// <summary> + /// Computes multiple percentiles of a sequence of values. + /// </summary> + /// <typeparam name="TSource">The type of the elements of <paramref name="source" />.</typeparam> + /// <param name="source">A sequence of values to calculate the percentiles of.</param> + /// <param name="selector">A transform function to apply to each element.</param> + /// <param name="percentiles">The percentiles to compute (each between 0.0 and 1.0).</param> + /// <returns>The percentiles of the sequence of values.</returns> + public static double?[] Percentile<TSource>(this IEnumerable<TSource> source, Func<TSource, long?> selector, IEnumerable<double> percentiles) + { + throw CustomLinqExtensionMethodHelper.CreateNotSupportedException(); + } + /// <summary> /// Computes the population standard deviation of a sequence of values. 
/// </summary> diff --git a/src/MongoDB.Driver/Linq/MongoQueryable.cs b/src/MongoDB.Driver/Linq/MongoQueryable.cs index 70c385d4284..48cc283c65d 100644 --- a/src/MongoDB.Driver/Linq/MongoQueryable.cs +++ b/src/MongoDB.Driver/Linq/MongoQueryable.cs @@ -3385,6 +3385,18 @@ public static IQueryable<TSource> Take<TSource>(this IQueryable<TSource> source, Expression.Constant(count))); } + /// <summary> + /// Returns an <see cref="IAsyncEnumerable{T}" /> which can be enumerated asynchronously. + /// </summary> + /// <typeparam name="TSource">The type of the elements of <paramref name="source" />.</typeparam> + /// <param name="source">A sequence of values.</param> + /// <returns>An IAsyncEnumerable for the query results.</returns> + public static IAsyncEnumerable<TSource> ToAsyncEnumerable<TSource>(this IQueryable<TSource> source) + { + var cursorSource = GetCursorSource(source); + return cursorSource.ToAsyncEnumerable(); + } + /// <summary> /// Executes the LINQ query and returns a cursor to the results. /// </summary> diff --git a/src/MongoDB.Driver/ListCollectionNamesOptions.cs b/src/MongoDB.Driver/ListCollectionNamesOptions.cs index ae1b89308c7..137f80a8173 100644 --- a/src/MongoDB.Driver/ListCollectionNamesOptions.cs +++ b/src/MongoDB.Driver/ListCollectionNamesOptions.cs @@ -1,4 +1,4 @@ -/* Copyright 2018-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,7 +13,9 @@ * limitations under the License. */ +using System; using MongoDB.Bson; +using MongoDB.Driver.Core.Misc; namespace MongoDB.Driver { @@ -26,6 +28,7 @@ public sealed class ListCollectionNamesOptions private bool? authorizedCollections; private BsonValue _comment; private FilterDefinition<BsonDocument> _filter; + private TimeSpan? 
_timeout; // properties /// <summary> @@ -54,5 +57,15 @@ public FilterDefinition<BsonDocument> Filter get { return _filter; } set { _filter = value; } } + + /// <summary> + /// Gets or sets the operation timeout. + /// </summary> + // TODO: CSOT: Make it public when CSOT will be ready for GA + internal TimeSpan? Timeout + { + get => _timeout; + set => _timeout = Ensure.IsNullOrValidTimeout(value, nameof(Timeout)); + } } } diff --git a/src/MongoDB.Driver/ListCollectionsOptions.cs b/src/MongoDB.Driver/ListCollectionsOptions.cs index 218fcd03655..e6f51a4a662 100644 --- a/src/MongoDB.Driver/ListCollectionsOptions.cs +++ b/src/MongoDB.Driver/ListCollectionsOptions.cs @@ -13,7 +13,9 @@ * limitations under the License. */ +using System; using MongoDB.Bson; +using MongoDB.Driver.Core.Misc; namespace MongoDB.Driver { @@ -26,6 +28,7 @@ public sealed class ListCollectionsOptions private int? _batchSize; private BsonValue _comment; private FilterDefinition<BsonDocument> _filter; + private TimeSpan? _timeout; // properties /// <summary> @@ -54,5 +57,15 @@ public FilterDefinition<BsonDocument> Filter get { return _filter; } set { _filter = value; } } + + /// <summary> + /// Gets or sets the operation timeout. + /// </summary> + // TODO: CSOT: Make it public when CSOT will be ready for GA + internal TimeSpan? Timeout + { + get => _timeout; + set => _timeout = Ensure.IsNullOrValidTimeout(value, nameof(Timeout)); + } } } diff --git a/src/MongoDB.Driver/ListDatabaseNamesOptions.cs b/src/MongoDB.Driver/ListDatabaseNamesOptions.cs index 6e3d696dcf0..9b59664794f 100644 --- a/src/MongoDB.Driver/ListDatabaseNamesOptions.cs +++ b/src/MongoDB.Driver/ListDatabaseNamesOptions.cs @@ -1,4 +1,4 @@ -/* Copyright 2020-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,7 +13,9 @@ * limitations under the License. 
*/ +using System; using MongoDB.Bson; +using MongoDB.Driver.Core.Misc; namespace MongoDB.Driver { @@ -26,6 +28,7 @@ public sealed class ListDatabaseNamesOptions private bool? _authorizedDatabases; private BsonValue _comment; private FilterDefinition<BsonDocument> _filter; + private TimeSpan? _timeout; // properties /// <summary> @@ -54,5 +57,15 @@ public FilterDefinition<BsonDocument> Filter get { return _filter; } set { _filter = value; } } + + /// <summary> + /// Gets or sets the operation timeout. + /// </summary> + // TODO: CSOT: Make it public when CSOT will be ready for GA + internal TimeSpan? Timeout + { + get => _timeout; + set => _timeout = Ensure.IsNullOrValidTimeout(value, nameof(Timeout)); + } } } diff --git a/src/MongoDB.Driver/ListDatabasesOptions.cs b/src/MongoDB.Driver/ListDatabasesOptions.cs index 9fc9bb550ba..ddb52a7a921 100644 --- a/src/MongoDB.Driver/ListDatabasesOptions.cs +++ b/src/MongoDB.Driver/ListDatabasesOptions.cs @@ -1,4 +1,4 @@ -/* Copyright 2017-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,7 +13,9 @@ * limitations under the License. */ +using System; using MongoDB.Bson; +using MongoDB.Driver.Core.Misc; namespace MongoDB.Driver { @@ -27,6 +29,7 @@ public sealed class ListDatabasesOptions private BsonValue _comment; private FilterDefinition<BsonDocument> _filter; private bool? _nameOnly; + private TimeSpan? _timeout; // properties /// <summary> @@ -64,5 +67,15 @@ public bool? NameOnly get { return _nameOnly; } set { _nameOnly = value; } } + + /// <summary> + /// Gets or sets the operation timeout. + /// </summary> + // TODO: CSOT: Make it public when CSOT will be ready for GA + internal TimeSpan? 
Timeout + { + get => _timeout; + set => _timeout = Ensure.IsNullOrValidTimeout(value, nameof(Timeout)); + } } } diff --git a/src/MongoDB.Driver/ListIndexesOptions.cs b/src/MongoDB.Driver/ListIndexesOptions.cs index 4df3bcc77bf..f40cc041375 100644 --- a/src/MongoDB.Driver/ListIndexesOptions.cs +++ b/src/MongoDB.Driver/ListIndexesOptions.cs @@ -1,4 +1,4 @@ -/* Copyright 2021-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,7 +13,9 @@ * limitations under the License. */ +using System; using MongoDB.Bson; +using MongoDB.Driver.Core.Misc; namespace MongoDB.Driver { @@ -24,6 +26,7 @@ public sealed class ListIndexesOptions { private int? _batchSize; private BsonValue _comment; + private TimeSpan? _timeout; /// <summary> /// Gets or sets the batch size. @@ -42,5 +45,15 @@ public BsonValue Comment get { return _comment; } set { _comment = value; } } + + /// <summary> + /// Gets or sets the operation timeout. + /// </summary> + // TODO: CSOT: Make it public when CSOT will be ready for GA + internal TimeSpan? Timeout + { + get => _timeout; + set => _timeout = Ensure.IsNullOrValidTimeout(value, nameof(Timeout)); + } } } diff --git a/src/MongoDB.Driver/MapReduceOptions.cs b/src/MongoDB.Driver/MapReduceOptions.cs index 53075a94f60..5b268609eea 100644 --- a/src/MongoDB.Driver/MapReduceOptions.cs +++ b/src/MongoDB.Driver/MapReduceOptions.cs @@ -40,6 +40,7 @@ public sealed class MapReduceOptions<TDocument, TResult> private IBsonSerializer<TResult> _resultSerializer; private BsonDocument _scope; private SortDefinition<TDocument> _sort; + private TimeSpan? _timeout; private bool? _verbose; // properties @@ -143,6 +144,16 @@ public SortDefinition<TDocument> Sort set { _sort = value; } } + /// <summary> + /// Gets or sets the operation timeout. 
+ /// </summary> + // TODO: CSOT: Make it public when CSOT will be ready for GA + internal TimeSpan? Timeout + { + get => _timeout; + set => _timeout = Ensure.IsNullOrValidTimeout(value, nameof(Timeout)); + } + /// <summary> /// Gets or sets whether to include timing information. /// </summary> diff --git a/src/MongoDB.Driver/MongoClient.cs b/src/MongoDB.Driver/MongoClient.cs index 0fa1b4eb8d2..4b70bfdfd16 100644 --- a/src/MongoDB.Driver/MongoClient.cs +++ b/src/MongoDB.Driver/MongoClient.cs @@ -23,7 +23,6 @@ using MongoDB.Bson.IO; using MongoDB.Bson.Serialization; using MongoDB.Bson.Serialization.Serializers; -using MongoDB.Driver.Core.Bindings; using MongoDB.Driver.Core.Clusters; using MongoDB.Driver.Core.Logging; using MongoDB.Driver.Core.Misc; @@ -42,6 +41,7 @@ public sealed class MongoClient : IMongoClient private readonly IClusterInternal _cluster; #pragma warning restore CA2213 // Disposable fields should be disposed private readonly IAutoEncryptionLibMongoCryptController _libMongoCryptController; + private readonly Func<IMongoClient, IOperationExecutor> _operationExecutorFactory; private readonly IOperationExecutor _operationExecutor; private readonly MongoClientSettings _settings; private readonly ILogger<LogCategories.Client> _logger; @@ -60,23 +60,9 @@ public MongoClient() /// </summary> /// <param name="settings">The settings.</param> public MongoClient(MongoClientSettings settings) + : this(settings, client => new OperationExecutor(client)) { - _settings = Ensure.IsNotNull(settings, nameof(settings)).FrozenCopy(); - _logger = _settings.LoggingSettings?.CreateLogger<LogCategories.Client>(); - - _cluster = _settings.ClusterSource.Get(_settings.ToClusterKey()); - _operationExecutor = new OperationExecutor(this); - if (settings.AutoEncryptionOptions != null) - { - _libMongoCryptController = - MongoClientSettings.Extensions.AutoEncryptionProvider.CreateAutoCryptClientController(this, settings.AutoEncryptionOptions); - 
_settings.LoggingSettings?.CreateLogger<LogCategories.Client>()?.LogTrace( - StructuredLogTemplateProviders.TopologyId_Message_SharedLibraryVersion, - _cluster.ClusterId, - "CryptClient created. Configured shared library version: ", - _libMongoCryptController.CryptSharedLibraryVersion() ?? "None"); - } } /// <summary> @@ -97,10 +83,25 @@ public MongoClient(string connectionString) { } - internal MongoClient(IOperationExecutor operationExecutor, MongoClientSettings settings) - : this(settings) + internal MongoClient(MongoClientSettings settings, Func<IMongoClient, IOperationExecutor> operationExecutorFactory) { - _operationExecutor = operationExecutor; + _settings = Ensure.IsNotNull(settings, nameof(settings)).FrozenCopy(); + _operationExecutorFactory = Ensure.IsNotNull(operationExecutorFactory, nameof(operationExecutorFactory)); + _logger = _settings.LoggingSettings?.CreateLogger<LogCategories.Client>(); + _cluster = _settings.ClusterSource.Get(_settings.ToClusterKey()); + _operationExecutor = _operationExecutorFactory(this); + + if (settings.AutoEncryptionOptions != null) + { + _libMongoCryptController = + MongoClientSettings.Extensions.AutoEncryptionProvider.CreateAutoCryptClientController(this, settings.AutoEncryptionOptions); + + _settings.LoggingSettings?.CreateLogger<LogCategories.Client>()?.LogTrace( + StructuredLogTemplateProviders.TopologyId_Message_SharedLibraryVersion, + _cluster.ClusterId, + "CryptClient created. Configured shared library version: ", + _libMongoCryptController.CryptSharedLibraryVersion() ?? 
"None"); + } } // public properties @@ -112,13 +113,11 @@ internal MongoClient(IOperationExecutor operationExecutor, MongoClientSettings s // internal properties internal IAutoEncryptionLibMongoCryptController LibMongoCryptController => ThrowIfDisposed(_libMongoCryptController); - internal IOperationExecutor OperationExecutor => ThrowIfDisposed(_operationExecutor); // internal methods internal void ConfigureAutoEncryptionMessageEncoderSettings(MessageEncoderSettings messageEncoderSettings) { ThrowIfDisposed(); - var autoEncryptionOptions = _settings.AutoEncryptionOptions; if (autoEncryptionOptions != null) { @@ -133,32 +132,36 @@ internal void ConfigureAutoEncryptionMessageEncoderSettings(MessageEncoderSettin // public methods /// <inheritdoc/> public ClientBulkWriteResult BulkWrite(IReadOnlyList<BulkWriteModel> models, ClientBulkWriteOptions options = null, CancellationToken cancellationToken = default) - => UsingImplicitSession(session => BulkWrite(session, models, options, cancellationToken), cancellationToken); + { + ThrowIfDisposed(); + using var session = _operationExecutor.StartImplicitSession(); + return BulkWrite(session, models, options, cancellationToken); + } /// <inheritdoc/> public ClientBulkWriteResult BulkWrite(IClientSessionHandle session, IReadOnlyList<BulkWriteModel> models, ClientBulkWriteOptions options = null, CancellationToken cancellationToken = default) { + Ensure.IsNotNull(session, nameof(session)); + ThrowIfDisposed(); var operation = CreateClientBulkWriteOperation(models, options); - return ExecuteWriteOperation<ClientBulkWriteResult>(session, operation, cancellationToken); + return ExecuteWriteOperation<ClientBulkWriteResult>(session, operation, options?.Timeout, cancellationToken); } /// <inheritdoc/> - public Task<ClientBulkWriteResult> BulkWriteAsync(IReadOnlyList<BulkWriteModel> models, ClientBulkWriteOptions options = null, CancellationToken cancellationToken = default) - => UsingImplicitSession(session => BulkWriteAsync(session, 
models, options, cancellationToken), cancellationToken); - - /// <inheritdoc/> - public Task<ClientBulkWriteResult> BulkWriteAsync(IClientSessionHandle session, IReadOnlyList<BulkWriteModel> models, ClientBulkWriteOptions options = null, CancellationToken cancellationToken = default) + public async Task<ClientBulkWriteResult> BulkWriteAsync(IReadOnlyList<BulkWriteModel> models, ClientBulkWriteOptions options = null, CancellationToken cancellationToken = default) { - var operation = CreateClientBulkWriteOperation(models, options); - return ExecuteWriteOperationAsync<ClientBulkWriteResult>(session, operation, cancellationToken); + ThrowIfDisposed(); + using var session = _operationExecutor.StartImplicitSession(); + return await BulkWriteAsync(session, models, options, cancellationToken).ConfigureAwait(false); } /// <inheritdoc/> - public void DropDatabase(string name, CancellationToken cancellationToken = default(CancellationToken)) + public Task<ClientBulkWriteResult> BulkWriteAsync(IClientSessionHandle session, IReadOnlyList<BulkWriteModel> models, ClientBulkWriteOptions options = null, CancellationToken cancellationToken = default) { + Ensure.IsNotNull(session, nameof(session)); ThrowIfDisposed(); - - UsingImplicitSession(session => DropDatabase(session, name, cancellationToken), cancellationToken); + var operation = CreateClientBulkWriteOperation(models, options); + return ExecuteWriteOperationAsync<ClientBulkWriteResult>(session, operation, options?.Timeout, cancellationToken); } /// <inheritdoc/> @@ -180,6 +183,7 @@ public void Dispose(bool disposing) { _logger?.LogDebug(_cluster.ClusterId, "MongoClient disposing"); + _operationExecutor.Dispose(); _settings.ClusterSource.Return(_cluster); _libMongoCryptController?.Dispose(); @@ -191,39 +195,39 @@ public void Dispose(bool disposing) } /// <inheritdoc/> - public void DropDatabase(IClientSessionHandle session, string name, CancellationToken cancellationToken = default(CancellationToken)) + public void 
DropDatabase(string name, CancellationToken cancellationToken = default) { - Ensure.IsNotNull(session, nameof(session)); ThrowIfDisposed(); - - var messageEncoderSettings = GetMessageEncoderSettings(); - var operation = new DropDatabaseOperation(new DatabaseNamespace(name), messageEncoderSettings) - { - WriteConcern = _settings.WriteConcern - }; - ExecuteWriteOperation(session, operation, cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + DropDatabase(session, name, cancellationToken); } /// <inheritdoc/> - public Task DropDatabaseAsync(string name, CancellationToken cancellationToken = default(CancellationToken)) + public void DropDatabase(IClientSessionHandle session, string name, CancellationToken cancellationToken = default) { + Ensure.IsNotNull(session, nameof(session)); ThrowIfDisposed(); - - return UsingImplicitSessionAsync(session => DropDatabaseAsync(session, name, cancellationToken), cancellationToken); + var operation = CreateDropDatabaseOperation(name); + // TODO: CSOT: find a way to add timeout parameter to the interface method + ExecuteWriteOperation(session, operation, null, cancellationToken); } /// <inheritdoc/> - public Task DropDatabaseAsync(IClientSessionHandle session, string name, CancellationToken cancellationToken = default(CancellationToken)) + public async Task DropDatabaseAsync(string name, CancellationToken cancellationToken = default) { ThrowIfDisposed(); + using var session = _operationExecutor.StartImplicitSession(); + await DropDatabaseAsync(session, name, cancellationToken).ConfigureAwait(false); + } + /// <inheritdoc/> + public Task DropDatabaseAsync(IClientSessionHandle session, string name, CancellationToken cancellationToken = default) + { Ensure.IsNotNull(session, nameof(session)); - var messageEncoderSettings = GetMessageEncoderSettings(); - var operation = new DropDatabaseOperation(new DatabaseNamespace(name), messageEncoderSettings) - { - WriteConcern = _settings.WriteConcern - }; - return 
ExecuteWriteOperationAsync(session, operation, cancellationToken); + ThrowIfDisposed(); + var operation = CreateDropDatabaseOperation(name); + // TODO: CSOT: find a way to add timeout parameter to the interface method + return ExecuteWriteOperationAsync(session, operation, null, cancellationToken); } /// <inheritdoc/> @@ -241,205 +245,143 @@ public IMongoDatabase GetDatabase(string name, MongoDatabaseSettings settings = } /// <inheritdoc /> - public IAsyncCursor<string> ListDatabaseNames( - CancellationToken cancellationToken = default(CancellationToken)) - { - ThrowIfDisposed(); - - return ListDatabaseNames(options: null, cancellationToken); - } + public IAsyncCursor<string> ListDatabaseNames(CancellationToken cancellationToken = default) + => ListDatabaseNames(options: null, cancellationToken); /// <inheritdoc /> public IAsyncCursor<string> ListDatabaseNames( ListDatabaseNamesOptions options, - CancellationToken cancellationToken = default(CancellationToken)) + CancellationToken cancellationToken = default) { ThrowIfDisposed(); - - return UsingImplicitSession(session => ListDatabaseNames(session, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + return ListDatabaseNames(session, options, cancellationToken); } /// <inheritdoc /> public IAsyncCursor<string> ListDatabaseNames( IClientSessionHandle session, - CancellationToken cancellationToken = default(CancellationToken)) - { - ThrowIfDisposed(); - - return ListDatabaseNames(session, options: null, cancellationToken); - } + CancellationToken cancellationToken = default) + => ListDatabaseNames(session, options: null, cancellationToken); /// <inheritdoc /> public IAsyncCursor<string> ListDatabaseNames( IClientSessionHandle session, ListDatabaseNamesOptions options, - CancellationToken cancellationToken = default(CancellationToken)) + CancellationToken cancellationToken = default) { ThrowIfDisposed(); - var listDatabasesOptions = 
CreateListDatabasesOptionsFromListDatabaseNamesOptions(options); var databases = ListDatabases(session, listDatabasesOptions, cancellationToken); - return CreateDatabaseNamesCursor(databases); } /// <inheritdoc /> - public Task<IAsyncCursor<string>> ListDatabaseNamesAsync( - CancellationToken cancellationToken = default(CancellationToken)) - { - ThrowIfDisposed(); - - return ListDatabaseNamesAsync(options: null, cancellationToken); - } + public Task<IAsyncCursor<string>> ListDatabaseNamesAsync(CancellationToken cancellationToken = default) + => ListDatabaseNamesAsync(options: null, cancellationToken); /// <inheritdoc /> - public Task<IAsyncCursor<string>> ListDatabaseNamesAsync( + public async Task<IAsyncCursor<string>> ListDatabaseNamesAsync( ListDatabaseNamesOptions options, - CancellationToken cancellationToken = default(CancellationToken)) + CancellationToken cancellationToken = default) { ThrowIfDisposed(); - - return UsingImplicitSessionAsync(session => ListDatabaseNamesAsync(session, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + return await ListDatabaseNamesAsync(session, options, cancellationToken).ConfigureAwait(false); } /// <inheritdoc /> public Task<IAsyncCursor<string>> ListDatabaseNamesAsync( IClientSessionHandle session, - CancellationToken cancellationToken = default(CancellationToken)) - { - ThrowIfDisposed(); - - return ListDatabaseNamesAsync(session, options: null, cancellationToken); - } + CancellationToken cancellationToken = default) + => ListDatabaseNamesAsync(session, options: null, cancellationToken); /// <inheritdoc /> public async Task<IAsyncCursor<string>> ListDatabaseNamesAsync( IClientSessionHandle session, ListDatabaseNamesOptions options, - CancellationToken cancellationToken = default(CancellationToken)) + CancellationToken cancellationToken = default) { ThrowIfDisposed(); - var listDatabasesOptions = 
CreateListDatabasesOptionsFromListDatabaseNamesOptions(options); var databases = await ListDatabasesAsync(session, listDatabasesOptions, cancellationToken).ConfigureAwait(false); - return CreateDatabaseNamesCursor(databases); } /// <inheritdoc/> - public IAsyncCursor<BsonDocument> ListDatabases( - CancellationToken cancellationToken = default(CancellationToken)) + public IAsyncCursor<BsonDocument> ListDatabases(CancellationToken cancellationToken) { ThrowIfDisposed(); - - return UsingImplicitSession(session => ListDatabases(session, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + return ListDatabases(session, cancellationToken); } /// <inheritdoc/> public IAsyncCursor<BsonDocument> ListDatabases( ListDatabasesOptions options, - CancellationToken cancellationToken = default(CancellationToken)) + CancellationToken cancellationToken = default) { ThrowIfDisposed(); - - return UsingImplicitSession(session => ListDatabases(session, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + return ListDatabases(session, options, cancellationToken); } /// <inheritdoc/> public IAsyncCursor<BsonDocument> ListDatabases( IClientSessionHandle session, - CancellationToken cancellationToken = default(CancellationToken)) - { - ThrowIfDisposed(); - - return ListDatabases(session, null, cancellationToken); - } + CancellationToken cancellationToken = default) + => ListDatabases(session, null, cancellationToken); /// <inheritdoc/> public IAsyncCursor<BsonDocument> ListDatabases( IClientSessionHandle session, ListDatabasesOptions options, - CancellationToken cancellationToken = default(CancellationToken)) + CancellationToken cancellationToken = default) { ThrowIfDisposed(); - Ensure.IsNotNull(session, nameof(session)); - options = options ?? 
new ListDatabasesOptions(); - var messageEncoderSettings = GetMessageEncoderSettings(); - var translationOptions = _settings.TranslationOptions; - var operation = CreateListDatabaseOperation(options, messageEncoderSettings, translationOptions); - return ExecuteReadOperation(session, operation, cancellationToken); + var operation = CreateListDatabasesOperation(options); + return ExecuteReadOperation(session, operation, options?.Timeout, cancellationToken); } /// <inheritdoc/> - public Task<IAsyncCursor<BsonDocument>> ListDatabasesAsync( - CancellationToken cancellationToken = default(CancellationToken)) + public async Task<IAsyncCursor<BsonDocument>> ListDatabasesAsync(CancellationToken cancellationToken = default) { ThrowIfDisposed(); - - return UsingImplicitSessionAsync(session => ListDatabasesAsync(session, null, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + return await ListDatabasesAsync(session, cancellationToken).ConfigureAwait(false); } /// <inheritdoc/> - public Task<IAsyncCursor<BsonDocument>> ListDatabasesAsync( + public async Task<IAsyncCursor<BsonDocument>> ListDatabasesAsync( ListDatabasesOptions options, - CancellationToken cancellationToken = default(CancellationToken)) + CancellationToken cancellationToken = default) { ThrowIfDisposed(); - - return UsingImplicitSessionAsync(session => ListDatabasesAsync(session, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + return await ListDatabasesAsync(session, options, cancellationToken).ConfigureAwait(false); } /// <inheritdoc/> public Task<IAsyncCursor<BsonDocument>> ListDatabasesAsync( IClientSessionHandle session, - CancellationToken cancellationToken = default(CancellationToken)) - { - ThrowIfDisposed(); - - return ListDatabasesAsync(session, null, cancellationToken); - } + CancellationToken cancellationToken = default) + => ListDatabasesAsync(session, null, cancellationToken); 
/// <inheritdoc/> public Task<IAsyncCursor<BsonDocument>> ListDatabasesAsync( IClientSessionHandle session, ListDatabasesOptions options, - CancellationToken cancellationToken = default(CancellationToken)) + CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); ThrowIfDisposed(); - - options = options ?? new ListDatabasesOptions(); - var messageEncoderSettings = GetMessageEncoderSettings(); - var translationOptions = _settings.TranslationOptions; - var operation = CreateListDatabaseOperation(options, messageEncoderSettings, translationOptions); - return ExecuteReadOperationAsync(session, operation, cancellationToken); - } - - /// <summary> - /// Starts an implicit session. - /// </summary> - /// <returns>A session.</returns> - internal IClientSessionHandle StartImplicitSession(CancellationToken cancellationToken) - { - ThrowIfDisposed(); - - return StartImplicitSession(); - } - - /// <summary> - /// Starts an implicit session. - /// </summary> - /// <returns>A Task whose result is a session.</returns> - internal Task<IClientSessionHandle> StartImplicitSessionAsync(CancellationToken cancellationToken) - { - ThrowIfDisposed(); - - return Task.FromResult(StartImplicitSession()); + var operation = CreateListDatabasesOperation(options); + return ExecuteReadOperationAsync(session, operation, options?.Timeout, cancellationToken); } /// <inheritdoc/> - public IClientSessionHandle StartSession(ClientSessionOptions options = null, CancellationToken cancellationToken = default(CancellationToken)) + public IClientSessionHandle StartSession(ClientSessionOptions options = null, CancellationToken cancellationToken = default) { ThrowIfDisposed(); @@ -447,7 +389,7 @@ internal Task<IClientSessionHandle> StartImplicitSessionAsync(CancellationToken } /// <inheritdoc/> - public Task<IClientSessionHandle> StartSessionAsync(ClientSessionOptions options = null, CancellationToken cancellationToken = default(CancellationToken)) + public 
Task<IClientSessionHandle> StartSessionAsync(ClientSessionOptions options = null, CancellationToken cancellationToken = default) { ThrowIfDisposed(); @@ -458,11 +400,11 @@ internal Task<IClientSessionHandle> StartImplicitSessionAsync(CancellationToken public IChangeStreamCursor<TResult> Watch<TResult>( PipelineDefinition<ChangeStreamDocument<BsonDocument>, TResult> pipeline, ChangeStreamOptions options = null, - CancellationToken cancellationToken = default(CancellationToken)) + CancellationToken cancellationToken = default) { ThrowIfDisposed(); - - return UsingImplicitSession(session => Watch(session, pipeline, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + return Watch(session, pipeline, options, cancellationToken); } /// <inheritdoc/> @@ -470,26 +412,24 @@ public IChangeStreamCursor<TResult> Watch<TResult>( IClientSessionHandle session, PipelineDefinition<ChangeStreamDocument<BsonDocument>, TResult> pipeline, ChangeStreamOptions options = null, - CancellationToken cancellationToken = default(CancellationToken)) + CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); Ensure.IsNotNull(pipeline, nameof(pipeline)); ThrowIfDisposed(); - - var translationOptions = _settings.TranslationOptions; - var operation = CreateChangeStreamOperation(pipeline, options, translationOptions); - return ExecuteReadOperation(session, operation, cancellationToken); + var operation = CreateChangeStreamOperation(pipeline, options); + return ExecuteReadOperation(session, operation, options?.Timeout, cancellationToken); } /// <inheritdoc/> - public Task<IChangeStreamCursor<TResult>> WatchAsync<TResult>( + public async Task<IChangeStreamCursor<TResult>> WatchAsync<TResult>( PipelineDefinition<ChangeStreamDocument<BsonDocument>, TResult> pipeline, ChangeStreamOptions options = null, - CancellationToken cancellationToken = default(CancellationToken)) + CancellationToken cancellationToken 
= default) { ThrowIfDisposed(); - - return UsingImplicitSessionAsync(session => WatchAsync(session, pipeline, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + return await WatchAsync(session, pipeline, options, cancellationToken).ConfigureAwait(false); } /// <inheritdoc/> @@ -497,56 +437,50 @@ public Task<IChangeStreamCursor<TResult>> WatchAsync<TResult>( IClientSessionHandle session, PipelineDefinition<ChangeStreamDocument<BsonDocument>, TResult> pipeline, ChangeStreamOptions options = null, - CancellationToken cancellationToken = default(CancellationToken)) + CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); Ensure.IsNotNull(pipeline, nameof(pipeline)); - ThrowIfDisposed(); - - var translationOptions = _settings.TranslationOptions; - var operation = CreateChangeStreamOperation(pipeline, options, translationOptions); - return ExecuteReadOperationAsync(session, operation, cancellationToken); + var operation = CreateChangeStreamOperation(pipeline, options); + return ExecuteReadOperationAsync(session, operation, options?.Timeout, cancellationToken); } /// <inheritdoc/> public IMongoClient WithReadConcern(ReadConcern readConcern) { Ensure.IsNotNull(readConcern, nameof(readConcern)); - ThrowIfDisposed(); var newSettings = Settings.Clone(); newSettings.ReadConcern = readConcern; - return new MongoClient(_operationExecutor, newSettings); + return new MongoClient(newSettings, _operationExecutorFactory); } /// <inheritdoc/> public IMongoClient WithReadPreference(ReadPreference readPreference) { Ensure.IsNotNull(readPreference, nameof(readPreference)); - ThrowIfDisposed(); var newSettings = Settings.Clone(); newSettings.ReadPreference = readPreference; - return new MongoClient(_operationExecutor, newSettings); + return new MongoClient(newSettings, _operationExecutorFactory); } /// <inheritdoc/> public IMongoClient WithWriteConcern(WriteConcern writeConcern) { 
Ensure.IsNotNull(writeConcern, nameof(writeConcern)); - ThrowIfDisposed(); var newSettings = Settings.Clone(); newSettings.WriteConcern = writeConcern; - return new MongoClient(_operationExecutor, newSettings); + return new MongoClient(newSettings, _operationExecutorFactory); } // private methods - private ClientBulkWriteOperation CreateClientBulkWriteOperation(IReadOnlyList<BulkWriteModel> models, ClientBulkWriteOptions options = null) + private ClientBulkWriteOperation CreateClientBulkWriteOperation(IReadOnlyList<BulkWriteModel> models, ClientBulkWriteOptions options) { if (_settings.AutoEncryptionOptions != null) { @@ -578,17 +512,22 @@ private ClientBulkWriteOperation CreateClientBulkWriteOperation(IReadOnlyList<Bu } private IAsyncCursor<string> CreateDatabaseNamesCursor(IAsyncCursor<BsonDocument> cursor) - { - return new BatchTransformingAsyncCursor<BsonDocument, string>( + => new BatchTransformingAsyncCursor<BsonDocument, string>( cursor, databases => databases.Select(database => database["name"].AsString)); - } - private ListDatabasesOperation CreateListDatabaseOperation( - ListDatabasesOptions options, - MessageEncoderSettings messageEncoderSettings, - ExpressionTranslationOptions translationOptions) + private DropDatabaseOperation CreateDropDatabaseOperation(string name) + => new(new DatabaseNamespace(name), GetMessageEncoderSettings()) + { + WriteConcern = _settings.WriteConcern + }; + + private ListDatabasesOperation CreateListDatabasesOperation(ListDatabasesOptions options) { + options ??= new ListDatabasesOptions(); + var messageEncoderSettings = GetMessageEncoderSettings(); + var translationOptions = _settings.TranslationOptions; + return new ListDatabasesOperation(messageEncoderSettings) { AuthorizedDatabases = options.AuthorizedDatabases, @@ -607,73 +546,58 @@ private ListDatabasesOptions CreateListDatabasesOptionsFromListDatabaseNamesOpti listDatabasesOptions.AuthorizedDatabases = options.AuthorizedDatabases; listDatabasesOptions.Filter = 
options.Filter; listDatabasesOptions.Comment = options.Comment; + listDatabasesOptions.Timeout = options.Timeout; } return listDatabasesOptions; } - private IReadBindingHandle CreateReadBinding(IClientSessionHandle session) - { - var readPreference = _settings.ReadPreference; - if (session.IsInTransaction && readPreference.ReadPreferenceMode != ReadPreferenceMode.Primary) - { - throw new InvalidOperationException("Read preference in a transaction must be primary."); - } - - var binding = new ReadPreferenceBinding(_cluster, readPreference, session.WrappedCoreSession.Fork()); - return new ReadBindingHandle(binding); - } - - private IReadWriteBindingHandle CreateReadWriteBinding(IClientSessionHandle session) - { - var binding = new WritableServerBinding(_cluster, session.WrappedCoreSession.Fork()); - return new ReadWriteBindingHandle(binding); - } - private ChangeStreamOperation<TResult> CreateChangeStreamOperation<TResult>( PipelineDefinition<ChangeStreamDocument<BsonDocument>, TResult> pipeline, - ChangeStreamOptions options, - ExpressionTranslationOptions translationOptions) - { - return ChangeStreamHelper.CreateChangeStreamOperation( + ChangeStreamOptions options) + => ChangeStreamHelper.CreateChangeStreamOperation( pipeline, options, _settings.ReadConcern, GetMessageEncoderSettings(), _settings.RetryReads, - translationOptions); - } + _settings.TranslationOptions); - private TResult ExecuteReadOperation<TResult>(IClientSessionHandle session, IReadOperation<TResult> operation, CancellationToken cancellationToken = default(CancellationToken)) + private OperationContext CreateOperationContext(IClientSessionHandle session, TimeSpan? 
timeout, CancellationToken cancellationToken) { - using (var binding = CreateReadBinding(session)) + var operationContext = session.WrappedCoreSession.CurrentTransaction?.OperationContext; + if (operationContext != null && timeout != null) { - return _operationExecutor.ExecuteReadOperation(binding, operation, cancellationToken); + throw new InvalidOperationException("Cannot specify per operation timeout inside transaction."); } + + return operationContext?.Fork() ?? new OperationContext(timeout ?? _settings.Timeout, cancellationToken); } - private async Task<TResult> ExecuteReadOperationAsync<TResult>(IClientSessionHandle session, IReadOperation<TResult> operation, CancellationToken cancellationToken = default(CancellationToken)) + private TResult ExecuteReadOperation<TResult>(IClientSessionHandle session, IReadOperation<TResult> operation, TimeSpan? timeout, CancellationToken cancellationToken) { - using (var binding = CreateReadBinding(session)) - { - return await _operationExecutor.ExecuteReadOperationAsync(binding, operation, cancellationToken).ConfigureAwait(false); - } + var readPreference = session.GetEffectiveReadPreference(_settings.ReadPreference); + using var operationContext = CreateOperationContext(session, timeout, cancellationToken); + return _operationExecutor.ExecuteReadOperation(operationContext, session, operation, readPreference, false); } - private TResult ExecuteWriteOperation<TResult>(IClientSessionHandle session, IWriteOperation<TResult> operation, CancellationToken cancellationToken = default(CancellationToken)) + private async Task<TResult> ExecuteReadOperationAsync<TResult>(IClientSessionHandle session, IReadOperation<TResult> operation, TimeSpan? 
timeout, CancellationToken cancellationToken) { - using (var binding = CreateReadWriteBinding(session)) - { - return _operationExecutor.ExecuteWriteOperation(binding, operation, cancellationToken); - } + var readPreference = session.GetEffectiveReadPreference(_settings.ReadPreference); + using var operationContext = CreateOperationContext(session, timeout, cancellationToken); + return await _operationExecutor.ExecuteReadOperationAsync(operationContext, session, operation, readPreference, false).ConfigureAwait(false); } - private async Task<TResult> ExecuteWriteOperationAsync<TResult>(IClientSessionHandle session, IWriteOperation<TResult> operation, CancellationToken cancellationToken = default(CancellationToken)) + private TResult ExecuteWriteOperation<TResult>(IClientSessionHandle session, IWriteOperation<TResult> operation, TimeSpan? timeout, CancellationToken cancellationToken) { - using (var binding = CreateReadWriteBinding(session)) - { - return await _operationExecutor.ExecuteWriteOperationAsync(binding, operation, cancellationToken).ConfigureAwait(false); - } + using var operationContext = CreateOperationContext(session, timeout, cancellationToken); + return _operationExecutor.ExecuteWriteOperation(operationContext, session, operation, false); + } + + private async Task<TResult> ExecuteWriteOperationAsync<TResult>(IClientSessionHandle session, IWriteOperation<TResult> operation, TimeSpan? 
timeout, CancellationToken cancellationToken) + { + using var operationContext = CreateOperationContext(session, timeout, cancellationToken); + return await _operationExecutor.ExecuteWriteOperationAsync(operationContext, session, operation, false).ConfigureAwait(false); } private MessageEncoderSettings GetMessageEncoderSettings() @@ -696,13 +620,6 @@ private RenderArgs<BsonDocument> GetRenderArgs() return new RenderArgs<BsonDocument>(BsonDocumentSerializer.Instance, serializerRegistry, translationOptions: translationOptions); } - private IClientSessionHandle StartImplicitSession() - { - var options = new ClientSessionOptions { CausalConsistency = false, Snapshot = false }; - ICoreSessionHandle coreSession = _cluster.StartSession(options.ToCore(isImplicit: true)); - return new ClientSessionHandle(this, options, coreSession); - } - private IClientSessionHandle StartSession(ClientSessionOptions options) { if (options != null && options.Snapshot && options.CausalConsistency == true) @@ -710,7 +627,17 @@ private IClientSessionHandle StartSession(ClientSessionOptions options) throw new NotSupportedException("Combining both causal consistency and snapshot options is not supported."); } - options = options ?? 
new ClientSessionOptions(); + options ??= new ClientSessionOptions(); + if (_settings.Timeout.HasValue && options.DefaultTransactionOptions?.Timeout == null) + { + options.DefaultTransactionOptions = new TransactionOptions( + _settings.Timeout, + options.DefaultTransactionOptions?.ReadConcern, + options.DefaultTransactionOptions?.ReadPreference, + options.DefaultTransactionOptions?.WriteConcern, + options.DefaultTransactionOptions?.MaxCommitTime); + } + var coreSession = _cluster.StartSession(options.ToCore()); return new ClientSessionHandle(this, options, coreSession); @@ -718,37 +645,5 @@ private IClientSessionHandle StartSession(ClientSessionOptions options) private void ThrowIfDisposed() => ThrowIfDisposed(string.Empty); private T ThrowIfDisposed<T>(T value) => _disposed ? throw new ObjectDisposedException(GetType().Name) : value; - - private void UsingImplicitSession(Action<IClientSessionHandle> func, CancellationToken cancellationToken) - { - using (var session = StartImplicitSession(cancellationToken)) - { - func(session); - } - } - - private TResult UsingImplicitSession<TResult>(Func<IClientSessionHandle, TResult> func, CancellationToken cancellationToken) - { - using (var session = StartImplicitSession(cancellationToken)) - { - return func(session); - } - } - - private async Task UsingImplicitSessionAsync(Func<IClientSessionHandle, Task> funcAsync, CancellationToken cancellationToken) - { - using (var session = await StartImplicitSessionAsync(cancellationToken).ConfigureAwait(false)) - { - await funcAsync(session).ConfigureAwait(false); - } - } - - private async Task<TResult> UsingImplicitSessionAsync<TResult>(Func<IClientSessionHandle, Task<TResult>> funcAsync, CancellationToken cancellationToken) - { - using (var session = await StartImplicitSessionAsync(cancellationToken).ConfigureAwait(false)) - { - return await funcAsync(session).ConfigureAwait(false); - } - } } } diff --git a/src/MongoDB.Driver/MongoClientSettings.cs 
b/src/MongoDB.Driver/MongoClientSettings.cs index d7da362ca92..652e1ccf87e 100644 --- a/src/MongoDB.Driver/MongoClientSettings.cs +++ b/src/MongoDB.Driver/MongoClientSettings.cs @@ -20,9 +20,9 @@ using System.Text; using MongoDB.Driver.Core.Compression; using MongoDB.Driver.Core.Configuration; +using MongoDB.Driver.Core.Connections; using MongoDB.Driver.Core.Misc; using MongoDB.Driver.Core.Servers; -using MongoDB.Driver.Encryption; using MongoDB.Shared; namespace MongoDB.Driver @@ -71,9 +71,11 @@ public class MongoClientSettings : IEquatable<MongoClientSettings>, IInheritable private ServerMonitoringMode _serverMonitoringMode; private TimeSpan _serverSelectionTimeout; private TimeSpan _socketTimeout; + private Socks5ProxySettings _socks5ProxySettings; private int _srvMaxHosts; private string _srvServiceName; private SslSettings _sslSettings; + private TimeSpan? _timeout; private ExpressionTranslationOptions _translationOptions; private bool _useTls; private int _waitQueueSize; @@ -122,9 +124,11 @@ public MongoClientSettings() _serverMonitoringMode = ServerMonitoringMode.Auto; _serverSelectionTimeout = MongoDefaults.ServerSelectionTimeout; _socketTimeout = MongoDefaults.SocketTimeout; + _socks5ProxySettings = null; _srvMaxHosts = 0; _srvServiceName = MongoInternalDefaults.MongoClientSettings.SrvServiceName; _sslSettings = null; + _timeout = System.Threading.Timeout.InfiniteTimeSpan; _translationOptions = null; _useTls = false; #pragma warning disable 618 @@ -428,6 +432,19 @@ public int MinConnectionPoolSize } } + /// <summary> + /// Gets or sets the SOCKS5 proxy settings. + /// </summary> + public Socks5ProxySettings Socks5ProxySettings + { + get => _socks5ProxySettings; + set + { + if (_isFrozen) { throw new InvalidOperationException("MongoClientSettings is frozen."); } + _socks5ProxySettings = value; + } + } + /// <summary> /// Gets or sets the read concern. 
/// </summary> @@ -666,6 +683,21 @@ public SslSettings SslSettings } } + /// <summary> + /// Gets or sets the per-operation timeout + /// </summary> + // TODO: CSOT: Make it public when CSOT will be ready for GA + internal TimeSpan? Timeout + { + get { return _timeout; } + set + { + ThrowIfFrozen(); + _timeout = Ensure.IsNullOrValidTimeout(value, nameof(Timeout)); + } + } + TimeSpan? IInheritableMongoClientSettings.Timeout => Timeout; + /// <summary> /// Gets or sets the translation options. /// </summary> @@ -874,6 +906,10 @@ public static MongoClientSettings FromUrl(MongoUrl url) clientSettings.ServerMonitoringMode = url.ServerMonitoringMode ?? ServerMonitoringMode.Auto; clientSettings.ServerSelectionTimeout = url.ServerSelectionTimeout; clientSettings.SocketTimeout = url.SocketTimeout; + if (!string.IsNullOrEmpty(url.ProxyHost)) + { + clientSettings.Socks5ProxySettings = Socks5ProxySettings.Create(url.ProxyHost, url.ProxyPort, url.ProxyUsername, url.ProxyPassword); + } clientSettings.SrvMaxHosts = url.SrvMaxHosts.GetValueOrDefault(0); clientSettings.SrvServiceName = url.SrvServiceName; clientSettings.SslSettings = null; @@ -881,6 +917,7 @@ public static MongoClientSettings FromUrl(MongoUrl url) { clientSettings.SslSettings = new SslSettings { CheckCertificateRevocation = false }; } + clientSettings.Timeout = url.Timeout; clientSettings.UseTls = url.UseTls; #pragma warning disable 618 clientSettings.WaitQueueSize = url.ComputedWaitQueueSize; @@ -932,9 +969,11 @@ public MongoClientSettings Clone() clone._serverMonitoringMode = _serverMonitoringMode; clone._serverSelectionTimeout = _serverSelectionTimeout; clone._socketTimeout = _socketTimeout; + clone._socks5ProxySettings = _socks5ProxySettings; clone._srvMaxHosts = _srvMaxHosts; clone._srvServiceName = _srvServiceName; clone._sslSettings = (_sslSettings == null) ? 
null : _sslSettings.Clone(); + clone._timeout = _timeout; clone._translationOptions = _translationOptions; clone._useTls = _useTls; clone._waitQueueSize = _waitQueueSize; @@ -1001,9 +1040,11 @@ public override bool Equals(object obj) _serverMonitoringMode == rhs._serverMonitoringMode && _serverSelectionTimeout == rhs._serverSelectionTimeout && _socketTimeout == rhs._socketTimeout && + object.Equals(_socks5ProxySettings, rhs._socks5ProxySettings) && _srvMaxHosts == rhs._srvMaxHosts && _srvServiceName == rhs._srvServiceName && _sslSettings == rhs._sslSettings && + _timeout == rhs._timeout && object.Equals(_translationOptions, rhs._translationOptions) && _useTls == rhs._useTls && _waitQueueSize == rhs._waitQueueSize && @@ -1088,9 +1129,11 @@ public override int GetHashCode() .Hash(_serverMonitoringMode) .Hash(_serverSelectionTimeout) .Hash(_socketTimeout) + .Hash(_socks5ProxySettings) .Hash(_srvMaxHosts) .Hash(_srvServiceName) .Hash(_sslSettings) + .Hash(_timeout) .Hash(_translationOptions) .Hash(_useTls) .Hash(_waitQueueSize) @@ -1145,6 +1188,10 @@ public override string ToString() sb.AppendFormat("MaxConnectionLifeTime={0};", _maxConnectionLifeTime); sb.AppendFormat("MaxConnectionPoolSize={0};", _maxConnectionPoolSize); sb.AppendFormat("MinConnectionPoolSize={0};", _minConnectionPoolSize); + if (_socks5ProxySettings != null) + { + sb.AppendFormat("ProxyHost={0};", _socks5ProxySettings); + } if (_readEncoding != null) { sb.Append("ReadEncoding=UTF8Encoding;"); @@ -1172,6 +1219,10 @@ public override string ToString() { sb.AppendFormat("SslSettings={0};", _sslSettings); } + if(_timeout != null) + { + sb.AppendFormat("Timeout={0};", _timeout); + } sb.AppendFormat("Tls={0};", _useTls); sb.AppendFormat("TlsInsecure={0};", _allowInsecureTls); if (_translationOptions != null) @@ -1221,6 +1272,7 @@ internal ClusterKey ToClusterKey() _serverMonitoringMode, _serverSelectionTimeout, _socketTimeout, + _socks5ProxySettings, _srvMaxHosts, _srvServiceName, _sslSettings, diff --git 
a/src/MongoDB.Driver/MongoCollectionBase.cs b/src/MongoDB.Driver/MongoCollectionBase.cs index 8a38110a899..b7c937de1c0 100644 --- a/src/MongoDB.Driver/MongoCollectionBase.cs +++ b/src/MongoDB.Driver/MongoCollectionBase.cs @@ -164,7 +164,8 @@ private DeleteResult DeleteMany(FilterDefinition<TDocument> filter, DeleteOption var bulkWriteOptions = new BulkWriteOptions { Comment = options.Comment, - Let = options.Let + Let = options.Let, + Timeout = options.Timeout }; var result = bulkWriteFunc(new[] { model }, bulkWriteOptions); return DeleteResult.FromCore(result); @@ -205,7 +206,8 @@ private async Task<DeleteResult> DeleteManyAsync(FilterDefinition<TDocument> fil var bulkWriteOptions = new BulkWriteOptions { Comment = options.Comment, - Let = options.Let + Let = options.Let, + Timeout = options.Timeout }; var result = await bulkWriteFuncAsync(new[] { model }, bulkWriteOptions).ConfigureAwait(false); return DeleteResult.FromCore(result); @@ -246,7 +248,8 @@ private DeleteResult DeleteOne(FilterDefinition<TDocument> filter, DeleteOptions var bulkWriteOptions = new BulkWriteOptions { Comment = options.Comment, - Let = options.Let + Let = options.Let, + Timeout = options.Timeout }; var result = bulkWrite(new[] { model }, bulkWriteOptions); return DeleteResult.FromCore(result); @@ -287,7 +290,8 @@ private async Task<DeleteResult> DeleteOneAsync(FilterDefinition<TDocument> filt var bulkWriteOptions = new BulkWriteOptions { Comment = options.Comment, - Let = options.Let + Let = options.Let, + Timeout = options.Timeout }; var result = await bulkWriteAsync(new[] { model }, bulkWriteOptions).ConfigureAwait(false); return DeleteResult.FromCore(result); @@ -435,7 +439,8 @@ private void InsertOne(TDocument document, InsertOneOptions options, Action<IEnu var bulkWriteOptions = options == null ? 
null : new BulkWriteOptions { BypassDocumentValidation = options.BypassDocumentValidation, - Comment = options.Comment + Comment = options.Comment, + Timeout = options.Timeout }; bulkWrite(new[] { model }, bulkWriteOptions); } @@ -471,7 +476,8 @@ private async Task InsertOneAsync(TDocument document, InsertOneOptions options, var bulkWriteOptions = options == null ? null : new BulkWriteOptions { BypassDocumentValidation = options.BypassDocumentValidation, - Comment = options.Comment + Comment = options.Comment, + Timeout = options.Timeout }; await bulkWriteAsync(new[] { model }, bulkWriteOptions).ConfigureAwait(false); } @@ -500,7 +506,8 @@ private void InsertMany(IEnumerable<TDocument> documents, InsertManyOptions opti { BypassDocumentValidation = options.BypassDocumentValidation, Comment = options.Comment, - IsOrdered = options.IsOrdered + IsOrdered = options.IsOrdered, + Timeout = options.Timeout }; bulkWrite(models, bulkWriteOptions); } @@ -524,7 +531,8 @@ private Task InsertManyAsync(IEnumerable<TDocument> documents, InsertManyOptions { BypassDocumentValidation = options.BypassDocumentValidation, Comment = options.Comment, - IsOrdered = options.IsOrdered + IsOrdered = options.IsOrdered, + Timeout = options.Timeout }; return bulkWriteAsync(models, bulkWriteOptions); } @@ -598,7 +606,8 @@ private ReplaceOneResult ReplaceOne(FilterDefinition<TDocument> filter, TDocumen { BypassDocumentValidation = options.BypassDocumentValidation, Comment = options.Comment, - Let = options.Let + Let = options.Let, + Timeout = options.Timeout }; var result = bulkWrite(new[] { model }, bulkWriteOptions); return ReplaceOneResult.FromCore(result); @@ -656,7 +665,8 @@ private async Task<ReplaceOneResult> ReplaceOneAsync(FilterDefinition<TDocument> { BypassDocumentValidation = options.BypassDocumentValidation, Comment = options.Comment, - Let = options.Let + Let = options.Let, + Timeout = options.Timeout }; var result = await bulkWriteAsync(new[] { model }, 
bulkWriteOptions).ConfigureAwait(false); return ReplaceOneResult.FromCore(result); @@ -697,7 +707,8 @@ private UpdateResult UpdateMany(FilterDefinition<TDocument> filter, UpdateDefini { BypassDocumentValidation = options.BypassDocumentValidation, Comment = options.Comment, - Let = options.Let + Let = options.Let, + Timeout = options.Timeout }; var result = bulkWrite(new[] { model }, bulkWriteOptions); return UpdateResult.FromCore(result); @@ -738,7 +749,8 @@ private async Task<UpdateResult> UpdateManyAsync(FilterDefinition<TDocument> fil { BypassDocumentValidation = options.BypassDocumentValidation, Comment = options.Comment, - Let = options.Let + Let = options.Let, + Timeout = options.Timeout }; var result = await bulkWriteAsync(new[] { model }, bulkWriteOptions).ConfigureAwait(false); return UpdateResult.FromCore(result); @@ -784,7 +796,8 @@ private UpdateResult UpdateOne(FilterDefinition<TDocument> filter, UpdateDefinit { BypassDocumentValidation = options.BypassDocumentValidation, Comment = options.Comment, - Let = options.Let + Let = options.Let, + Timeout = options.Timeout }; var result = bulkWrite(new[] { model }, bulkWriteOptions); return UpdateResult.FromCore(result); @@ -830,7 +843,8 @@ private async Task<UpdateResult> UpdateOneAsync(FilterDefinition<TDocument> filt { BypassDocumentValidation = options.BypassDocumentValidation, Comment = options.Comment, - Let = options.Let + Let = options.Let, + Timeout = options.Timeout }; var result = await bulkWriteAsync(new[] { model }, bulkWriteOptions).ConfigureAwait(false); return UpdateResult.FromCore(result); diff --git a/src/MongoDB.Driver/MongoCollectionImpl.cs b/src/MongoDB.Driver/MongoCollectionImpl.cs index 51f03dc0a04..20b11c8c942 100644 --- a/src/MongoDB.Driver/MongoCollectionImpl.cs +++ b/src/MongoDB.Driver/MongoCollectionImpl.cs @@ -21,8 +21,6 @@ using MongoDB.Bson; using MongoDB.Bson.IO; using MongoDB.Bson.Serialization; -using MongoDB.Driver.Core; -using MongoDB.Driver.Core.Bindings; using 
MongoDB.Driver.Core.Clusters; using MongoDB.Driver.Core.Misc; using MongoDB.Driver.Core.Operations; @@ -94,160 +92,126 @@ public override MongoCollectionSettings Settings } // public methods - public override IAsyncCursor<TResult> Aggregate<TResult>(PipelineDefinition<TDocument, TResult> pipeline, AggregateOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public override IAsyncCursor<TResult> Aggregate<TResult>(PipelineDefinition<TDocument, TResult> pipeline, AggregateOptions options, CancellationToken cancellationToken = default) { - return UsingImplicitSession(session => Aggregate(session, pipeline, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + return Aggregate(session, pipeline, options, cancellationToken: cancellationToken); } - public override IAsyncCursor<TResult> Aggregate<TResult>(IClientSessionHandle session, PipelineDefinition<TDocument, TResult> pipeline, AggregateOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public override IAsyncCursor<TResult> Aggregate<TResult>(IClientSessionHandle session, PipelineDefinition<TDocument, TResult> pipeline, AggregateOptions options, CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); Ensure.IsNotNull(pipeline, nameof(pipeline)); - options = options ?? 
new AggregateOptions(); + options ??= new AggregateOptions(); var renderArgs = GetRenderArgs(options.TranslationOptions); - var renderedPipeline = pipeline.Render(renderArgs); - - var lastStage = renderedPipeline.Documents.LastOrDefault(); - var lastStageName = lastStage?.GetElement(0).Name; - if (lastStage != null && (lastStageName == "$out" || lastStageName == "$merge")) + var renderedPipeline = AggregateHelper.RenderAggregatePipeline(pipeline, renderArgs, out var isAggregateToCollection); + if (isAggregateToCollection) { var aggregateOperation = CreateAggregateToCollectionOperation(renderedPipeline, options); - ExecuteWriteOperation(session, aggregateOperation, cancellationToken); - - // we want to delay execution of the find because the user may - // not want to iterate the results at all... - var findOperation = CreateAggregateToCollectionFindOperation(lastStage, renderedPipeline.OutputSerializer, options); - var forkedSession = session.Fork(); - var deferredCursor = new DeferredAsyncCursor<TResult>( - () => forkedSession.Dispose(), - ct => ExecuteReadOperation(forkedSession, findOperation, ReadPreference.Primary, ct), - ct => ExecuteReadOperationAsync(forkedSession, findOperation, ReadPreference.Primary, ct)); - return deferredCursor; + ExecuteWriteOperation(session, aggregateOperation, options.Timeout, cancellationToken); + return CreateAggregateToCollectionResultCursor(session, renderedPipeline, options); } else { var aggregateOperation = CreateAggregateOperation(renderedPipeline, options); - return ExecuteReadOperation(session, aggregateOperation, cancellationToken); + return ExecuteReadOperation(session, aggregateOperation, options.Timeout, cancellationToken); } } - public override Task<IAsyncCursor<TResult>> AggregateAsync<TResult>(PipelineDefinition<TDocument, TResult> pipeline, AggregateOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public override async Task<IAsyncCursor<TResult>> 
AggregateAsync<TResult>(PipelineDefinition<TDocument, TResult> pipeline, AggregateOptions options, CancellationToken cancellationToken = default) { - return UsingImplicitSessionAsync(session => AggregateAsync(session, pipeline, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + return await AggregateAsync(session, pipeline, options, cancellationToken).ConfigureAwait(false); } - public override async Task<IAsyncCursor<TResult>> AggregateAsync<TResult>(IClientSessionHandle session, PipelineDefinition<TDocument, TResult> pipeline, AggregateOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public override async Task<IAsyncCursor<TResult>> AggregateAsync<TResult>(IClientSessionHandle session, PipelineDefinition<TDocument, TResult> pipeline, AggregateOptions options, CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); Ensure.IsNotNull(pipeline, nameof(pipeline)); - options = options ?? new AggregateOptions(); + options ??= new AggregateOptions(); var renderArgs = GetRenderArgs(options.TranslationOptions); - var renderedPipeline = pipeline.Render(renderArgs); - - var lastStage = renderedPipeline.Documents.LastOrDefault(); - var lastStageName = lastStage?.GetElement(0).Name; - if (lastStage != null && (lastStageName == "$out" || lastStageName == "$merge")) + var renderedPipeline = AggregateHelper.RenderAggregatePipeline(pipeline, renderArgs, out var isAggregateToCollection); + if (isAggregateToCollection) { var aggregateOperation = CreateAggregateToCollectionOperation(renderedPipeline, options); - await ExecuteWriteOperationAsync(session, aggregateOperation, cancellationToken).ConfigureAwait(false); - - // we want to delay execution of the find because the user may - // not want to iterate the results at all... 
- var findOperation = CreateAggregateToCollectionFindOperation(lastStage, renderedPipeline.OutputSerializer, options); - var forkedSession = session.Fork(); - var deferredCursor = new DeferredAsyncCursor<TResult>( - () => forkedSession.Dispose(), - ct => ExecuteReadOperation(forkedSession, findOperation, ReadPreference.Primary, ct), - ct => ExecuteReadOperationAsync(forkedSession, findOperation, ReadPreference.Primary, ct)); - return await Task.FromResult<IAsyncCursor<TResult>>(deferredCursor).ConfigureAwait(false); + await ExecuteWriteOperationAsync(session, aggregateOperation, options.Timeout, cancellationToken).ConfigureAwait(false); + return CreateAggregateToCollectionResultCursor(session, renderedPipeline, options); } else { var aggregateOperation = CreateAggregateOperation(renderedPipeline, options); - return await ExecuteReadOperationAsync(session, aggregateOperation, cancellationToken).ConfigureAwait(false); + return await ExecuteReadOperationAsync(session, aggregateOperation, options.Timeout, cancellationToken).ConfigureAwait(false); } } - public override void AggregateToCollection<TResult>(PipelineDefinition<TDocument, TResult> pipeline, AggregateOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public override void AggregateToCollection<TResult>(PipelineDefinition<TDocument, TResult> pipeline, AggregateOptions options, CancellationToken cancellationToken = default) { - UsingImplicitSession(session => AggregateToCollection(session, pipeline, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + AggregateToCollection(session, pipeline, options, cancellationToken); } - public override void AggregateToCollection<TResult>(IClientSessionHandle session, PipelineDefinition<TDocument, TResult> pipeline, AggregateOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public override void AggregateToCollection<TResult>(IClientSessionHandle 
session, PipelineDefinition<TDocument, TResult> pipeline, AggregateOptions options, CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); Ensure.IsNotNull(pipeline, nameof(pipeline)); - options = options ?? new AggregateOptions(); + options ??= new AggregateOptions(); var renderArgs = GetRenderArgs(options.TranslationOptions); - var renderedPipeline = pipeline.Render(renderArgs); - - var lastStage = renderedPipeline.Documents.LastOrDefault(); - var lastStageName = lastStage?.GetElement(0).Name; - if (lastStage == null || (lastStageName != "$out" && lastStageName != "$merge")) + var renderedPipeline = AggregateHelper.RenderAggregatePipeline(pipeline, renderArgs, out var isAggregateToCollection); + if (renderedPipeline.Documents.Count == 0 || !isAggregateToCollection) { throw new InvalidOperationException("AggregateToCollection requires that the last stage be $out or $merge."); } var aggregateOperation = CreateAggregateToCollectionOperation(renderedPipeline, options); - ExecuteWriteOperation(session, aggregateOperation, cancellationToken); + ExecuteWriteOperation(session, aggregateOperation, options.Timeout, cancellationToken); } - public override Task AggregateToCollectionAsync<TResult>(PipelineDefinition<TDocument, TResult> pipeline, AggregateOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public override async Task AggregateToCollectionAsync<TResult>(PipelineDefinition<TDocument, TResult> pipeline, AggregateOptions options, CancellationToken cancellationToken = default) { - return UsingImplicitSessionAsync(session => AggregateToCollectionAsync(session, pipeline, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + await AggregateToCollectionAsync(session, pipeline, options, cancellationToken).ConfigureAwait(false); } - public override async Task AggregateToCollectionAsync<TResult>(IClientSessionHandle session, 
PipelineDefinition<TDocument, TResult> pipeline, AggregateOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public override Task AggregateToCollectionAsync<TResult>(IClientSessionHandle session, PipelineDefinition<TDocument, TResult> pipeline, AggregateOptions options, CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); Ensure.IsNotNull(pipeline, nameof(pipeline)); - options = options ?? new AggregateOptions(); + options ??= new AggregateOptions(); var renderArgs = GetRenderArgs(options.TranslationOptions); - var renderedPipeline = pipeline.Render(renderArgs); - - var lastStage = renderedPipeline.Documents.LastOrDefault(); - var lastStageName = lastStage?.GetElement(0).Name; - if (lastStage == null || (lastStageName != "$out" && lastStageName != "$merge")) + var renderedPipeline = AggregateHelper.RenderAggregatePipeline(pipeline, renderArgs, out var isAggregateToCollection); + if (renderedPipeline.Documents.Count == 0 || !isAggregateToCollection) { throw new InvalidOperationException("AggregateToCollectionAsync requires that the last stage be $out or $merge."); } var aggregateOperation = CreateAggregateToCollectionOperation(renderedPipeline, options); - await ExecuteWriteOperationAsync(session, aggregateOperation, cancellationToken).ConfigureAwait(false); + return ExecuteWriteOperationAsync(session, aggregateOperation, options.Timeout, cancellationToken); } - public override BulkWriteResult<TDocument> BulkWrite(IEnumerable<WriteModel<TDocument>> requests, BulkWriteOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public override BulkWriteResult<TDocument> BulkWrite(IEnumerable<WriteModel<TDocument>> requests, BulkWriteOptions options, CancellationToken cancellationToken = default) { - return UsingImplicitSession(session => BulkWrite(session, requests, options, cancellationToken), cancellationToken); + using var session = 
_operationExecutor.StartImplicitSession(); + return BulkWrite(session, requests, options, cancellationToken); } - public override BulkWriteResult<TDocument> BulkWrite(IClientSessionHandle session, IEnumerable<WriteModel<TDocument>> requests, BulkWriteOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public override BulkWriteResult<TDocument> BulkWrite(IClientSessionHandle session, IEnumerable<WriteModel<TDocument>> requests, BulkWriteOptions options, CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); - Ensure.IsNotNull((object)requests, nameof(requests)); - + Ensure.IsNotNull(requests, nameof(requests)); var requestsArray = requests.ToArray(); if (requestsArray.Length == 0) { throw new ArgumentException("Must contain at least 1 request.", nameof(requests)); } - foreach (var request in requestsArray) - { - request.ThrowIfNotValid(); - } - - options = options ?? new BulkWriteOptions(); - - var renderArgs = GetRenderArgs(); - var operation = CreateBulkWriteOperation(session, requestsArray, options, renderArgs); + var operation = CreateBulkWriteOperation(session, requestsArray, options); try { - var result = ExecuteWriteOperation(session, operation, cancellationToken); + var result = ExecuteWriteOperation(session, operation, options?.Timeout, cancellationToken); return BulkWriteResult<TDocument>.FromCore(result, requestsArray); } catch (MongoBulkWriteOperationException ex) @@ -256,34 +220,26 @@ public override MongoCollectionSettings Settings } } - public override Task<BulkWriteResult<TDocument>> BulkWriteAsync(IEnumerable<WriteModel<TDocument>> requests, BulkWriteOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public override async Task<BulkWriteResult<TDocument>> BulkWriteAsync(IEnumerable<WriteModel<TDocument>> requests, BulkWriteOptions options, CancellationToken cancellationToken = default) { - return UsingImplicitSessionAsync(session => 
BulkWriteAsync(session, requests, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + return await BulkWriteAsync(session, requests, options, cancellationToken).ConfigureAwait(false); } - public override async Task<BulkWriteResult<TDocument>> BulkWriteAsync(IClientSessionHandle session, IEnumerable<WriteModel<TDocument>> requests, BulkWriteOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public override async Task<BulkWriteResult<TDocument>> BulkWriteAsync(IClientSessionHandle session, IEnumerable<WriteModel<TDocument>> requests, BulkWriteOptions options, CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); - Ensure.IsNotNull((object)requests, nameof(requests)); - + Ensure.IsNotNull(requests, nameof(requests)); var requestsArray = requests.ToArray(); if (requestsArray.Length == 0) { throw new ArgumentException("Must contain at least 1 request.", nameof(requests)); } - foreach (var request in requestsArray) - { - request.ThrowIfNotValid(); - } - - options = options ?? 
new BulkWriteOptions(); - - var renderArgs = GetRenderArgs(); - var operation = CreateBulkWriteOperation(session, requestsArray, options, renderArgs); + var operation = CreateBulkWriteOperation(session, requestsArray, options); try { - var result = await ExecuteWriteOperationAsync(session, operation, cancellationToken).ConfigureAwait(false); + var result = await ExecuteWriteOperationAsync(session, operation, options?.Timeout, cancellationToken).ConfigureAwait(false); return BulkWriteResult<TDocument>.FromCore(result, requestsArray); } catch (MongoBulkWriteOperationException ex) @@ -293,314 +249,296 @@ public override MongoCollectionSettings Settings } [Obsolete("Use CountDocuments or EstimatedDocumentCount instead.")] - public override long Count(FilterDefinition<TDocument> filter, CountOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public override long Count(FilterDefinition<TDocument> filter, CountOptions options, CancellationToken cancellationToken = default) { - return UsingImplicitSession(session => Count(session, filter, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + return Count(session, filter, options, cancellationToken); } [Obsolete("Use CountDocuments or EstimatedDocumentCount instead.")] - public override long Count(IClientSessionHandle session, FilterDefinition<TDocument> filter, CountOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public override long Count(IClientSessionHandle session, FilterDefinition<TDocument> filter, CountOptions options, CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); Ensure.IsNotNull(filter, nameof(filter)); - options = options ?? 
new CountOptions(); - var renderArgs = GetRenderArgs(); - var operation = CreateCountOperation(filter, options, renderArgs); - return ExecuteReadOperation(session, operation, cancellationToken); + var operation = CreateCountOperation(filter, options); + return ExecuteReadOperation(session, operation, options?.Timeout, cancellationToken); } [Obsolete("Use CountDocumentsAsync or EstimatedDocumentCountAsync instead.")] - public override Task<long> CountAsync(FilterDefinition<TDocument> filter, CountOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public override async Task<long> CountAsync(FilterDefinition<TDocument> filter, CountOptions options, CancellationToken cancellationToken = default) { - return UsingImplicitSessionAsync(session => CountAsync(session, filter, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + return await CountAsync(session, filter, options, cancellationToken).ConfigureAwait(false); } [Obsolete("Use CountDocumentsAsync or EstimatedDocumentCountAsync instead.")] - public override Task<long> CountAsync(IClientSessionHandle session, FilterDefinition<TDocument> filter, CountOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public override Task<long> CountAsync(IClientSessionHandle session, FilterDefinition<TDocument> filter, CountOptions options, CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); Ensure.IsNotNull(filter, nameof(filter)); - options = options ?? 
new CountOptions(); - var renderArgs = GetRenderArgs(); - var operation = CreateCountOperation(filter, options, renderArgs); - return ExecuteReadOperationAsync(session, operation, cancellationToken); + var operation = CreateCountOperation(filter, options); + return ExecuteReadOperationAsync(session, operation, options?.Timeout, cancellationToken); } - public override long CountDocuments(FilterDefinition<TDocument> filter, CountOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public override long CountDocuments(FilterDefinition<TDocument> filter, CountOptions options, CancellationToken cancellationToken = default) { - return UsingImplicitSession(session => CountDocuments(session, filter, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + return CountDocuments(session, filter, options, cancellationToken); } - public override long CountDocuments(IClientSessionHandle session, FilterDefinition<TDocument> filter, CountOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public override long CountDocuments(IClientSessionHandle session, FilterDefinition<TDocument> filter, CountOptions options, CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); Ensure.IsNotNull(filter, nameof(filter)); - options = options ?? 
new CountOptions(); - var renderArgs = GetRenderArgs(); - var operation = CreateCountDocumentsOperation(filter, options, renderArgs); - return ExecuteReadOperation(session, operation, cancellationToken); + var operation = CreateCountDocumentsOperation(filter, options); + return ExecuteReadOperation(session, operation, options?.Timeout, cancellationToken); } - public override Task<long> CountDocumentsAsync(FilterDefinition<TDocument> filter, CountOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public override async Task<long> CountDocumentsAsync(FilterDefinition<TDocument> filter, CountOptions options, CancellationToken cancellationToken = default) { - return UsingImplicitSessionAsync(session => CountDocumentsAsync(session, filter, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + return await CountDocumentsAsync(session, filter, options, cancellationToken).ConfigureAwait(false); } - public override Task<long> CountDocumentsAsync(IClientSessionHandle session, FilterDefinition<TDocument> filter, CountOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public override Task<long> CountDocumentsAsync(IClientSessionHandle session, FilterDefinition<TDocument> filter, CountOptions options, CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); Ensure.IsNotNull(filter, nameof(filter)); - options = options ?? 
new CountOptions(); - var renderArgs = GetRenderArgs(); - var operation = CreateCountDocumentsOperation(filter, options, renderArgs); - return ExecuteReadOperationAsync(session, operation, cancellationToken); + var operation = CreateCountDocumentsOperation(filter, options); + return ExecuteReadOperationAsync(session, operation, options?.Timeout, cancellationToken); } - public override IAsyncCursor<TField> Distinct<TField>(FieldDefinition<TDocument, TField> field, FilterDefinition<TDocument> filter, DistinctOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public override IAsyncCursor<TField> Distinct<TField>(FieldDefinition<TDocument, TField> field, FilterDefinition<TDocument> filter, DistinctOptions options, CancellationToken cancellationToken = default) { - return UsingImplicitSession(session => Distinct(session, field, filter, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + return Distinct(session, field, filter, options, cancellationToken); } - public override IAsyncCursor<TField> Distinct<TField>(IClientSessionHandle session, FieldDefinition<TDocument, TField> field, FilterDefinition<TDocument> filter, DistinctOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public override IAsyncCursor<TField> Distinct<TField>(IClientSessionHandle session, FieldDefinition<TDocument, TField> field, FilterDefinition<TDocument> filter, DistinctOptions options, CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); Ensure.IsNotNull(field, nameof(field)); Ensure.IsNotNull(filter, nameof(filter)); - options = options ?? 
new DistinctOptions(); - var renderArgs = GetRenderArgs(); - var operation = CreateDistinctOperation(field, filter, options, renderArgs); - return ExecuteReadOperation(session, operation, cancellationToken); + var operation = CreateDistinctOperation(field, filter, options); + return ExecuteReadOperation(session, operation, options?.Timeout, cancellationToken); } - public override Task<IAsyncCursor<TField>> DistinctAsync<TField>(FieldDefinition<TDocument, TField> field, FilterDefinition<TDocument> filter, DistinctOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public override async Task<IAsyncCursor<TField>> DistinctAsync<TField>(FieldDefinition<TDocument, TField> field, FilterDefinition<TDocument> filter, DistinctOptions options, CancellationToken cancellationToken = default) { - return UsingImplicitSessionAsync(session => DistinctAsync(session, field, filter, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + return await DistinctAsync(session, field, filter, options, cancellationToken).ConfigureAwait(false); } - public override Task<IAsyncCursor<TField>> DistinctAsync<TField>(IClientSessionHandle session, FieldDefinition<TDocument, TField> field, FilterDefinition<TDocument> filter, DistinctOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public override Task<IAsyncCursor<TField>> DistinctAsync<TField>(IClientSessionHandle session, FieldDefinition<TDocument, TField> field, FilterDefinition<TDocument> filter, DistinctOptions options, CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); Ensure.IsNotNull(field, nameof(field)); Ensure.IsNotNull(filter, nameof(filter)); - options = options ?? 
new DistinctOptions(); - var renderArgs = GetRenderArgs(); - var operation = CreateDistinctOperation(field, filter, options, renderArgs); - return ExecuteReadOperationAsync(session, operation, cancellationToken); + var operation = CreateDistinctOperation(field, filter, options); + return ExecuteReadOperationAsync(session, operation, options?.Timeout, cancellationToken); } - public override IAsyncCursor<TItem> DistinctMany<TItem>(FieldDefinition<TDocument, IEnumerable<TItem>> field, FilterDefinition<TDocument> filter, DistinctOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public override IAsyncCursor<TItem> DistinctMany<TItem>(FieldDefinition<TDocument, IEnumerable<TItem>> field, FilterDefinition<TDocument> filter, DistinctOptions options, CancellationToken cancellationToken = default) { - return UsingImplicitSession(session => DistinctMany(session, field, filter, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + return DistinctMany(session, field, filter, options, cancellationToken); } - public override IAsyncCursor<TItem> DistinctMany<TItem>(IClientSessionHandle session, FieldDefinition<TDocument, IEnumerable<TItem>> field, FilterDefinition<TDocument> filter, DistinctOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public override IAsyncCursor<TItem> DistinctMany<TItem>(IClientSessionHandle session, FieldDefinition<TDocument, IEnumerable<TItem>> field, FilterDefinition<TDocument> filter, DistinctOptions options, CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); Ensure.IsNotNull(field, nameof(field)); Ensure.IsNotNull(filter, nameof(filter)); - options = options ?? 
new DistinctOptions(); - var renderArgs = GetRenderArgs(); - var operation = CreateDistinctManyOperation(field, filter, options, renderArgs); - return ExecuteReadOperation(session, operation, cancellationToken); + var operation = CreateDistinctManyOperation(field, filter, options); + return ExecuteReadOperation(session, operation, options?.Timeout, cancellationToken); } - public override Task<IAsyncCursor<TItem>> DistinctManyAsync<TItem>(FieldDefinition<TDocument, IEnumerable<TItem>> field, FilterDefinition<TDocument> filter, DistinctOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public override async Task<IAsyncCursor<TItem>> DistinctManyAsync<TItem>(FieldDefinition<TDocument, IEnumerable<TItem>> field, FilterDefinition<TDocument> filter, DistinctOptions options, CancellationToken cancellationToken = default) { - return UsingImplicitSessionAsync(session => DistinctManyAsync(session, field, filter, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + return await DistinctManyAsync(session, field, filter, options, cancellationToken).ConfigureAwait(false); } - public override Task<IAsyncCursor<TItem>> DistinctManyAsync<TItem>(IClientSessionHandle session, FieldDefinition<TDocument, IEnumerable<TItem>> field, FilterDefinition<TDocument> filter, DistinctOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public override Task<IAsyncCursor<TItem>> DistinctManyAsync<TItem>(IClientSessionHandle session, FieldDefinition<TDocument, IEnumerable<TItem>> field, FilterDefinition<TDocument> filter, DistinctOptions options, CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); Ensure.IsNotNull(field, nameof(field)); Ensure.IsNotNull(filter, nameof(filter)); - options = options ?? 
new DistinctOptions(); - var renderArgs = GetRenderArgs(); - var operation = CreateDistinctManyOperation(field, filter, options, renderArgs); - return ExecuteReadOperationAsync(session, operation, cancellationToken); + var operation = CreateDistinctManyOperation(field, filter, options); + return ExecuteReadOperationAsync(session, operation, options?.Timeout, cancellationToken); } - public override long EstimatedDocumentCount(EstimatedDocumentCountOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public override long EstimatedDocumentCount(EstimatedDocumentCountOptions options, CancellationToken cancellationToken = default) { - return UsingImplicitSession(session => - { - var operation = CreateEstimatedDocumentCountOperation(options); - return ExecuteReadOperation(session, operation, cancellationToken); - }); + using var session = _operationExecutor.StartImplicitSession(); + var operation = CreateEstimatedDocumentCountOperation(options); + return ExecuteReadOperation(session, operation, options?.Timeout, cancellationToken); } - public override Task<long> EstimatedDocumentCountAsync(EstimatedDocumentCountOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public override async Task<long> EstimatedDocumentCountAsync(EstimatedDocumentCountOptions options, CancellationToken cancellationToken = default) { - return UsingImplicitSessionAsync(session => - { - var operation = CreateEstimatedDocumentCountOperation(options); - return ExecuteReadOperationAsync(session, operation, cancellationToken); - }); + using var session = _operationExecutor.StartImplicitSession(); + var operation = CreateEstimatedDocumentCountOperation(options); + return await ExecuteReadOperationAsync(session, operation, options?.Timeout, cancellationToken).ConfigureAwait(false); } - public override IAsyncCursor<TProjection> FindSync<TProjection>(FilterDefinition<TDocument> filter, FindOptions<TDocument, TProjection> options, 
CancellationToken cancellationToken = default(CancellationToken)) + public override IAsyncCursor<TProjection> FindSync<TProjection>(FilterDefinition<TDocument> filter, FindOptions<TDocument, TProjection> options, CancellationToken cancellationToken = default) { - return UsingImplicitSession(session => FindSync(session, filter, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + return FindSync(session, filter, options, cancellationToken); } - public override IAsyncCursor<TProjection> FindSync<TProjection>(IClientSessionHandle session, FilterDefinition<TDocument> filter, FindOptions<TDocument, TProjection> options, CancellationToken cancellationToken = default(CancellationToken)) + public override IAsyncCursor<TProjection> FindSync<TProjection>(IClientSessionHandle session, FilterDefinition<TDocument> filter, FindOptions<TDocument, TProjection> options, CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); Ensure.IsNotNull(filter, nameof(filter)); - options = options ?? 
new FindOptions<TDocument, TProjection>(); - var renderArgs = GetRenderArgs(options.TranslationOptions); - var operation = CreateFindOperation<TProjection>(filter, options, renderArgs); - return ExecuteReadOperation(session, operation, cancellationToken); + var operation = CreateFindOperation(filter, options); + return ExecuteReadOperation(session, operation, options?.Timeout, cancellationToken); } - public override Task<IAsyncCursor<TProjection>> FindAsync<TProjection>(FilterDefinition<TDocument> filter, FindOptions<TDocument, TProjection> options, CancellationToken cancellationToken = default(CancellationToken)) + public override async Task<IAsyncCursor<TProjection>> FindAsync<TProjection>(FilterDefinition<TDocument> filter, FindOptions<TDocument, TProjection> options, CancellationToken cancellationToken = default) { - return UsingImplicitSessionAsync(session => FindAsync(session, filter, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + return await FindAsync(session, filter, options, cancellationToken).ConfigureAwait(false); } - public override Task<IAsyncCursor<TProjection>> FindAsync<TProjection>(IClientSessionHandle session, FilterDefinition<TDocument> filter, FindOptions<TDocument, TProjection> options, CancellationToken cancellationToken = default(CancellationToken)) + public override Task<IAsyncCursor<TProjection>> FindAsync<TProjection>(IClientSessionHandle session, FilterDefinition<TDocument> filter, FindOptions<TDocument, TProjection> options, CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); Ensure.IsNotNull(filter, nameof(filter)); - options = options ?? 
new FindOptions<TDocument, TProjection>(); - var renderArgs = GetRenderArgs(options.TranslationOptions); - var operation = CreateFindOperation<TProjection>(filter, options, renderArgs); - return ExecuteReadOperationAsync(session, operation, cancellationToken); + var operation = CreateFindOperation(filter, options); + return ExecuteReadOperationAsync(session, operation, options?.Timeout, cancellationToken); } - public override TProjection FindOneAndDelete<TProjection>(FilterDefinition<TDocument> filter, FindOneAndDeleteOptions<TDocument, TProjection> options, CancellationToken cancellationToken = default(CancellationToken)) + public override TProjection FindOneAndDelete<TProjection>(FilterDefinition<TDocument> filter, FindOneAndDeleteOptions<TDocument, TProjection> options, CancellationToken cancellationToken = default) { - return UsingImplicitSession(session => FindOneAndDelete(session, filter, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + return FindOneAndDelete(session, filter, options, cancellationToken); } - public override TProjection FindOneAndDelete<TProjection>(IClientSessionHandle session, FilterDefinition<TDocument> filter, FindOneAndDeleteOptions<TDocument, TProjection> options, CancellationToken cancellationToken = default(CancellationToken)) + public override TProjection FindOneAndDelete<TProjection>(IClientSessionHandle session, FilterDefinition<TDocument> filter, FindOneAndDeleteOptions<TDocument, TProjection> options, CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); Ensure.IsNotNull(filter, nameof(filter)); - options = options ?? 
new FindOneAndDeleteOptions<TDocument, TProjection>(); - var renderArgs = GetRenderArgs(); - var operation = CreateFindOneAndDeleteOperation<TProjection>(filter, options, renderArgs); - return ExecuteWriteOperation(session, operation, cancellationToken); + var operation = CreateFindOneAndDeleteOperation(filter, options); + return ExecuteWriteOperation(session, operation, options?.Timeout, cancellationToken); } - public override Task<TProjection> FindOneAndDeleteAsync<TProjection>(FilterDefinition<TDocument> filter, FindOneAndDeleteOptions<TDocument, TProjection> options, CancellationToken cancellationToken = default(CancellationToken)) + public override async Task<TProjection> FindOneAndDeleteAsync<TProjection>(FilterDefinition<TDocument> filter, FindOneAndDeleteOptions<TDocument, TProjection> options, CancellationToken cancellationToken = default) { - return UsingImplicitSessionAsync(session => FindOneAndDeleteAsync(session, filter, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + return await FindOneAndDeleteAsync(session, filter, options, cancellationToken).ConfigureAwait(false); } - public override Task<TProjection> FindOneAndDeleteAsync<TProjection>(IClientSessionHandle session, FilterDefinition<TDocument> filter, FindOneAndDeleteOptions<TDocument, TProjection> options, CancellationToken cancellationToken = default(CancellationToken)) + public override Task<TProjection> FindOneAndDeleteAsync<TProjection>(IClientSessionHandle session, FilterDefinition<TDocument> filter, FindOneAndDeleteOptions<TDocument, TProjection> options, CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); Ensure.IsNotNull(filter, nameof(filter)); - options = options ?? 
new FindOneAndDeleteOptions<TDocument, TProjection>(); - var renderArgs = GetRenderArgs(); - var operation = CreateFindOneAndDeleteOperation<TProjection>(filter, options, renderArgs); - return ExecuteWriteOperationAsync(session, operation, cancellationToken); + var operation = CreateFindOneAndDeleteOperation(filter, options); + return ExecuteWriteOperationAsync(session, operation, options?.Timeout, cancellationToken); } - public override TProjection FindOneAndReplace<TProjection>(FilterDefinition<TDocument> filter, TDocument replacement, FindOneAndReplaceOptions<TDocument, TProjection> options, CancellationToken cancellationToken = default(CancellationToken)) + public override TProjection FindOneAndReplace<TProjection>(FilterDefinition<TDocument> filter, TDocument replacement, FindOneAndReplaceOptions<TDocument, TProjection> options, CancellationToken cancellationToken = default) { - return UsingImplicitSession(session => FindOneAndReplace(session, filter, replacement, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + return FindOneAndReplace(session, filter, replacement, options, cancellationToken); } - public override TProjection FindOneAndReplace<TProjection>(IClientSessionHandle session, FilterDefinition<TDocument> filter, TDocument replacement, FindOneAndReplaceOptions<TDocument, TProjection> options, CancellationToken cancellationToken = default(CancellationToken)) + public override TProjection FindOneAndReplace<TProjection>(IClientSessionHandle session, FilterDefinition<TDocument> filter, TDocument replacement, FindOneAndReplaceOptions<TDocument, TProjection> options, CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); Ensure.IsNotNull(filter, nameof(filter)); var replacementObject = Ensure.IsNotNull((object)replacement, nameof(replacement)); // only box once if it's a struct - options = options ?? 
new FindOneAndReplaceOptions<TDocument, TProjection>(); - var renderArgs = GetRenderArgs(); - var operation = CreateFindOneAndReplaceOperation(filter, replacementObject, options, renderArgs); - return ExecuteWriteOperation(session, operation, cancellationToken); + var operation = CreateFindOneAndReplaceOperation(filter, replacementObject, options); + return ExecuteWriteOperation(session, operation, options?.Timeout, cancellationToken); } - public override Task<TProjection> FindOneAndReplaceAsync<TProjection>(FilterDefinition<TDocument> filter, TDocument replacement, FindOneAndReplaceOptions<TDocument, TProjection> options, CancellationToken cancellationToken = default(CancellationToken)) + public override async Task<TProjection> FindOneAndReplaceAsync<TProjection>(FilterDefinition<TDocument> filter, TDocument replacement, FindOneAndReplaceOptions<TDocument, TProjection> options, CancellationToken cancellationToken = default) { - return UsingImplicitSessionAsync(session => FindOneAndReplaceAsync(session, filter, replacement, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + return await FindOneAndReplaceAsync(session, filter, replacement, options, cancellationToken).ConfigureAwait(false); } - public override Task<TProjection> FindOneAndReplaceAsync<TProjection>(IClientSessionHandle session, FilterDefinition<TDocument> filter, TDocument replacement, FindOneAndReplaceOptions<TDocument, TProjection> options, CancellationToken cancellationToken = default(CancellationToken)) + public override Task<TProjection> FindOneAndReplaceAsync<TProjection>(IClientSessionHandle session, FilterDefinition<TDocument> filter, TDocument replacement, FindOneAndReplaceOptions<TDocument, TProjection> options, CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); Ensure.IsNotNull(filter, nameof(filter)); var replacementObject = Ensure.IsNotNull((object)replacement, nameof(replacement)); 
// only box once if it's a struct - options = options ?? new FindOneAndReplaceOptions<TDocument, TProjection>(); - var renderArgs = GetRenderArgs(); - var operation = CreateFindOneAndReplaceOperation(filter, replacementObject, options, renderArgs); - return ExecuteWriteOperationAsync(session, operation, cancellationToken); + var operation = CreateFindOneAndReplaceOperation(filter, replacementObject, options); + return ExecuteWriteOperationAsync(session, operation, options?.Timeout, cancellationToken); } - public override TProjection FindOneAndUpdate<TProjection>(FilterDefinition<TDocument> filter, UpdateDefinition<TDocument> update, FindOneAndUpdateOptions<TDocument, TProjection> options, CancellationToken cancellationToken = default(CancellationToken)) + public override TProjection FindOneAndUpdate<TProjection>(FilterDefinition<TDocument> filter, UpdateDefinition<TDocument> update, FindOneAndUpdateOptions<TDocument, TProjection> options, CancellationToken cancellationToken = default) { - return UsingImplicitSession(session => FindOneAndUpdate(session, filter, update, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + return FindOneAndUpdate(session, filter, update, options, cancellationToken); } - public override TProjection FindOneAndUpdate<TProjection>(IClientSessionHandle session, FilterDefinition<TDocument> filter, UpdateDefinition<TDocument> update, FindOneAndUpdateOptions<TDocument, TProjection> options, CancellationToken cancellationToken = default(CancellationToken)) + public override TProjection FindOneAndUpdate<TProjection>(IClientSessionHandle session, FilterDefinition<TDocument> filter, UpdateDefinition<TDocument> update, FindOneAndUpdateOptions<TDocument, TProjection> options, CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); Ensure.IsNotNull(filter, nameof(filter)); Ensure.IsNotNull(update, nameof(update)); - options = options ?? 
new FindOneAndUpdateOptions<TDocument, TProjection>(); + options ??= new FindOneAndUpdateOptions<TDocument, TProjection>(); if (update is PipelineUpdateDefinition<TDocument> && (options.ArrayFilters != null && options.ArrayFilters.Any())) { throw new NotSupportedException("An arrayfilter is not supported in the pipeline-style update."); } - var renderArgs = GetRenderArgs(); - var operation = CreateFindOneAndUpdateOperation(filter, update, options, renderArgs); - return ExecuteWriteOperation(session, operation, cancellationToken); + var operation = CreateFindOneAndUpdateOperation(filter, update, options); + return ExecuteWriteOperation(session, operation, options?.Timeout, cancellationToken); } - public override Task<TProjection> FindOneAndUpdateAsync<TProjection>(FilterDefinition<TDocument> filter, UpdateDefinition<TDocument> update, FindOneAndUpdateOptions<TDocument, TProjection> options, CancellationToken cancellationToken = default(CancellationToken)) + public override async Task<TProjection> FindOneAndUpdateAsync<TProjection>(FilterDefinition<TDocument> filter, UpdateDefinition<TDocument> update, FindOneAndUpdateOptions<TDocument, TProjection> options, CancellationToken cancellationToken = default) { - return UsingImplicitSessionAsync(session => FindOneAndUpdateAsync(session, filter, update, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + return await FindOneAndUpdateAsync(session, filter, update, options, cancellationToken).ConfigureAwait(false); } - public override Task<TProjection> FindOneAndUpdateAsync<TProjection>(IClientSessionHandle session, FilterDefinition<TDocument> filter, UpdateDefinition<TDocument> update, FindOneAndUpdateOptions<TDocument, TProjection> options, CancellationToken cancellationToken = default(CancellationToken)) + public override Task<TProjection> FindOneAndUpdateAsync<TProjection>(IClientSessionHandle session, FilterDefinition<TDocument> filter, 
UpdateDefinition<TDocument> update, FindOneAndUpdateOptions<TDocument, TProjection> options, CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); Ensure.IsNotNull(filter, nameof(filter)); Ensure.IsNotNull(update, nameof(update)); - options = options ?? new FindOneAndUpdateOptions<TDocument, TProjection>(); - + options ??= new FindOneAndUpdateOptions<TDocument, TProjection>(); if (update is PipelineUpdateDefinition<TDocument> && (options.ArrayFilters != null && options.ArrayFilters.Any())) { throw new NotSupportedException("An arrayfilter is not supported in the pipeline-style update."); } - var renderArgs = GetRenderArgs(); - var operation = CreateFindOneAndUpdateOperation(filter, update, options, renderArgs); - return ExecuteWriteOperationAsync(session, operation, cancellationToken); + var operation = CreateFindOneAndUpdateOperation(filter, update, options); + return ExecuteWriteOperationAsync(session, operation, options?.Timeout, cancellationToken); } [Obsolete("Use Aggregation pipeline instead.")] - public override IAsyncCursor<TResult> MapReduce<TResult>(BsonJavaScript map, BsonJavaScript reduce, MapReduceOptions<TDocument, TResult> options = null, CancellationToken cancellationToken = default(CancellationToken)) + public override IAsyncCursor<TResult> MapReduce<TResult>(BsonJavaScript map, BsonJavaScript reduce, MapReduceOptions<TDocument, TResult> options = null, CancellationToken cancellationToken = default) { - return UsingImplicitSession(session => MapReduce(session, map, reduce, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + return MapReduce(session, map, reduce, options, cancellationToken: cancellationToken); } [Obsolete("Use Aggregation pipeline instead.")] - public override IAsyncCursor<TResult> MapReduce<TResult>(IClientSessionHandle session, BsonJavaScript map, BsonJavaScript reduce, MapReduceOptions<TDocument, TResult> options = null, 
CancellationToken cancellationToken = default(CancellationToken)) + public override IAsyncCursor<TResult> MapReduce<TResult>(IClientSessionHandle session, BsonJavaScript map, BsonJavaScript reduce, MapReduceOptions<TDocument, TResult> options = null, CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); Ensure.IsNotNull(map, nameof(map)); Ensure.IsNotNull(reduce, nameof(reduce)); - options = options ?? new MapReduceOptions<TDocument, TResult>(); + options ??= new MapReduceOptions<TDocument, TResult>(); var outputOptions = options.OutputOptions ?? MapReduceOutputOptions.Inline; var resultSerializer = ResolveResultSerializer<TResult>(options.ResultSerializer); @@ -609,38 +547,30 @@ public override MongoCollectionSettings Settings if (outputOptions == MapReduceOutputOptions.Inline) { var operation = CreateMapReduceOperation(map, reduce, options, resultSerializer, renderArgs); - return ExecuteReadOperation(session, operation, cancellationToken); + return ExecuteReadOperation(session, operation, options.Timeout, cancellationToken); } else { var mapReduceOperation = CreateMapReduceOutputToCollectionOperation(map, reduce, options, outputOptions, renderArgs); - ExecuteWriteOperation(session, mapReduceOperation, cancellationToken); - - // we want to delay execution of the find because the user may - // not want to iterate the results at all... 
- var findOperation = CreateMapReduceOutputToCollectionFindOperation<TResult>(options, mapReduceOperation.OutputCollectionNamespace, resultSerializer); - var forkedSession = session.Fork(); - var deferredCursor = new DeferredAsyncCursor<TResult>( - () => forkedSession.Dispose(), - ct => ExecuteReadOperation(forkedSession, findOperation, ReadPreference.Primary, ct), - ct => ExecuteReadOperationAsync(forkedSession, findOperation, ReadPreference.Primary, ct)); - return deferredCursor; + ExecuteWriteOperation(session, mapReduceOperation, options.Timeout, cancellationToken); + return CreateMapReduceOutputToCollectionResultCursor(session, options, mapReduceOperation.OutputCollectionNamespace, resultSerializer); } } [Obsolete("Use Aggregation pipeline instead.")] - public override Task<IAsyncCursor<TResult>> MapReduceAsync<TResult>(BsonJavaScript map, BsonJavaScript reduce, MapReduceOptions<TDocument, TResult> options = null, CancellationToken cancellationToken = default(CancellationToken)) + public override async Task<IAsyncCursor<TResult>> MapReduceAsync<TResult>(BsonJavaScript map, BsonJavaScript reduce, MapReduceOptions<TDocument, TResult> options = null, CancellationToken cancellationToken = default) { - return UsingImplicitSessionAsync(session => MapReduceAsync(session, map, reduce, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + return await MapReduceAsync(session, map, reduce, options, cancellationToken).ConfigureAwait(false); } [Obsolete("Use Aggregation pipeline instead.")] - public override async Task<IAsyncCursor<TResult>> MapReduceAsync<TResult>(IClientSessionHandle session, BsonJavaScript map, BsonJavaScript reduce, MapReduceOptions<TDocument, TResult> options = null, CancellationToken cancellationToken = default(CancellationToken)) + public override async Task<IAsyncCursor<TResult>> MapReduceAsync<TResult>(IClientSessionHandle session, BsonJavaScript map, BsonJavaScript reduce, 
MapReduceOptions<TDocument, TResult> options = null, CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); Ensure.IsNotNull(map, nameof(map)); Ensure.IsNotNull(reduce, nameof(reduce)); - options = options ?? new MapReduceOptions<TDocument, TResult>(); + options ??= new MapReduceOptions<TDocument, TResult>(); var outputOptions = options.OutputOptions ?? MapReduceOutputOptions.Inline; var resultSerializer = ResolveResultSerializer<TResult>(options.ResultSerializer); @@ -649,22 +579,13 @@ public override MongoCollectionSettings Settings if (outputOptions == MapReduceOutputOptions.Inline) { var operation = CreateMapReduceOperation(map, reduce, options, resultSerializer, renderArgs); - return await ExecuteReadOperationAsync(session, operation, cancellationToken).ConfigureAwait(false); + return await ExecuteReadOperationAsync(session, operation, options.Timeout, cancellationToken).ConfigureAwait(false); } else { var mapReduceOperation = CreateMapReduceOutputToCollectionOperation(map, reduce, options, outputOptions, renderArgs); - await ExecuteWriteOperationAsync(session, mapReduceOperation, cancellationToken).ConfigureAwait(false); - - // we want to delay execution of the find because the user may - // not want to iterate the results at all... 
- var findOperation = CreateMapReduceOutputToCollectionFindOperation<TResult>(options, mapReduceOperation.OutputCollectionNamespace, resultSerializer); - var forkedSession = session.Fork(); - var deferredCursor = new DeferredAsyncCursor<TResult>( - () => forkedSession.Dispose(), - ct => ExecuteReadOperation(forkedSession, findOperation, ReadPreference.Primary, ct), - ct => ExecuteReadOperationAsync(forkedSession, findOperation, ReadPreference.Primary, ct)); - return await Task.FromResult(deferredCursor).ConfigureAwait(false); + await ExecuteWriteOperationAsync(session, mapReduceOperation, options.Timeout, cancellationToken).ConfigureAwait(false); + return CreateMapReduceOutputToCollectionResultCursor(session, options, mapReduceOperation.OutputCollectionNamespace, resultSerializer); } } @@ -685,43 +606,45 @@ public override IFilteredMongoCollection<TDerivedDocument> OfType<TDerivedDocume public override IChangeStreamCursor<TResult> Watch<TResult>( PipelineDefinition<ChangeStreamDocument<TDocument>, TResult> pipeline, ChangeStreamOptions options = null, - CancellationToken cancellationToken = default(CancellationToken)) + CancellationToken cancellationToken = default) { - return UsingImplicitSession(session => Watch(session, pipeline, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + return Watch(session, pipeline, options, cancellationToken); } public override IChangeStreamCursor<TResult> Watch<TResult>( IClientSessionHandle session, PipelineDefinition<ChangeStreamDocument<TDocument>, TResult> pipeline, ChangeStreamOptions options = null, - CancellationToken cancellationToken = default(CancellationToken)) + CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); Ensure.IsNotNull(pipeline, nameof(pipeline)); - var translationOptions = _database.Client.Settings.TranslationOptions; - var operation = CreateChangeStreamOperation(pipeline, options, translationOptions); - 
return ExecuteReadOperation(session, operation, cancellationToken); + + var operation = CreateChangeStreamOperation(pipeline, options); + return ExecuteReadOperation(session, operation, options?.Timeout, cancellationToken); } - public override Task<IChangeStreamCursor<TResult>> WatchAsync<TResult>( + public override async Task<IChangeStreamCursor<TResult>> WatchAsync<TResult>( PipelineDefinition<ChangeStreamDocument<TDocument>, TResult> pipeline, ChangeStreamOptions options = null, - CancellationToken cancellationToken = default(CancellationToken)) + CancellationToken cancellationToken = default) { - return UsingImplicitSessionAsync(session => WatchAsync(session, pipeline, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + return await WatchAsync(session, pipeline, options, cancellationToken).ConfigureAwait(false); } public override Task<IChangeStreamCursor<TResult>> WatchAsync<TResult>( IClientSessionHandle session, PipelineDefinition<ChangeStreamDocument<TDocument>, TResult> pipeline, ChangeStreamOptions options = null, - CancellationToken cancellationToken = default(CancellationToken)) + CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); Ensure.IsNotNull(pipeline, nameof(pipeline)); - var translationOptions = _database.Client.Settings.TranslationOptions; - var operation = CreateChangeStreamOperation(pipeline, options, translationOptions); - return ExecuteReadOperationAsync(session, operation, cancellationToken); + + var operation = CreateChangeStreamOperation(pipeline, options); + return ExecuteReadOperationAsync(session, operation, options?.Timeout, cancellationToken); } public override IMongoCollection<TDocument> WithReadConcern(ReadConcern readConcern) @@ -850,72 +773,11 @@ private AggregateOperation<TResult> CreateAggregateOperation<TResult>(RenderedPi }; } - private FindOperation<TResult> CreateAggregateToCollectionFindOperation<TResult>(BsonDocument 
outStage, IBsonSerializer<TResult> resultSerializer, AggregateOptions options) + private IAsyncCursor<TResult> CreateAggregateToCollectionResultCursor<TResult>(IClientSessionHandle session, RenderedPipelineDefinition<TResult> pipeline, AggregateOptions options) { - CollectionNamespace outputCollectionNamespace; - var stageName = outStage.GetElement(0).Name; - switch (stageName) - { - case "$out": - { - var outValue = outStage[0]; - DatabaseNamespace outputDatabaseNamespace; - string outputCollectionName; - if (outValue.IsString) - { - outputDatabaseNamespace = _collectionNamespace.DatabaseNamespace; - outputCollectionName = outValue.AsString; - } - else - { - outputDatabaseNamespace = new DatabaseNamespace(outValue["db"].AsString); - outputCollectionName = outValue["coll"].AsString; - } - outputCollectionNamespace = new CollectionNamespace(outputDatabaseNamespace, outputCollectionName); - } - break; - case "$merge": - { - var mergeArguments = outStage[0]; - DatabaseNamespace outputDatabaseNamespace; - string outputCollectionName; - if (mergeArguments.IsString) - { - outputDatabaseNamespace = _collectionNamespace.DatabaseNamespace; - outputCollectionName = mergeArguments.AsString; - } - else - { - var into = mergeArguments.AsBsonDocument["into"]; - if (into.IsString) - { - outputDatabaseNamespace = _collectionNamespace.DatabaseNamespace; - outputCollectionName = into.AsString; - } - else - { - if (into.AsBsonDocument.Contains("db")) - { - outputDatabaseNamespace = new DatabaseNamespace(into["db"].AsString); - } - else - { - outputDatabaseNamespace = _collectionNamespace.DatabaseNamespace; - } - outputCollectionName = into["coll"].AsString; - } - } - outputCollectionNamespace = new CollectionNamespace(outputDatabaseNamespace, outputCollectionName); - } - break; - default: - throw new ArgumentException($"Unexpected stage name: {stageName}."); - } + var outputCollectionNamespace = AggregateHelper.GetOutCollection(pipeline.Documents.Last(), 
_collectionNamespace.DatabaseNamespace); - return new FindOperation<TResult>( - outputCollectionNamespace, - resultSerializer, - _messageEncoderSettings) + var findOperation = new FindOperation<TResult>(outputCollectionNamespace, pipeline.OutputSerializer, _messageEncoderSettings) { BatchSize = options.BatchSize, Collation = options.Collation, @@ -923,6 +785,15 @@ private FindOperation<TResult> CreateAggregateToCollectionFindOperation<TResult> ReadConcern = _settings.ReadConcern, RetryRequested = _database.Client.Settings.RetryReads }; + + // we want to delay execution of the find because the user may + // not want to iterate the results at all... + var forkedSession = session.Fork(); + var deferredCursor = new DeferredAsyncCursor<TResult>( + () => forkedSession.Dispose(), + ct => ExecuteReadOperation(forkedSession, findOperation, ReadPreference.Primary, options?.Timeout, ct), + ct => ExecuteReadOperationAsync(forkedSession, findOperation, ReadPreference.Primary, options?.Timeout, ct)); + return deferredCursor; } private AggregateToCollectionOperation CreateAggregateToCollectionOperation<TResult>(RenderedPipelineDefinition<TResult> renderedPipeline, AggregateOptions options) @@ -947,15 +818,22 @@ private AggregateToCollectionOperation CreateAggregateToCollectionOperation<TRes private BulkMixedWriteOperation CreateBulkWriteOperation( IClientSessionHandle session, - IEnumerable<WriteModel<TDocument>> requests, - BulkWriteOptions options, - RenderArgs<TDocument> renderArgs) + IReadOnlyList<WriteModel<TDocument>> requests, + BulkWriteOptions options) { + options ??= new BulkWriteOptions(); + var renderArgs = GetRenderArgs(); var effectiveWriteConcern = session.IsInTransaction ? 
WriteConcern.Acknowledged : _settings.WriteConcern; + var writeModels = requests.Select((model, index) => + { + model.ThrowIfNotValid(); + return ConvertWriteModelToWriteRequest(model, index, renderArgs); + }).ToArray(); + return new BulkMixedWriteOperation( _collectionNamespace, - requests.Select((model, index) => ConvertWriteModelToWriteRequest(model, index, renderArgs)), + writeModels, _messageEncoderSettings) { BypassDocumentValidation = options.BypassDocumentValidation, @@ -969,9 +847,10 @@ private BulkMixedWriteOperation CreateBulkWriteOperation( private ChangeStreamOperation<TResult> CreateChangeStreamOperation<TResult>( PipelineDefinition<ChangeStreamDocument<TDocument>, TResult> pipeline, - ChangeStreamOptions options, - ExpressionTranslationOptions translationOptions) + ChangeStreamOptions options) { + var translationOptions = _database.Client.Settings.TranslationOptions; + return ChangeStreamHelper.CreateChangeStreamOperation( this, pipeline, @@ -984,9 +863,11 @@ private ChangeStreamOperation<TResult> CreateChangeStreamOperation<TResult>( private CountDocumentsOperation CreateCountDocumentsOperation( FilterDefinition<TDocument> filter, - CountOptions options, - RenderArgs<TDocument> renderArgs) + CountOptions options) { + options ??= new CountOptions(); + var renderArgs = GetRenderArgs(); + return new CountDocumentsOperation(_collectionNamespace, _messageEncoderSettings) { Collation = options.Collation, @@ -1003,9 +884,11 @@ private CountDocumentsOperation CreateCountDocumentsOperation( private CountOperation CreateCountOperation( FilterDefinition<TDocument> filter, - CountOptions options, - RenderArgs<TDocument> renderArgs) + CountOptions options) { + options ??= new CountOptions(); + var renderArgs = GetRenderArgs(); + return new CountOperation(_collectionNamespace, _messageEncoderSettings) { Collation = options.Collation, @@ -1023,9 +906,10 @@ private CountOperation CreateCountOperation( private DistinctOperation<TField> 
CreateDistinctOperation<TField>( FieldDefinition<TDocument, TField> field, FilterDefinition<TDocument> filter, - DistinctOptions options, - RenderArgs<TDocument> renderArgs) + DistinctOptions options) { + options ??= new DistinctOptions(); + var renderArgs = GetRenderArgs(); var renderedField = field.Render(renderArgs); var valueSerializer = GetValueSerializerForDistinct(renderedField, _settings.SerializerRegistry); @@ -1047,9 +931,10 @@ private DistinctOperation<TField> CreateDistinctOperation<TField>( private DistinctOperation<TItem> CreateDistinctManyOperation<TItem>( FieldDefinition<TDocument, IEnumerable<TItem>> field, FilterDefinition<TDocument> filter, - DistinctOptions options, - RenderArgs<TDocument> renderArgs) + DistinctOptions options) { + options ??= new DistinctOptions(); + var renderArgs = GetRenderArgs(); var renderedField = field.Render(renderArgs); var itemSerializer = GetItemSerializerForDistinctMany(renderedField, _settings.SerializerRegistry); @@ -1080,9 +965,10 @@ private EstimatedDocumentCountOperation CreateEstimatedDocumentCountOperation(Es private FindOneAndDeleteOperation<TProjection> CreateFindOneAndDeleteOperation<TProjection>( FilterDefinition<TDocument> filter, - FindOneAndDeleteOptions<TDocument, TProjection> options, - RenderArgs<TDocument> renderArgs) + FindOneAndDeleteOptions<TDocument, TProjection> options) { + options ??= new FindOneAndDeleteOptions<TDocument, TProjection>(); + var renderArgs = GetRenderArgs(); var projection = options.Projection ?? 
new ClientSideDeserializationProjectionDefinition<TDocument, TProjection>(); var renderedProjection = projection.Render(renderArgs with { RenderForFind = true }); @@ -1106,17 +992,19 @@ private FindOneAndDeleteOperation<TProjection> CreateFindOneAndDeleteOperation<T private FindOneAndReplaceOperation<TProjection> CreateFindOneAndReplaceOperation<TProjection>( FilterDefinition<TDocument> filter, - object replacementObject, - FindOneAndReplaceOptions<TDocument, TProjection> options, - RenderArgs<TDocument> renderArgs) + object replacement, + FindOneAndReplaceOptions<TDocument, TProjection> options) { + options ??= new FindOneAndReplaceOptions<TDocument, TProjection>(); + + var renderArgs = GetRenderArgs(); var projection = options.Projection ?? new ClientSideDeserializationProjectionDefinition<TDocument, TProjection>(); var renderedProjection = projection.Render(renderArgs with { RenderForFind = true }); return new FindOneAndReplaceOperation<TProjection>( _collectionNamespace, filter.Render(renderArgs), - new BsonDocumentWrapper(replacementObject, _documentSerializer), + new BsonDocumentWrapper(replacement, _documentSerializer), new FindAndModifyValueDeserializer<TProjection>(renderedProjection.ProjectionSerializer), _messageEncoderSettings) { @@ -1138,9 +1026,9 @@ private FindOneAndReplaceOperation<TProjection> CreateFindOneAndReplaceOperation private FindOneAndUpdateOperation<TProjection> CreateFindOneAndUpdateOperation<TProjection>( FilterDefinition<TDocument> filter, UpdateDefinition<TDocument> update, - FindOneAndUpdateOptions<TDocument, TProjection> options, - RenderArgs<TDocument> renderArgs) + FindOneAndUpdateOptions<TDocument, TProjection> options) { + var renderArgs = GetRenderArgs(); var projection = options.Projection ?? 
new ClientSideDeserializationProjectionDefinition<TDocument, TProjection>(); var renderedProjection = projection.Render(renderArgs with { RenderForFind = true }); @@ -1169,9 +1057,11 @@ private FindOneAndUpdateOperation<TProjection> CreateFindOneAndUpdateOperation<T private FindOperation<TProjection> CreateFindOperation<TProjection>( FilterDefinition<TDocument> filter, - FindOptions<TDocument, TProjection> options, - RenderArgs<TDocument> renderArgs) + FindOptions<TDocument, TProjection> options) { + options ??= new FindOptions<TDocument, TProjection>(); + + var renderArgs = GetRenderArgs(options.TranslationOptions); var projection = options.Projection ?? new ClientSideDeserializationProjectionDefinition<TDocument, TProjection>(); var renderedProjection = projection.Render(renderArgs with { RenderForFind = true }); @@ -1285,10 +1175,10 @@ private MapReduceOutputToCollectionOperation CreateMapReduceOutputToCollectionOp } #pragma warning disable CS0618 // Type or member is obsolete - private FindOperation<TResult> CreateMapReduceOutputToCollectionFindOperation<TResult>(MapReduceOptions<TDocument, TResult> options, CollectionNamespace outputCollectionNamespace, IBsonSerializer<TResult> resultSerializer) + private IAsyncCursor<TResult> CreateMapReduceOutputToCollectionResultCursor<TResult>(IClientSessionHandle session, MapReduceOptions<TDocument, TResult> options, CollectionNamespace outputCollectionNamespace, IBsonSerializer<TResult> resultSerializer) #pragma warning restore CS0618 // Type or member is obsolete { - return new FindOperation<TResult>( + var findOperation = new FindOperation<TResult>( outputCollectionNamespace, resultSerializer, _messageEncoderSettings) @@ -1298,21 +1188,58 @@ private FindOperation<TResult> CreateMapReduceOutputToCollectionFindOperation<TR ReadConcern = _settings.ReadConcern, RetryRequested = _database.Client.Settings.RetryReads }; + + // we want to delay execution of the find because the user may + // not want to iterate the results at 
all... + var forkedSession = session.Fork(); + var deferredCursor = new DeferredAsyncCursor<TResult>( + () => forkedSession.Dispose(), + ct => ExecuteReadOperation(forkedSession, findOperation, ReadPreference.Primary, options?.Timeout, ct), + ct => ExecuteReadOperationAsync(forkedSession, findOperation, ReadPreference.Primary, options?.Timeout, ct)); + return deferredCursor; } - private IReadBindingHandle CreateReadBinding(IClientSessionHandle session, ReadPreference readPreference) + private OperationContext CreateOperationContext(IClientSessionHandle session, TimeSpan? timeout, CancellationToken cancellationToken) { - if (session.IsInTransaction && readPreference.ReadPreferenceMode != ReadPreferenceMode.Primary) + var operationContext = session.WrappedCoreSession.CurrentTransaction?.OperationContext; + if (operationContext != null && timeout != null) { - throw new InvalidOperationException("Read preference in a transaction must be primary."); + throw new InvalidOperationException("Cannot specify per operation timeout inside transaction."); } - return ChannelPinningHelper.CreateReadBinding(_cluster, session.WrappedCoreSession.Fork(), readPreference); + return operationContext?.Fork() ?? new OperationContext(timeout ?? _settings.Timeout, cancellationToken); + } + + private TResult ExecuteReadOperation<TResult>(IClientSessionHandle session, IReadOperation<TResult> operation, TimeSpan? timeout, CancellationToken cancellationToken) + => ExecuteReadOperation(session, operation, null, timeout, cancellationToken); + + private TResult ExecuteReadOperation<TResult>(IClientSessionHandle session, IReadOperation<TResult> operation, ReadPreference explicitReadPreference, TimeSpan? timeout, CancellationToken cancellationToken) + { + var readPreference = explicitReadPreference ?? 
session.GetEffectiveReadPreference(_settings.ReadPreference); + using var operationContext = CreateOperationContext(session, timeout, cancellationToken); + return _operationExecutor.ExecuteReadOperation(operationContext, session, operation, readPreference, true); + } + + private Task<TResult> ExecuteReadOperationAsync<TResult>(IClientSessionHandle session, IReadOperation<TResult> operation, TimeSpan? timeout, CancellationToken cancellationToken) + => ExecuteReadOperationAsync(session, operation, null, timeout, cancellationToken); + + private async Task<TResult> ExecuteReadOperationAsync<TResult>(IClientSessionHandle session, IReadOperation<TResult> operation, ReadPreference explicitReadPreference, TimeSpan? timeout, CancellationToken cancellationToken) + { + var readPreference = explicitReadPreference ?? session.GetEffectiveReadPreference(_settings.ReadPreference); + using var operationContext = CreateOperationContext(session, timeout, cancellationToken); + return await _operationExecutor.ExecuteReadOperationAsync(operationContext, session, operation, readPreference, true).ConfigureAwait(false); + } + + private TResult ExecuteWriteOperation<TResult>(IClientSessionHandle session, IWriteOperation<TResult> operation, TimeSpan? timeout, CancellationToken cancellationToken) + { + using var operationContext = CreateOperationContext(session, timeout, cancellationToken); + return _operationExecutor.ExecuteWriteOperation(operationContext, session, operation, true); } - private IWriteBindingHandle CreateReadWriteBinding(IClientSessionHandle session) + private async Task<TResult> ExecuteWriteOperationAsync<TResult>(IClientSessionHandle session, IWriteOperation<TResult> operation, TimeSpan? 
timeout, CancellationToken cancellationToken) { - return ChannelPinningHelper.CreateReadWriteBinding(_cluster, session.WrappedCoreSession.Fork()); + using var operationContext = CreateOperationContext(session, timeout, cancellationToken); + return await _operationExecutor.ExecuteWriteOperationAsync(operationContext, session, operation, true).ConfigureAwait(false); } private MessageEncoderSettings GetMessageEncoderSettings() @@ -1389,50 +1316,6 @@ private RenderArgs<TDocument> GetRenderArgs(ExpressionTranslationOptions transla return new RenderArgs<TDocument>(_documentSerializer, _settings.SerializerRegistry, translationOptions: translationOptions); } - private TResult ExecuteReadOperation<TResult>(IClientSessionHandle session, IReadOperation<TResult> operation, CancellationToken cancellationToken = default(CancellationToken)) - { - var effectiveReadPreference = ReadPreferenceResolver.GetEffectiveReadPreference(session, null, _settings.ReadPreference); - return ExecuteReadOperation(session, operation, effectiveReadPreference, cancellationToken); - } - - private TResult ExecuteReadOperation<TResult>(IClientSessionHandle session, IReadOperation<TResult> operation, ReadPreference readPreference, CancellationToken cancellationToken = default(CancellationToken)) - { - using (var binding = CreateReadBinding(session, readPreference)) - { - return _operationExecutor.ExecuteReadOperation(binding, operation, cancellationToken); - } - } - - private Task<TResult> ExecuteReadOperationAsync<TResult>(IClientSessionHandle session, IReadOperation<TResult> operation, CancellationToken cancellationToken = default(CancellationToken)) - { - var effectiveReadPreference = ReadPreferenceResolver.GetEffectiveReadPreference(session, null, _settings.ReadPreference); - return ExecuteReadOperationAsync(session, operation, effectiveReadPreference, cancellationToken); - } - - private async Task<TResult> ExecuteReadOperationAsync<TResult>(IClientSessionHandle session, IReadOperation<TResult> 
operation, ReadPreference readPreference, CancellationToken cancellationToken = default(CancellationToken)) - { - using (var binding = CreateReadBinding(session, readPreference)) - { - return await _operationExecutor.ExecuteReadOperationAsync(binding, operation, cancellationToken).ConfigureAwait(false); - } - } - - private TResult ExecuteWriteOperation<TResult>(IClientSessionHandle session, IWriteOperation<TResult> operation, CancellationToken cancellationToken = default(CancellationToken)) - { - using (var binding = CreateReadWriteBinding(session)) - { - return _operationExecutor.ExecuteWriteOperation(binding, operation, cancellationToken); - } - } - - private async Task<TResult> ExecuteWriteOperationAsync<TResult>(IClientSessionHandle session, IWriteOperation<TResult> operation, CancellationToken cancellationToken = default(CancellationToken)) - { - using (var binding = CreateReadWriteBinding(session)) - { - return await _operationExecutor.ExecuteWriteOperationAsync(binding, operation, cancellationToken).ConfigureAwait(false); - } - } - private IEnumerable<BsonDocument> RenderArrayFilters(IEnumerable<ArrayFilterDefinition> arrayFilters) { if (arrayFilters == null) @@ -1465,38 +1348,6 @@ private IBsonSerializer<TResult> ResolveResultSerializer<TResult>(IBsonSerialize return _settings.SerializerRegistry.GetSerializer<TResult>(); } - private void UsingImplicitSession(Action<IClientSessionHandle> func, CancellationToken cancellationToken = default(CancellationToken)) - { - using (var session = _operationExecutor.StartImplicitSession(cancellationToken)) - { - func(session); - } - } - - private TResult UsingImplicitSession<TResult>(Func<IClientSessionHandle, TResult> func, CancellationToken cancellationToken = default(CancellationToken)) - { - using (var session = _operationExecutor.StartImplicitSession(cancellationToken)) - { - return func(session); - } - } - - private async Task UsingImplicitSessionAsync(Func<IClientSessionHandle, Task> funcAsync, CancellationToken 
cancellationToken = default(CancellationToken)) - { - using (var session = await _operationExecutor.StartImplicitSessionAsync(cancellationToken).ConfigureAwait(false)) - { - await funcAsync(session).ConfigureAwait(false); - } - } - - private async Task<TResult> UsingImplicitSessionAsync<TResult>(Func<IClientSessionHandle, Task<TResult>> funcAsync, CancellationToken cancellationToken = default(CancellationToken)) - { - using (var session = await _operationExecutor.StartImplicitSessionAsync(cancellationToken).ConfigureAwait(false)) - { - return await funcAsync(session).ConfigureAwait(false); - } - } - // nested types private class MongoIndexManager : MongoIndexManagerBase<TDocument> { @@ -1526,231 +1377,195 @@ public override MongoCollectionSettings Settings } // public methods - public override IEnumerable<string> CreateMany(IEnumerable<CreateIndexModel<TDocument>> models, CancellationToken cancellationToken = default(CancellationToken)) - { - return CreateMany(models, null, cancellationToken); - } + public override IEnumerable<string> CreateMany(IEnumerable<CreateIndexModel<TDocument>> models, CancellationToken cancellationToken = default) + => CreateMany(models, null, cancellationToken: cancellationToken); public override IEnumerable<string> CreateMany( IEnumerable<CreateIndexModel<TDocument>> models, CreateManyIndexesOptions options, - CancellationToken cancellationToken = default(CancellationToken)) + CancellationToken cancellationToken = default) { - return _collection.UsingImplicitSession(session => CreateMany(session, models, options, cancellationToken), cancellationToken); + using var session = _collection._operationExecutor.StartImplicitSession(); + return CreateMany(session, models, options, cancellationToken); } - public override IEnumerable<string> CreateMany(IClientSessionHandle session, IEnumerable<CreateIndexModel<TDocument>> models, CancellationToken cancellationToken = default(CancellationToken)) - { - return CreateMany(session, models, null, 
cancellationToken); - } + public override IEnumerable<string> CreateMany(IClientSessionHandle session, IEnumerable<CreateIndexModel<TDocument>> models, CancellationToken cancellationToken = default) + => CreateMany(session, models, null, cancellationToken: cancellationToken); public override IEnumerable<string> CreateMany( IClientSessionHandle session, IEnumerable<CreateIndexModel<TDocument>> models, CreateManyIndexesOptions options, - CancellationToken cancellationToken = default(CancellationToken)) + CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); Ensure.IsNotNull(models, nameof(models)); - var renderArgs = _collection.GetRenderArgs(); - var requests = CreateCreateIndexRequests(models, renderArgs); - var operation = CreateCreateIndexesOperation(requests, options); - _collection.ExecuteWriteOperation(session, operation, cancellationToken); - - return requests.Select(x => x.GetIndexName()); + var operation = CreateCreateIndexesOperation(models, options); + _collection.ExecuteWriteOperation(session, operation, options?.Timeout, cancellationToken); + return operation.Requests.Select(x => x.GetIndexName()); } - public override Task<IEnumerable<string>> CreateManyAsync(IEnumerable<CreateIndexModel<TDocument>> models, CancellationToken cancellationToken = default(CancellationToken)) - { - return CreateManyAsync(models, null, cancellationToken); - } + public override Task<IEnumerable<string>> CreateManyAsync(IEnumerable<CreateIndexModel<TDocument>> models, CancellationToken cancellationToken = default) + => CreateManyAsync(models, null, cancellationToken: cancellationToken); - public override Task<IEnumerable<string>> CreateManyAsync( + public override async Task<IEnumerable<string>> CreateManyAsync( IEnumerable<CreateIndexModel<TDocument>> models, CreateManyIndexesOptions options, - CancellationToken cancellationToken = default(CancellationToken)) + CancellationToken cancellationToken = default) { - return 
_collection.UsingImplicitSessionAsync(session => CreateManyAsync(session, models, options, cancellationToken), cancellationToken); + using var session = _collection._operationExecutor.StartImplicitSession(); + return await CreateManyAsync(session, models, options, cancellationToken).ConfigureAwait(false); } - public override Task<IEnumerable<string>> CreateManyAsync(IClientSessionHandle session, IEnumerable<CreateIndexModel<TDocument>> models, CancellationToken cancellationToken = default(CancellationToken)) - { - return CreateManyAsync(session, models, null, cancellationToken); - } + public override Task<IEnumerable<string>> CreateManyAsync(IClientSessionHandle session, IEnumerable<CreateIndexModel<TDocument>> models, CancellationToken cancellationToken = default) + => CreateManyAsync(session, models, null, cancellationToken: cancellationToken); public override async Task<IEnumerable<string>> CreateManyAsync( IClientSessionHandle session, IEnumerable<CreateIndexModel<TDocument>> models, CreateManyIndexesOptions options, - CancellationToken cancellationToken = default(CancellationToken)) + CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); Ensure.IsNotNull(models, nameof(models)); - var renderArgs = _collection.GetRenderArgs(); - var requests = CreateCreateIndexRequests(models, renderArgs); - var operation = CreateCreateIndexesOperation(requests, options); - await _collection.ExecuteWriteOperationAsync(session, operation, cancellationToken).ConfigureAwait(false); - - return requests.Select(x => x.GetIndexName()); + var operation = CreateCreateIndexesOperation(models, options); + await _collection.ExecuteWriteOperationAsync(session, operation, options?.Timeout, cancellationToken).ConfigureAwait(false); + return operation.Requests.Select(x => x.GetIndexName()); } public override void DropAll(CancellationToken cancellationToken) - { - _collection.UsingImplicitSession(session => DropAll(session, cancellationToken), 
cancellationToken); - } + => DropAll(options: null, cancellationToken: cancellationToken); - public override void DropAll(DropIndexOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public override void DropAll(DropIndexOptions options, CancellationToken cancellationToken = default) { - _collection.UsingImplicitSession(session => DropAll(session, options, cancellationToken), cancellationToken); + using var session = _collection._operationExecutor.StartImplicitSession(); + DropAll(session, options, cancellationToken); } - public override void DropAll(IClientSessionHandle session, CancellationToken cancellationToken = default(CancellationToken)) - { - DropAll(session, null, cancellationToken); - } + public override void DropAll(IClientSessionHandle session, CancellationToken cancellationToken = default) + => DropAll(session, null, cancellationToken); - public override void DropAll(IClientSessionHandle session, DropIndexOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public override void DropAll(IClientSessionHandle session, DropIndexOptions options, CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); var operation = CreateDropAllOperation(options); - _collection.ExecuteWriteOperation(session, operation, cancellationToken); + _collection.ExecuteWriteOperation(session, operation, options?.Timeout, cancellationToken); } public override Task DropAllAsync(CancellationToken cancellationToken) - { - return _collection.UsingImplicitSessionAsync(session => DropAllAsync(session, cancellationToken), cancellationToken); - } + => DropAllAsync(options: null, cancellationToken); - public override Task DropAllAsync(DropIndexOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public override async Task DropAllAsync(DropIndexOptions options, CancellationToken cancellationToken = default) { - return 
_collection.UsingImplicitSessionAsync(session => DropAllAsync(session, options, cancellationToken), cancellationToken); + using var session = _collection._operationExecutor.StartImplicitSession(); + await DropAllAsync(session, options, cancellationToken).ConfigureAwait(false); } - public override Task DropAllAsync(IClientSessionHandle session, CancellationToken cancellationToken = default(CancellationToken)) - { - return DropAllAsync(session, null, cancellationToken); - } + public override Task DropAllAsync(IClientSessionHandle session, CancellationToken cancellationToken = default) + => DropAllAsync(session, null, cancellationToken); - public override Task DropAllAsync(IClientSessionHandle session, DropIndexOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public override Task DropAllAsync(IClientSessionHandle session, DropIndexOptions options, CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); var operation = CreateDropAllOperation(options); - return _collection.ExecuteWriteOperationAsync(session, operation, cancellationToken); + return _collection.ExecuteWriteOperationAsync(session, operation, options?.Timeout, cancellationToken); } - public override void DropOne(string name, CancellationToken cancellationToken = default(CancellationToken)) - { - _collection.UsingImplicitSession(session => DropOne(session, name, cancellationToken), cancellationToken); - } + public override void DropOne(string name, CancellationToken cancellationToken = default) + => DropOne(name, null, cancellationToken); - public override void DropOne(string name, DropIndexOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public override void DropOne(string name, DropIndexOptions options, CancellationToken cancellationToken = default) { - _collection.UsingImplicitSession(session => DropOne(session, name, options, cancellationToken), cancellationToken); + using var session = 
_collection._operationExecutor.StartImplicitSession(); + DropOne(session, name, options, cancellationToken); } - public override void DropOne(IClientSessionHandle session, string name, CancellationToken cancellationToken = default(CancellationToken)) - { - DropOne(session, name, null, cancellationToken); - } + public override void DropOne(IClientSessionHandle session, string name, CancellationToken cancellationToken = default) + => DropOne(session, name, null, cancellationToken); - public override void DropOne( - IClientSessionHandle session, - string name, - DropIndexOptions options, - CancellationToken cancellationToken) + public override void DropOne(IClientSessionHandle session, string name, DropIndexOptions options, CancellationToken cancellationToken) { Ensure.IsNotNull(session, nameof(session)); Ensure.IsNotNullOrEmpty(name, nameof(name)); if (name == "*") { - throw new ArgumentException("Cannot specify '*' for the index name. Use DropAllAsync to drop all indexes.", "name"); + throw new ArgumentException($"Cannot specify '*' for the index name. 
Use {nameof(DropAll)} to drop all indexes.", nameof(name)); } var operation = CreateDropOneOperation(name, options); - _collection.ExecuteWriteOperation(session, operation, cancellationToken); + _collection.ExecuteWriteOperation(session, operation, options?.Timeout, cancellationToken); } - public override Task DropOneAsync(string name, CancellationToken cancellationToken = default(CancellationToken)) - { - return _collection.UsingImplicitSessionAsync(session => DropOneAsync(session, name, cancellationToken), cancellationToken); - } + public override Task DropOneAsync(string name, CancellationToken cancellationToken = default) + => DropOneAsync(name, null, cancellationToken); - public override Task DropOneAsync(string name, DropIndexOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public override async Task DropOneAsync(string name, DropIndexOptions options, CancellationToken cancellationToken = default) { - return _collection.UsingImplicitSessionAsync(session => DropOneAsync(session, name, options, cancellationToken), cancellationToken); + using var session = _collection._operationExecutor.StartImplicitSession(); + await DropOneAsync(session, name, options, cancellationToken).ConfigureAwait(false); } - public override Task DropOneAsync(IClientSessionHandle session, string name, CancellationToken cancellationToken = default(CancellationToken)) - { - return DropOneAsync(session, name, null, cancellationToken); - } + public override Task DropOneAsync(IClientSessionHandle session, string name, CancellationToken cancellationToken = default) + => DropOneAsync(session, name, null, cancellationToken); - public override Task DropOneAsync( - IClientSessionHandle session, - string name, - DropIndexOptions options, - CancellationToken cancellationToken) + public override Task DropOneAsync(IClientSessionHandle session, string name, DropIndexOptions options, CancellationToken cancellationToken) { Ensure.IsNotNull(session, nameof(session)); 
Ensure.IsNotNullOrEmpty(name, nameof(name)); if (name == "*") { - throw new ArgumentException("Cannot specify '*' for the index name. Use DropAllAsync to drop all indexes.", "name"); + throw new ArgumentException($"Cannot specify '*' for the index name. Use {nameof(DropAllAsync)} to drop all indexes.", nameof(name)); } var operation = CreateDropOneOperation(name, options); - return _collection.ExecuteWriteOperationAsync(session, operation, cancellationToken); + return _collection.ExecuteWriteOperationAsync(session, operation, options?.Timeout, cancellationToken); } - public override IAsyncCursor<BsonDocument> List(CancellationToken cancellationToken = default(CancellationToken)) - { - return List(options: null, cancellationToken); - } + public override IAsyncCursor<BsonDocument> List(CancellationToken cancellationToken = default) + => List(options: null, cancellationToken); - public override IAsyncCursor<BsonDocument> List(ListIndexesOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public override IAsyncCursor<BsonDocument> List(ListIndexesOptions options, CancellationToken cancellationToken = default) { - return _collection.UsingImplicitSession(session => List(session, options, cancellationToken), cancellationToken); + using var session = _collection._operationExecutor.StartImplicitSession(); + return List(session, options, cancellationToken); } public override IAsyncCursor<BsonDocument> List(IClientSessionHandle session, CancellationToken cancellationToken = default) - { - return List(session, options: null, cancellationToken); - } + => List(session, options: null, cancellationToken); - public override IAsyncCursor<BsonDocument> List(IClientSessionHandle session, ListIndexesOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public override IAsyncCursor<BsonDocument> List(IClientSessionHandle session, ListIndexesOptions options, CancellationToken cancellationToken = default) { 
Ensure.IsNotNull(session, nameof(session)); var operation = CreateListIndexesOperation(options); - return _collection.ExecuteReadOperation(session, operation, ReadPreference.Primary, cancellationToken); + return _collection.ExecuteReadOperation(session, operation, options?.Timeout, cancellationToken); } - public override Task<IAsyncCursor<BsonDocument>> ListAsync(CancellationToken cancellationToken = default(CancellationToken)) - { - return ListAsync(options: null, cancellationToken); - } + public override Task<IAsyncCursor<BsonDocument>> ListAsync(CancellationToken cancellationToken = default) + => ListAsync(options: null, cancellationToken); - public override Task<IAsyncCursor<BsonDocument>> ListAsync(ListIndexesOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public override async Task<IAsyncCursor<BsonDocument>> ListAsync(ListIndexesOptions options, CancellationToken cancellationToken = default) { - return _collection.UsingImplicitSessionAsync(session => ListAsync(session, options, cancellationToken), cancellationToken); + using var session = _collection._operationExecutor.StartImplicitSession(); + return await ListAsync(session, options, cancellationToken).ConfigureAwait(false); } public override Task<IAsyncCursor<BsonDocument>> ListAsync(IClientSessionHandle session, CancellationToken cancellationToken = default) - { - return ListAsync(session, options: null, cancellationToken); - } + => ListAsync(session, options: null, cancellationToken); - public override Task<IAsyncCursor<BsonDocument>> ListAsync(IClientSessionHandle session, ListIndexesOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public override Task<IAsyncCursor<BsonDocument>> ListAsync(IClientSessionHandle session, ListIndexesOptions options, CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); var operation = CreateListIndexesOperation(options); - return 
_collection.ExecuteReadOperationAsync(session, operation, ReadPreference.Primary, cancellationToken); + return _collection.ExecuteReadOperationAsync(session, operation, options?.Timeout, cancellationToken); } // private methods - private CreateIndexesOperation CreateCreateIndexesOperation(IEnumerable<CreateIndexRequest> requests, CreateManyIndexesOptions options) + private CreateIndexesOperation CreateCreateIndexesOperation(IEnumerable<CreateIndexModel<TDocument>> models, CreateManyIndexesOptions options) { + var requests = CreateCreateIndexRequests(models); + return new CreateIndexesOperation(_collection._collectionNamespace, requests, _collection._messageEncoderSettings) { Comment = options?.Comment, @@ -1760,8 +1575,9 @@ private CreateIndexesOperation CreateCreateIndexesOperation(IEnumerable<CreateIn }; } - private IEnumerable<CreateIndexRequest> CreateCreateIndexRequests(IEnumerable<CreateIndexModel<TDocument>> models, RenderArgs<TDocument> renderArgs) + private IEnumerable<CreateIndexRequest> CreateCreateIndexRequests(IEnumerable<CreateIndexModel<TDocument>> models) { + var renderArgs = _collection.GetRenderArgs(); return models.Select(m => { var options = m.Options ?? 
new CreateIndexOptions<TDocument>(); @@ -1841,20 +1657,20 @@ public MongoSearchIndexManager(MongoCollectionImpl<TDocument> collection) public IEnumerable<string> CreateMany(IEnumerable<CreateSearchIndexModel> models, CancellationToken cancellationToken = default) { + using var session = _collection._operationExecutor.StartImplicitSession(); var operation = CreateCreateIndexesOperation(models); - var result = _collection.UsingImplicitSession(session => _collection.ExecuteWriteOperation(session, operation, cancellationToken), cancellationToken); - var indexNames = GetIndexNames(result); - - return indexNames; + // TODO: CSOT: find a way to add timeout parameter to the interface method + var result = _collection.ExecuteWriteOperation(session, operation, null, cancellationToken); + return GetIndexNames(result); } public async Task<IEnumerable<string>> CreateManyAsync(IEnumerable<CreateSearchIndexModel> models, CancellationToken cancellationToken = default) { + using var session = _collection._operationExecutor.StartImplicitSession(); var operation = CreateCreateIndexesOperation(models); - var result = await _collection.UsingImplicitSessionAsync(session => _collection.ExecuteWriteOperationAsync(session, operation, cancellationToken), cancellationToken).ConfigureAwait(false); - var indexNames = GetIndexNames(result); - - return indexNames; + // TODO: CSOT: find a way to add timeout parameter to the interface method + var result = await _collection.ExecuteWriteOperationAsync(session, operation, null, cancellationToken).ConfigureAwait(false); + return GetIndexNames(result); } public string CreateOne(BsonDocument definition, string name = null, CancellationToken cancellationToken = default) => @@ -1877,14 +1693,18 @@ public async Task<string> CreateOneAsync(CreateSearchIndexModel model, Cancellat public void DropOne(string indexName, CancellationToken cancellationToken = default) { + using var session = _collection._operationExecutor.StartImplicitSession(); var operation = 
new DropSearchIndexOperation(_collection.CollectionNamespace, indexName, _collection._messageEncoderSettings); - _collection.UsingImplicitSession(session => _collection.ExecuteWriteOperation(session, operation, cancellationToken), cancellationToken); + // TODO: CSOT: find a way to add timeout parameter to the interface method + _collection.ExecuteWriteOperation(session, operation, null, cancellationToken); } - public Task DropOneAsync(string indexName, CancellationToken cancellationToken = default) + public async Task DropOneAsync(string indexName, CancellationToken cancellationToken = default) { + using var session = _collection._operationExecutor.StartImplicitSession(); var operation = new DropSearchIndexOperation(_collection.CollectionNamespace, indexName, _collection._messageEncoderSettings); - return _collection.UsingImplicitSessionAsync(session => _collection.ExecuteWriteOperationAsync(session, operation, cancellationToken), cancellationToken); + // TODO: CSOT: find a way to add timeout parameter to the interface method + await _collection.ExecuteWriteOperationAsync(session, operation, null, cancellationToken).ConfigureAwait(false); } public IAsyncCursor<BsonDocument> List(string indexName, AggregateOptions aggregateOptions = null, CancellationToken cancellationToken = default) @@ -1899,16 +1719,18 @@ public Task<IAsyncCursor<BsonDocument>> ListAsync(string indexName, AggregateOpt public void Update(string indexName, BsonDocument definition, CancellationToken cancellationToken = default) { + using var session = _collection._operationExecutor.StartImplicitSession(); var operation = new UpdateSearchIndexOperation(_collection.CollectionNamespace, indexName, definition, _collection._messageEncoderSettings); - - _collection.UsingImplicitSession(session => _collection.ExecuteWriteOperation(session, operation, cancellationToken), cancellationToken); + // TODO: CSOT: find a way to add timeout parameter to the interface method + 
_collection.ExecuteWriteOperation(session, operation, null, cancellationToken); } public async Task UpdateAsync(string indexName, BsonDocument definition, CancellationToken cancellationToken = default) { + using var session = _collection._operationExecutor.StartImplicitSession(); var operation = new UpdateSearchIndexOperation(_collection.CollectionNamespace, indexName, definition, _collection._messageEncoderSettings); - - await _collection.UsingImplicitSessionAsync(session => _collection.ExecuteWriteOperationAsync(session, operation, cancellationToken), cancellationToken).ConfigureAwait(false); + // TODO: CSOT: find a way to add timeout parameter to the interface method + await _collection.ExecuteWriteOperationAsync(session, operation, null, cancellationToken).ConfigureAwait(false); } // private methods diff --git a/src/MongoDB.Driver/MongoCollectionSettings.cs b/src/MongoDB.Driver/MongoCollectionSettings.cs index a8489008921..81691de6f09 100644 --- a/src/MongoDB.Driver/MongoCollectionSettings.cs +++ b/src/MongoDB.Driver/MongoCollectionSettings.cs @@ -16,7 +16,6 @@ using System; using System.Collections.Generic; using System.Text; -using MongoDB.Bson; using MongoDB.Bson.Serialization; using MongoDB.Driver.Core.Misc; @@ -32,6 +31,7 @@ public class MongoCollectionSettings private Setting<ReadConcern> _readConcern; private Setting<UTF8Encoding> _readEncoding; private Setting<ReadPreference> _readPreference; + private TimeSpan? _timeout; private Setting<WriteConcern> _writeConcern; private Setting<UTF8Encoding> _writeEncoding; @@ -121,6 +121,20 @@ public IBsonSerializerRegistry SerializerRegistry get { return BsonSerializer.SerializerRegistry; } } + /// <summary> + /// Gets or sets the per-operation timeout + /// </summary> + // TODO: CSOT: Make it public when CSOT will be ready for GA + internal TimeSpan? 
Timeout + { + get { return _timeout; } + set + { + if (_isFrozen) { throw new InvalidOperationException("MongoCollectionSettings is frozen."); } + _timeout = Ensure.IsNullOrValidTimeout(value, nameof(Timeout)); + } + } + /// <summary> /// Gets or sets the WriteConcern to use. /// </summary> @@ -163,6 +177,7 @@ public virtual MongoCollectionSettings Clone() clone._readConcern = _readConcern.Clone(); clone._readEncoding = _readEncoding.Clone(); clone._readPreference = _readPreference.Clone(); + clone._timeout = _timeout; clone._writeConcern = _writeConcern.Clone(); clone._writeEncoding = _writeEncoding.Clone(); return clone; @@ -193,6 +208,7 @@ public override bool Equals(object obj) object.Equals(_readConcern.Value, rhs._readConcern.Value) && object.Equals(_readEncoding, rhs._readEncoding) && _readPreference.Value == rhs._readPreference.Value && + _timeout == rhs._timeout && _writeConcern.Value == rhs._writeConcern.Value && object.Equals(_writeEncoding, rhs._writeEncoding); } @@ -247,6 +263,7 @@ public override int GetHashCode() hash = 37 * hash + ((_readConcern.Value == null) ? 0 : _readConcern.Value.GetHashCode()); hash = 37 * hash + ((_readEncoding.Value == null) ? 0 : _readEncoding.Value.GetHashCode()); hash = 37 * hash + ((_readPreference.Value == null) ? 0 : _readPreference.Value.GetHashCode()); + hash = 37 * hash + _timeout?.GetHashCode() ?? 0; hash = 37 * hash + ((_writeConcern.Value == null) ? 0 : _writeConcern.Value.GetHashCode()); hash = 37 * hash + ((_writeEncoding.Value == null) ? 0 : _writeEncoding.Value.GetHashCode()); return hash; @@ -271,6 +288,10 @@ public override string ToString() parts.Add(string.Format("ReadEncoding={0}", (_readEncoding.Value == null) ? 
"null" : "UTF8Encoding")); } parts.Add(string.Format("ReadPreference={0}", _readPreference)); + if (_timeout.HasValue) + { + parts.Add(string.Format("Timeout={0}", _timeout)); + } parts.Add(string.Format("WriteConcern={0}", _writeConcern)); if (_writeEncoding.HasBeenSet) { @@ -298,6 +319,10 @@ internal void ApplyDefaultValues(MongoDatabaseSettings databaseSettings) { ReadPreference = databaseSettings.ReadPreference; } + if (!_timeout.HasValue) + { + Timeout = databaseSettings.Timeout; + } if (!_writeConcern.HasBeenSet) { WriteConcern = databaseSettings.WriteConcern; diff --git a/src/MongoDB.Driver/MongoDatabase.cs b/src/MongoDB.Driver/MongoDatabase.cs index a57eec55d94..31b872da989 100644 --- a/src/MongoDB.Driver/MongoDatabase.cs +++ b/src/MongoDB.Driver/MongoDatabase.cs @@ -23,8 +23,6 @@ using MongoDB.Bson.IO; using MongoDB.Bson.Serialization; using MongoDB.Bson.Serialization.Serializers; -using MongoDB.Driver.Core; -using MongoDB.Driver.Core.Bindings; using MongoDB.Driver.Core.Clusters; using MongoDB.Driver.Core.Misc; using MongoDB.Driver.Core.Operations; @@ -58,131 +56,110 @@ public MongoDatabase(IMongoClient client, DatabaseNamespace databaseNamespace, M public MongoDatabaseSettings Settings => _settings; // public methods - public IAsyncCursor<TResult> Aggregate<TResult>(PipelineDefinition<NoPipelineInput, TResult> pipeline, AggregateOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public IAsyncCursor<TResult> Aggregate<TResult>(PipelineDefinition<NoPipelineInput, TResult> pipeline, AggregateOptions options, CancellationToken cancellationToken = default) { - return UsingImplicitSession(session => Aggregate(session, pipeline, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + return Aggregate(session, pipeline, options, cancellationToken: cancellationToken); } - public IAsyncCursor<TResult> Aggregate<TResult>(IClientSessionHandle session, 
PipelineDefinition<NoPipelineInput, TResult> pipeline, AggregateOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public IAsyncCursor<TResult> Aggregate<TResult>(IClientSessionHandle session, PipelineDefinition<NoPipelineInput, TResult> pipeline, AggregateOptions options, CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); - var renderArgs = GetRenderArgs(NoPipelineInputSerializer.Instance, options?.TranslationOptions); - var renderedPipeline = Ensure.IsNotNull(pipeline, nameof(pipeline)).Render(renderArgs); - options = options ?? new AggregateOptions(); + Ensure.IsNotNull(pipeline, nameof(pipeline)); + options ??= new AggregateOptions(); - var lastStage = renderedPipeline.Documents.LastOrDefault(); - var lastStageName = lastStage?.GetElement(0).Name; - if (lastStage != null && (lastStageName == "$out" || lastStageName == "$merge")) + var renderArgs = GetRenderArgs(NoPipelineInputSerializer.Instance, options.TranslationOptions); + var renderedPipeline = AggregateHelper.RenderAggregatePipeline(pipeline, renderArgs, out var isAggregateToCollection); + if (isAggregateToCollection) { var aggregateOperation = CreateAggregateToCollectionOperation(renderedPipeline, options); - ExecuteWriteOperation(session, aggregateOperation, cancellationToken); - - // we want to delay execution of the find because the user may - // not want to iterate the results at all... 
- var findOperation = CreateAggregateToCollectionFindOperation(lastStage, renderedPipeline.OutputSerializer, options); - var forkedSession = session.Fork(); - var deferredCursor = new DeferredAsyncCursor<TResult>( - () => forkedSession.Dispose(), - ct => ExecuteReadOperation(forkedSession, findOperation, ReadPreference.Primary, ct), - ct => ExecuteReadOperationAsync(forkedSession, findOperation, ReadPreference.Primary, ct)); - return deferredCursor; + ExecuteWriteOperation(session, aggregateOperation, options.Timeout, cancellationToken); + return CreateAggregateToCollectionResultCursor(session, renderedPipeline, options); } else { var aggregateOperation = CreateAggregateOperation(renderedPipeline, options); - return ExecuteReadOperation(session, aggregateOperation, cancellationToken); + return ExecuteReadOperation(session, aggregateOperation, options.Timeout, cancellationToken); } } - public Task<IAsyncCursor<TResult>> AggregateAsync<TResult>(PipelineDefinition<NoPipelineInput, TResult> pipeline, AggregateOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public async Task<IAsyncCursor<TResult>> AggregateAsync<TResult>(PipelineDefinition<NoPipelineInput, TResult> pipeline, AggregateOptions options, CancellationToken cancellationToken = default) { - return UsingImplicitSessionAsync(session => AggregateAsync(session, pipeline, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + return await AggregateAsync(session, pipeline, options, cancellationToken).ConfigureAwait(false); } - public async Task<IAsyncCursor<TResult>> AggregateAsync<TResult>(IClientSessionHandle session, PipelineDefinition<NoPipelineInput, TResult> pipeline, AggregateOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public async Task<IAsyncCursor<TResult>> AggregateAsync<TResult>(IClientSessionHandle session, PipelineDefinition<NoPipelineInput, TResult> pipeline, 
AggregateOptions options, CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); - var renderArgs = GetRenderArgs(NoPipelineInputSerializer.Instance, options?.TranslationOptions); - var renderedPipeline = Ensure.IsNotNull(pipeline, nameof(pipeline)).Render(renderArgs); - options = options ?? new AggregateOptions(); + Ensure.IsNotNull(pipeline, nameof(pipeline)); + options ??= new AggregateOptions(); - var lastStage = renderedPipeline.Documents.LastOrDefault(); - var lastStageName = lastStage?.GetElement(0).Name; - if (lastStage != null && (lastStageName == "$out" || lastStageName == "$merge")) + var renderArgs = GetRenderArgs(NoPipelineInputSerializer.Instance, options.TranslationOptions); + var renderedPipeline = AggregateHelper.RenderAggregatePipeline(pipeline, renderArgs, out var isAggregateToCollection); + if (isAggregateToCollection) { var aggregateOperation = CreateAggregateToCollectionOperation(renderedPipeline, options); - await ExecuteWriteOperationAsync(session, aggregateOperation, cancellationToken).ConfigureAwait(false); - - // we want to delay execution of the find because the user may - // not want to iterate the results at all... 
- var findOperation = CreateAggregateToCollectionFindOperation(lastStage, renderedPipeline.OutputSerializer, options); - var forkedSession = session.Fork(); - var deferredCursor = new DeferredAsyncCursor<TResult>( - () => forkedSession.Dispose(), - ct => ExecuteReadOperation(forkedSession, findOperation, ReadPreference.Primary, ct), - ct => ExecuteReadOperationAsync(forkedSession, findOperation, ReadPreference.Primary, ct)); - return await Task.FromResult<IAsyncCursor<TResult>>(deferredCursor).ConfigureAwait(false); + await ExecuteWriteOperationAsync(session, aggregateOperation, options.Timeout, cancellationToken).ConfigureAwait(false); + return CreateAggregateToCollectionResultCursor(session, renderedPipeline, options); } else { var aggregateOperation = CreateAggregateOperation(renderedPipeline, options); - return await ExecuteReadOperationAsync(session, aggregateOperation, cancellationToken).ConfigureAwait(false); + return await ExecuteReadOperationAsync(session, aggregateOperation, options.Timeout, cancellationToken).ConfigureAwait(false); } } - public void AggregateToCollection<TResult>(PipelineDefinition<NoPipelineInput, TResult> pipeline, AggregateOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public void AggregateToCollection<TResult>(PipelineDefinition<NoPipelineInput, TResult> pipeline, AggregateOptions options, CancellationToken cancellationToken = default) { - UsingImplicitSession(session => AggregateToCollection(session, pipeline, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + AggregateToCollection(session, pipeline, options, cancellationToken); } - public void AggregateToCollection<TResult>(IClientSessionHandle session, PipelineDefinition<NoPipelineInput, TResult> pipeline, AggregateOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public void AggregateToCollection<TResult>(IClientSessionHandle session, 
PipelineDefinition<NoPipelineInput, TResult> pipeline, AggregateOptions options, CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); - var renderArgs = GetRenderArgs(NoPipelineInputSerializer.Instance, options?.TranslationOptions); - var renderedPipeline = Ensure.IsNotNull(pipeline, nameof(pipeline)).Render(renderArgs); - options = options ?? new AggregateOptions(); + Ensure.IsNotNull(pipeline, nameof(pipeline)); + options ??= new AggregateOptions(); - var lastStage = renderedPipeline.Documents.LastOrDefault(); - var lastStageName = lastStage?.GetElement(0).Name; - if (lastStage == null || (lastStageName != "$out" && lastStageName != "$merge")) + var renderArgs = GetRenderArgs(NoPipelineInputSerializer.Instance, options.TranslationOptions); + var renderedPipeline = AggregateHelper.RenderAggregatePipeline(pipeline, renderArgs, out var isAggregateToCollection); + if (!isAggregateToCollection) { throw new InvalidOperationException("AggregateToCollection requires that the last stage be $out or $merge."); } - else - { - var aggregateOperation = CreateAggregateToCollectionOperation(renderedPipeline, options); - ExecuteWriteOperation(session, aggregateOperation, cancellationToken); - } + + var aggregateOperation = CreateAggregateToCollectionOperation(renderedPipeline, options); + ExecuteWriteOperation(session, aggregateOperation, options.Timeout, cancellationToken); } - public Task AggregateToCollectionAsync<TResult>(PipelineDefinition<NoPipelineInput, TResult> pipeline, AggregateOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public async Task AggregateToCollectionAsync<TResult>(PipelineDefinition<NoPipelineInput, TResult> pipeline, AggregateOptions options, CancellationToken cancellationToken = default) { - return UsingImplicitSessionAsync(session => AggregateToCollectionAsync(session, pipeline, options, cancellationToken), cancellationToken); + using var session = 
_operationExecutor.StartImplicitSession(); + await AggregateToCollectionAsync(session, pipeline, options, cancellationToken).ConfigureAwait(false); } - public async Task AggregateToCollectionAsync<TResult>(IClientSessionHandle session, PipelineDefinition<NoPipelineInput, TResult> pipeline, AggregateOptions options, CancellationToken cancellationToken = default(CancellationToken)) + public Task AggregateToCollectionAsync<TResult>(IClientSessionHandle session, PipelineDefinition<NoPipelineInput, TResult> pipeline, AggregateOptions options, CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); - var renderArgs = GetRenderArgs(NoPipelineInputSerializer.Instance, options?.TranslationOptions); - var renderedPipeline = Ensure.IsNotNull(pipeline, nameof(pipeline)).Render(renderArgs); - options = options ?? new AggregateOptions(); + Ensure.IsNotNull(pipeline, nameof(pipeline)); + options ??= new AggregateOptions(); - var lastStage = renderedPipeline.Documents.LastOrDefault(); - var lastStageName = lastStage?.GetElement(0).Name; - if (lastStage == null || (lastStageName != "$out" && lastStageName != "$merge")) + var renderArgs = GetRenderArgs(NoPipelineInputSerializer.Instance, options.TranslationOptions); + var renderedPipeline = AggregateHelper.RenderAggregatePipeline(pipeline, renderArgs, out var isAggregateToCollection); + if (!isAggregateToCollection) { throw new InvalidOperationException("AggregateToCollectionAsync requires that the last stage be $out or $merge."); } - else - { - var aggregateOperation = CreateAggregateToCollectionOperation(renderedPipeline, options); - await ExecuteWriteOperationAsync(session, aggregateOperation, cancellationToken).ConfigureAwait(false); - } + + var aggregateOperation = CreateAggregateToCollectionOperation(renderedPipeline, options); + return ExecuteWriteOperationAsync(session, aggregateOperation, options.Timeout, cancellationToken); } public void CreateCollection(string name, 
CreateCollectionOptions options, CancellationToken cancellationToken) { - UsingImplicitSession(session => CreateCollection(session, name, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + CreateCollection(session, name, options, cancellationToken); } public void CreateCollection(IClientSessionHandle session, string name, CreateCollectionOptions options, CancellationToken cancellationToken) @@ -203,7 +180,7 @@ public void CreateCollection(IClientSessionHandle session, string name, CreateCo return; } - var genericMethodDefinition = typeof(MongoDatabase).GetTypeInfo().GetMethod("CreateCollectionHelper", BindingFlags.NonPublic | BindingFlags.Instance); + var genericMethodDefinition = typeof(MongoDatabase).GetTypeInfo().GetMethod(nameof(CreateCollectionHelper), BindingFlags.NonPublic | BindingFlags.Instance); var documentType = options.GetType().GetTypeInfo().GetGenericArguments()[0]; var methodInfo = genericMethodDefinition.MakeGenericMethod(documentType); try @@ -216,9 +193,10 @@ public void CreateCollection(IClientSessionHandle session, string name, CreateCo } } - public Task CreateCollectionAsync(string name, CreateCollectionOptions options, CancellationToken cancellationToken) + public async Task CreateCollectionAsync(string name, CreateCollectionOptions options, CancellationToken cancellationToken) { - return UsingImplicitSessionAsync(session => CreateCollectionAsync(session, name, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + await CreateCollectionAsync(session, name, options, cancellationToken).ConfigureAwait(false); } public async Task CreateCollectionAsync(IClientSessionHandle session, string name, CreateCollectionOptions options, CancellationToken cancellationToken) @@ -239,7 +217,7 @@ public async Task CreateCollectionAsync(IClientSessionHandle session, string nam return; } - var genericMethodDefinition = 
typeof(MongoDatabase).GetTypeInfo().GetMethod("CreateCollectionHelperAsync", BindingFlags.NonPublic | BindingFlags.Instance); + var genericMethodDefinition = typeof(MongoDatabase).GetTypeInfo().GetMethod(nameof(CreateCollectionHelperAsync), BindingFlags.NonPublic | BindingFlags.Instance); var documentType = options.GetType().GetTypeInfo().GetGenericArguments()[0]; var methodInfo = genericMethodDefinition.MakeGenericMethod(documentType); try @@ -252,38 +230,38 @@ public async Task CreateCollectionAsync(IClientSessionHandle session, string nam } } - public void CreateView<TDocument, TResult>(string viewName, string viewOn, PipelineDefinition<TDocument, TResult> pipeline, CreateViewOptions<TDocument> options = null, CancellationToken cancellationToken = default(CancellationToken)) + public void CreateView<TDocument, TResult>(string viewName, string viewOn, PipelineDefinition<TDocument, TResult> pipeline, CreateViewOptions<TDocument> options = null, CancellationToken cancellationToken = default) { - UsingImplicitSession(session => CreateView(session, viewName, viewOn, pipeline, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + CreateView(session, viewName, viewOn, pipeline, options, cancellationToken); } - public void CreateView<TDocument, TResult>(IClientSessionHandle session, string viewName, string viewOn, PipelineDefinition<TDocument, TResult> pipeline, CreateViewOptions<TDocument> options = null, CancellationToken cancellationToken = default(CancellationToken)) + public void CreateView<TDocument, TResult>(IClientSessionHandle session, string viewName, string viewOn, PipelineDefinition<TDocument, TResult> pipeline, CreateViewOptions<TDocument> options = null, CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); Ensure.IsNotNull(viewName, nameof(viewName)); Ensure.IsNotNull(viewOn, nameof(viewOn)); Ensure.IsNotNull(pipeline, nameof(pipeline)); - options = 
options ?? new CreateViewOptions<TDocument>(); - var translationOptions = _client.Settings.TranslationOptions; - var operation = CreateCreateViewOperation(viewName, viewOn, pipeline, options, translationOptions); - ExecuteWriteOperation(session, operation, cancellationToken); + + var operation = CreateCreateViewOperation(viewName, viewOn, pipeline, options); + ExecuteWriteOperation(session, operation, options?.Timeout, cancellationToken); } - public Task CreateViewAsync<TDocument, TResult>(string viewName, string viewOn, PipelineDefinition<TDocument, TResult> pipeline, CreateViewOptions<TDocument> options = null, CancellationToken cancellationToken = default(CancellationToken)) + public async Task CreateViewAsync<TDocument, TResult>(string viewName, string viewOn, PipelineDefinition<TDocument, TResult> pipeline, CreateViewOptions<TDocument> options = null, CancellationToken cancellationToken = default) { - return UsingImplicitSessionAsync(session => CreateViewAsync(session, viewName, viewOn, pipeline, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + await CreateViewAsync(session, viewName, viewOn, pipeline, options, cancellationToken).ConfigureAwait(false); } - public Task CreateViewAsync<TDocument, TResult>(IClientSessionHandle session, string viewName, string viewOn, PipelineDefinition<TDocument, TResult> pipeline, CreateViewOptions<TDocument> options = null, CancellationToken cancellationToken = default(CancellationToken)) + public Task CreateViewAsync<TDocument, TResult>(IClientSessionHandle session, string viewName, string viewOn, PipelineDefinition<TDocument, TResult> pipeline, CreateViewOptions<TDocument> options = null, CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); Ensure.IsNotNull(viewName, nameof(viewName)); Ensure.IsNotNull(viewOn, nameof(viewOn)); Ensure.IsNotNull(pipeline, nameof(pipeline)); - options = options ?? 
new CreateViewOptions<TDocument>(); - var translationOptions = _client.Settings.TranslationOptions; - var operation = CreateCreateViewOperation(viewName, viewOn, pipeline, options, translationOptions); - return ExecuteWriteOperationAsync(session, operation, cancellationToken); + + var operation = CreateCreateViewOperation(viewName, viewOn, pipeline, options); + return ExecuteWriteOperationAsync(session, operation, options?.Timeout, cancellationToken); } public void DropCollection(string name, CancellationToken cancellationToken) @@ -293,7 +271,8 @@ public void DropCollection(string name, CancellationToken cancellationToken) public void DropCollection(string name, DropCollectionOptions options, CancellationToken cancellationToken = default) { - UsingImplicitSession(session => DropCollection(session, name, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + DropCollection(session, name, options, cancellationToken); } public void DropCollection(IClientSessionHandle session, string name, CancellationToken cancellationToken) @@ -305,31 +284,34 @@ public void DropCollection(IClientSessionHandle session, string name, DropCollec { Ensure.IsNotNull(session, nameof(session)); Ensure.IsNotNullOrEmpty(name, nameof(name)); - var operation = CreateDropCollectionOperation(name, options, session, cancellationToken); - ExecuteWriteOperation(session, operation, cancellationToken); + + var collectionNamespace = new CollectionNamespace(_databaseNamespace, name); + var encryptedFields = GetEffectiveEncryptedFields(session, collectionNamespace, options, cancellationToken); + var operation = CreateDropCollectionOperation(collectionNamespace, encryptedFields); + ExecuteWriteOperation(session, operation, options?.Timeout, cancellationToken); } public Task DropCollectionAsync(string name, CancellationToken cancellationToken) - { - return DropCollectionAsync(name, options: null, cancellationToken); - } + => 
DropCollectionAsync(name, options: null, cancellationToken); - public Task DropCollectionAsync(string name, DropCollectionOptions options, CancellationToken cancellationToken) + public async Task DropCollectionAsync(string name, DropCollectionOptions options, CancellationToken cancellationToken) { - return UsingImplicitSessionAsync(session => DropCollectionAsync(session, name, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + await DropCollectionAsync(session, name, options, cancellationToken).ConfigureAwait(false); } public Task DropCollectionAsync(IClientSessionHandle session, string name, CancellationToken cancellationToken) - { - return DropCollectionAsync(session, name, options: null, cancellationToken); - } + => DropCollectionAsync(session, name, options: null, cancellationToken); public async Task DropCollectionAsync(IClientSessionHandle session, string name, DropCollectionOptions options, CancellationToken cancellationToken) { Ensure.IsNotNull(session, nameof(session)); Ensure.IsNotNullOrEmpty(name, nameof(name)); - var operation = await CreateDropCollectionOperationAsync(name, options, session, cancellationToken).ConfigureAwait(false); - await ExecuteWriteOperationAsync(session, operation, cancellationToken).ConfigureAwait(false); + + var collectionNamespace = new CollectionNamespace(_databaseNamespace, name); + var encryptedFields = await GetEffectiveEncryptedFieldsAsync(session, collectionNamespace, options, cancellationToken).ConfigureAwait(false); + var operation = CreateDropCollectionOperation(collectionNamespace, encryptedFields); + await ExecuteWriteOperationAsync(session, operation, options?.Timeout, cancellationToken).ConfigureAwait(false); } public IMongoCollection<TDocument> GetCollection<TDocument>(string name, MongoCollectionSettings settings) @@ -345,67 +327,68 @@ public IMongoCollection<TDocument> GetCollection<TDocument>(string name, MongoCo return new 
MongoCollectionImpl<TDocument>(this, new CollectionNamespace(_databaseNamespace, name), settings, _cluster, _operationExecutor); } - public IAsyncCursor<string> ListCollectionNames(ListCollectionNamesOptions options = null, CancellationToken cancellationToken = default(CancellationToken)) + public IAsyncCursor<string> ListCollectionNames(ListCollectionNamesOptions options = null, CancellationToken cancellationToken = default) { - return UsingImplicitSession(session => ListCollectionNames(session, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + return ListCollectionNames(session, options, cancellationToken); } - public IAsyncCursor<string> ListCollectionNames(IClientSessionHandle session, ListCollectionNamesOptions options = null, CancellationToken cancellationToken = default(CancellationToken)) + public IAsyncCursor<string> ListCollectionNames(IClientSessionHandle session, ListCollectionNamesOptions options = null, CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); - var renderArgs = GetRenderArgs(BsonDocumentSerializer.Instance); - var operation = CreateListCollectionNamesOperation(options, renderArgs); - var effectiveReadPreference = ReadPreferenceResolver.GetEffectiveReadPreference(session, null, ReadPreference.Primary); - var cursor = ExecuteReadOperation(session, operation, effectiveReadPreference, cancellationToken); + var operation = CreateListCollectionNamesOperation(options); + var readPreference = session.GetEffectiveReadPreference(ReadPreference.Primary); + var cursor = ExecuteReadOperation(session, operation, readPreference, options?.Timeout, cancellationToken); return new BatchTransformingAsyncCursor<BsonDocument, string>(cursor, ExtractCollectionNames); } - public Task<IAsyncCursor<string>> ListCollectionNamesAsync(ListCollectionNamesOptions options = null, CancellationToken cancellationToken = default(CancellationToken)) + public async 
Task<IAsyncCursor<string>> ListCollectionNamesAsync(ListCollectionNamesOptions options = null, CancellationToken cancellationToken = default) { - return UsingImplicitSessionAsync(session => ListCollectionNamesAsync(session, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + return await ListCollectionNamesAsync(session, options, cancellationToken).ConfigureAwait(false); } - public async Task<IAsyncCursor<string>> ListCollectionNamesAsync(IClientSessionHandle session, ListCollectionNamesOptions options = null, CancellationToken cancellationToken = default(CancellationToken)) + public async Task<IAsyncCursor<string>> ListCollectionNamesAsync(IClientSessionHandle session, ListCollectionNamesOptions options = null, CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); - var renderArgs = GetRenderArgs(BsonDocumentSerializer.Instance); - var operation = CreateListCollectionNamesOperation(options, renderArgs); - var effectiveReadPreference = ReadPreferenceResolver.GetEffectiveReadPreference(session, null, ReadPreference.Primary); - var cursor = await ExecuteReadOperationAsync(session, operation, effectiveReadPreference, cancellationToken).ConfigureAwait(false); + var operation = CreateListCollectionNamesOperation(options); + var readPreference = session.GetEffectiveReadPreference(ReadPreference.Primary); + var cursor = await ExecuteReadOperationAsync(session, operation, readPreference, options?.Timeout, cancellationToken).ConfigureAwait(false); return new BatchTransformingAsyncCursor<BsonDocument, string>(cursor, ExtractCollectionNames); } public IAsyncCursor<BsonDocument> ListCollections(ListCollectionsOptions options, CancellationToken cancellationToken) { - return UsingImplicitSession(session => ListCollections(session, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + return ListCollections(session, 
options, cancellationToken); } public IAsyncCursor<BsonDocument> ListCollections(IClientSessionHandle session, ListCollectionsOptions options, CancellationToken cancellationToken) { Ensure.IsNotNull(session, nameof(session)); - var renderArgs = GetRenderArgs(BsonDocumentSerializer.Instance); - var operation = CreateListCollectionsOperation(options, renderArgs); - var effectiveReadPreference = ReadPreferenceResolver.GetEffectiveReadPreference(session, null, ReadPreference.Primary); - return ExecuteReadOperation(session, operation, effectiveReadPreference, cancellationToken); + var operation = CreateListCollectionsOperation(options); + var readPreference = session.GetEffectiveReadPreference(ReadPreference.Primary); + return ExecuteReadOperation(session, operation, readPreference, options?.Timeout, cancellationToken); } - public Task<IAsyncCursor<BsonDocument>> ListCollectionsAsync(ListCollectionsOptions options, CancellationToken cancellationToken) + public async Task<IAsyncCursor<BsonDocument>> ListCollectionsAsync(ListCollectionsOptions options, CancellationToken cancellationToken) { - return UsingImplicitSessionAsync(session => ListCollectionsAsync(session, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + return await ListCollectionsAsync(session, options, cancellationToken).ConfigureAwait(false); } public Task<IAsyncCursor<BsonDocument>> ListCollectionsAsync(IClientSessionHandle session, ListCollectionsOptions options, CancellationToken cancellationToken) { Ensure.IsNotNull(session, nameof(session)); - var renderArgs = GetRenderArgs(BsonDocumentSerializer.Instance); - var operation = CreateListCollectionsOperation(options, renderArgs); - var effectiveReadPreference = ReadPreferenceResolver.GetEffectiveReadPreference(session, null, ReadPreference.Primary); - return ExecuteReadOperationAsync(session, operation, effectiveReadPreference, cancellationToken); + var operation = 
CreateListCollectionsOperation(options); + var readPreference = session.GetEffectiveReadPreference(ReadPreference.Primary); + return ExecuteReadOperationAsync(session, operation, readPreference, options?.Timeout, cancellationToken); } public void RenameCollection(string oldName, string newName, RenameCollectionOptions options, CancellationToken cancellationToken) { - UsingImplicitSession(session => RenameCollection(session, oldName, newName, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + RenameCollection(session, oldName, newName, options, cancellationToken); } public void RenameCollection(IClientSessionHandle session, string oldName, string newName, RenameCollectionOptions options, CancellationToken cancellationToken) @@ -413,15 +396,15 @@ public void RenameCollection(IClientSessionHandle session, string oldName, strin Ensure.IsNotNull(session, nameof(session)); Ensure.IsNotNullOrEmpty(oldName, nameof(oldName)); Ensure.IsNotNullOrEmpty(newName, nameof(newName)); - options = options ?? 
new RenameCollectionOptions(); var operation = CreateRenameCollectionOperation(oldName, newName, options); - ExecuteWriteOperation(session, operation, cancellationToken); + ExecuteWriteOperation(session, operation, options?.Timeout, cancellationToken); } - public Task RenameCollectionAsync(string oldName, string newName, RenameCollectionOptions options, CancellationToken cancellationToken) + public async Task RenameCollectionAsync(string oldName, string newName, RenameCollectionOptions options, CancellationToken cancellationToken) { - return UsingImplicitSessionAsync(session => RenameCollectionAsync(session, oldName, newName, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + await RenameCollectionAsync(session, oldName, newName, options, cancellationToken).ConfigureAwait(false); } public Task RenameCollectionAsync(IClientSessionHandle session, string oldName, string newName, RenameCollectionOptions options, CancellationToken cancellationToken) @@ -429,82 +412,97 @@ public Task RenameCollectionAsync(IClientSessionHandle session, string oldName, Ensure.IsNotNull(session, nameof(session)); Ensure.IsNotNullOrEmpty(oldName, nameof(oldName)); Ensure.IsNotNullOrEmpty(newName, nameof(newName)); - options = options ?? 
new RenameCollectionOptions(); var operation = CreateRenameCollectionOperation(oldName, newName, options); - return ExecuteWriteOperationAsync(session, operation, cancellationToken); + return ExecuteWriteOperationAsync(session, operation, options?.Timeout, cancellationToken); } - public TResult RunCommand<TResult>(Command<TResult> command, ReadPreference readPreference = null, CancellationToken cancellationToken = default(CancellationToken)) + public TResult RunCommand<TResult>(Command<TResult> command, ReadPreference readPreference = null, CancellationToken cancellationToken = default) { - return UsingImplicitSession(session => RunCommand(session, command, readPreference, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + return RunCommand(session, command, readPreference, cancellationToken); } - public TResult RunCommand<TResult>(IClientSessionHandle session, Command<TResult> command, ReadPreference readPreference = null, CancellationToken cancellationToken = default(CancellationToken)) + public TResult RunCommand<TResult>(IClientSessionHandle session, Command<TResult> command, ReadPreference readPreference = null, CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); Ensure.IsNotNull(command, nameof(command)); var operation = CreateRunCommandOperation(command); - var effectiveReadPreference = ReadPreferenceResolver.GetEffectiveReadPreference(session, readPreference, ReadPreference.Primary); - return ExecuteReadOperation(session, operation, effectiveReadPreference, cancellationToken); + var effectiveReadPreference = readPreference; + if (readPreference == null) + { + effectiveReadPreference = session.GetEffectiveReadPreference(ReadPreference.Primary); + } + + // TODO: CSOT: See what run command should do with timeout + return ExecuteReadOperation(session, operation, effectiveReadPreference, null, cancellationToken); } - public Task<TResult> 
RunCommandAsync<TResult>(Command<TResult> command, ReadPreference readPreference = null, CancellationToken cancellationToken = default(CancellationToken)) + public async Task<TResult> RunCommandAsync<TResult>(Command<TResult> command, ReadPreference readPreference = null, CancellationToken cancellationToken = default) { - return UsingImplicitSessionAsync(session => RunCommandAsync(session, command, readPreference, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + return await RunCommandAsync(session, command, readPreference, cancellationToken).ConfigureAwait(false); } - public Task<TResult> RunCommandAsync<TResult>(IClientSessionHandle session, Command<TResult> command, ReadPreference readPreference = null, CancellationToken cancellationToken = default(CancellationToken)) + public Task<TResult> RunCommandAsync<TResult>(IClientSessionHandle session, Command<TResult> command, ReadPreference readPreference = null, CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); Ensure.IsNotNull(command, nameof(command)); var operation = CreateRunCommandOperation(command); - var effectiveReadPreference = ReadPreferenceResolver.GetEffectiveReadPreference(session, readPreference, ReadPreference.Primary); - return ExecuteReadOperationAsync(session, operation, effectiveReadPreference, cancellationToken); + var effectiveReadPreference = readPreference; + if (readPreference == null) + { + effectiveReadPreference = session.GetEffectiveReadPreference(ReadPreference.Primary); + } + + // TODO: CSOT: See what run command should do with timeout + return ExecuteReadOperationAsync(session, operation, effectiveReadPreference, null, cancellationToken); } public IChangeStreamCursor<TResult> Watch<TResult>( PipelineDefinition<ChangeStreamDocument<BsonDocument>, TResult> pipeline, ChangeStreamOptions options = null, - CancellationToken cancellationToken = default(CancellationToken)) + CancellationToken 
cancellationToken = default) { - return UsingImplicitSession(session => Watch(session, pipeline, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + return Watch(session, pipeline, options, cancellationToken); } public IChangeStreamCursor<TResult> Watch<TResult>( IClientSessionHandle session, PipelineDefinition<ChangeStreamDocument<BsonDocument>, TResult> pipeline, ChangeStreamOptions options = null, - CancellationToken cancellationToken = default(CancellationToken)) + CancellationToken cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); Ensure.IsNotNull(pipeline, nameof(pipeline)); - var translationOptions = _client.Settings.TranslationOptions; - var operation = CreateChangeStreamOperation(pipeline, options, translationOptions); - return ExecuteReadOperation(session, operation, cancellationToken); + + var operation = CreateChangeStreamOperation(pipeline, options); + return ExecuteReadOperation(session, operation, options?.Timeout, cancellationToken); } - public Task<IChangeStreamCursor<TResult>> WatchAsync<TResult>( + public async Task<IChangeStreamCursor<TResult>> WatchAsync<TResult>( PipelineDefinition<ChangeStreamDocument<BsonDocument>, TResult> pipeline, ChangeStreamOptions options = null, - CancellationToken cancellationToken = default(CancellationToken)) + CancellationToken cancellationToken = default) { - return UsingImplicitSessionAsync(session => WatchAsync(session, pipeline, options, cancellationToken), cancellationToken); + using var session = _operationExecutor.StartImplicitSession(); + return await WatchAsync(session, pipeline, options, cancellationToken).ConfigureAwait(false); } public Task<IChangeStreamCursor<TResult>> WatchAsync<TResult>( IClientSessionHandle session, PipelineDefinition<ChangeStreamDocument<BsonDocument>, TResult> pipeline, ChangeStreamOptions options = null, - CancellationToken cancellationToken = default(CancellationToken)) + CancellationToken 
cancellationToken = default) { Ensure.IsNotNull(session, nameof(session)); Ensure.IsNotNull(pipeline, nameof(pipeline)); - var translationOptions = _client.Settings.TranslationOptions; - var operation = CreateChangeStreamOperation(pipeline, options, translationOptions); - return ExecuteReadOperationAsync(session, operation, cancellationToken); + + var operation = CreateChangeStreamOperation(pipeline, options); + return ExecuteReadOperationAsync(session, operation, options?.Timeout, cancellationToken); } public IMongoDatabase WithReadConcern(ReadConcern readConcern) @@ -532,6 +530,7 @@ public IMongoDatabase WithWriteConcern(WriteConcern writeConcern) } // private methods + private AggregateOperation<TResult> CreateAggregateOperation<TResult>(RenderedPipelineDefinition<TResult> renderedPipeline, AggregateOptions options) { var messageEncoderSettings = GetMessageEncoderSettings(); @@ -557,59 +556,16 @@ private AggregateOperation<TResult> CreateAggregateOperation<TResult>(RenderedPi }; } - private FindOperation<TResult> CreateAggregateToCollectionFindOperation<TResult>(BsonDocument outStage, IBsonSerializer<TResult> resultSerializer, AggregateOptions options) + private IAsyncCursor<TResult> CreateAggregateToCollectionResultCursor<TResult>(IClientSessionHandle session, RenderedPipelineDefinition<TResult> pipeline, AggregateOptions options) { - CollectionNamespace outputCollectionNamespace; - var stageName = outStage.GetElement(0).Name; - switch (stageName) - { - case "$out": - { - var outValue = outStage[0]; - DatabaseNamespace outputDatabaseNamespace; - string outputCollectionName; - if (outValue.IsString) - { - outputDatabaseNamespace = _databaseNamespace; - outputCollectionName = outValue.AsString; - } - else - { - outputDatabaseNamespace = new DatabaseNamespace(outValue["db"].AsString); - outputCollectionName = outValue["coll"].AsString; - } - outputCollectionNamespace = new CollectionNamespace(outputDatabaseNamespace, outputCollectionName); - } - break; - case 
"$merge": - { - var mergeArguments = outStage[0].AsBsonDocument; - DatabaseNamespace outputDatabaseNamespace; - string outputCollectionName; - var into = mergeArguments["into"]; - if (into.IsString) - { - outputDatabaseNamespace = _databaseNamespace; - outputCollectionName = into.AsString; - } - else - { - outputDatabaseNamespace = new DatabaseNamespace(into["db"].AsString); - outputCollectionName = into["coll"].AsString; - } - outputCollectionNamespace = new CollectionNamespace(outputDatabaseNamespace, outputCollectionName); - } - break; - default: - throw new ArgumentException($"Unexpected stage name: {stageName}."); - } + var outputCollectionNamespace = AggregateHelper.GetOutCollection(pipeline.Documents.Last(), _databaseNamespace); // because auto encryption is not supported for non-collection commands. // So, an error will be thrown in the previous CreateAggregateToCollectionOperation step. // However, since we've added encryption configuration for CreateAggregateToCollectionOperation operation, // it's not superfluous to also add it here var messageEncoderSettings = GetMessageEncoderSettings(); - return new FindOperation<TResult>(outputCollectionNamespace, resultSerializer, messageEncoderSettings) + var findOperation = new FindOperation<TResult>(outputCollectionNamespace, pipeline.OutputSerializer, messageEncoderSettings) { BatchSize = options.BatchSize, Collation = options.Collation, @@ -617,6 +573,15 @@ private FindOperation<TResult> CreateAggregateToCollectionFindOperation<TResult> ReadConcern = _settings.ReadConcern, RetryRequested = _client.Settings.RetryReads }; + + // we want to delay execution of the find because the user may + // not want to iterate the results at all... 
+ var forkedSession = session.Fork(); + var deferredCursor = new DeferredAsyncCursor<TResult>( + () => forkedSession.Dispose(), + ct => ExecuteReadOperation(forkedSession, findOperation, ReadPreference.Primary, options.Timeout, ct), + ct => ExecuteReadOperationAsync(forkedSession, findOperation, ReadPreference.Primary, options.Timeout, ct)); + return deferredCursor; } private AggregateToCollectionOperation CreateAggregateToCollectionOperation<TResult>(RenderedPipelineDefinition<TResult> renderedPipeline, AggregateOptions options) @@ -642,24 +607,20 @@ private AggregateToCollectionOperation CreateAggregateToCollectionOperation<TRes private void CreateCollectionHelper<TDocument>(IClientSessionHandle session, string name, CreateCollectionOptions<TDocument> options, CancellationToken cancellationToken) { - options = options ?? new CreateCollectionOptions<TDocument>(); - - var translationOptions = _client.Settings.TranslationOptions; - var operation = CreateCreateCollectionOperation(name, options, translationOptions); - ExecuteWriteOperation(session, operation, cancellationToken); + var operation = CreateCreateCollectionOperation(name, options); + ExecuteWriteOperation(session, operation, options?.Timeout, cancellationToken); } private Task CreateCollectionHelperAsync<TDocument>(IClientSessionHandle session, string name, CreateCollectionOptions<TDocument> options, CancellationToken cancellationToken) { - options = options ?? 
new CreateCollectionOptions<TDocument>(); - - var translationOptions = _client.Settings.TranslationOptions; - var operation = CreateCreateCollectionOperation(name, options, translationOptions); - return ExecuteWriteOperationAsync(session, operation, cancellationToken); + var operation = CreateCreateCollectionOperation(name, options); + return ExecuteWriteOperationAsync(session, operation, options?.Timeout, cancellationToken); } - private IWriteOperation<BsonDocument> CreateCreateCollectionOperation<TDocument>(string name, CreateCollectionOptions<TDocument> options, ExpressionTranslationOptions translationOptions) + private IWriteOperation<BsonDocument> CreateCreateCollectionOperation<TDocument>(string name, CreateCollectionOptions<TDocument> options) { + options ??= new CreateCollectionOptions<TDocument>(); + var translationOptions = _client.Settings.TranslationOptions; var serializerRegistry = options.SerializerRegistry ?? BsonSerializer.SerializerRegistry; var documentSerializer = options.DocumentSerializer ?? 
serializerRegistry.GetSerializer<TDocument>(); @@ -685,10 +646,8 @@ private IWriteOperation<BsonDocument> CreateCreateCollectionOperation<TDocument> cco.IndexOptionDefaults = options.IndexOptionDefaults?.ToBsonDocument(); cco.MaxDocuments = options.MaxDocuments; cco.MaxSize = options.MaxSize; - cco.NoPadding = options.NoPadding; cco.StorageEngine = options.StorageEngine; cco.TimeSeriesOptions = options.TimeSeriesOptions; - cco.UsePowerOf2Sizes = options.UsePowerOf2Sizes; cco.ValidationAction = options.ValidationAction; cco.ValidationLevel = options.ValidationLevel; cco.Validator = validator; @@ -700,9 +659,11 @@ private CreateViewOperation CreateCreateViewOperation<TDocument, TResult>( string viewName, string viewOn, PipelineDefinition<TDocument, TResult> pipeline, - CreateViewOptions<TDocument> options, - ExpressionTranslationOptions translationOptions) + CreateViewOptions<TDocument> options) { + options ??= new CreateViewOptions<TDocument>(); + + var translationOptions = _client.Settings.TranslationOptions; var serializerRegistry = options.SerializerRegistry ?? BsonSerializer.SerializerRegistry; var documentSerializer = options.DocumentSerializer ?? serializerRegistry.GetSerializer<TDocument>(); var pipelineDocuments = pipeline.Render(new (documentSerializer, serializerRegistry, translationOptions: translationOptions)).Documents; @@ -713,27 +674,8 @@ private CreateViewOperation CreateCreateViewOperation<TDocument, TResult>( }; } - private IWriteOperation<BsonDocument> CreateDropCollectionOperation(string name, DropCollectionOptions options, IClientSessionHandle session, CancellationToken cancellationToken) + private IWriteOperation<BsonDocument> CreateDropCollectionOperation(CollectionNamespace collectionNamespace, BsonDocument effectiveEncryptedFields) { - var collectionNamespace = new CollectionNamespace(_databaseNamespace, name); - - options = options ?? 
new DropCollectionOptions(); - - var encryptedFieldsMap = _client.Settings?.AutoEncryptionOptions?.EncryptedFieldsMap; - if (!EncryptedCollectionHelper.TryGetEffectiveEncryptedFields(collectionNamespace, options.EncryptedFields, encryptedFieldsMap, out var effectiveEncryptedFields)) - { - if (encryptedFieldsMap != null) - { - var listCollectionOptions = new ListCollectionsOptions() { Filter = $"{{ name : '{collectionNamespace.CollectionName}' }}" }; - var currrentCollectionInfo = ListCollections(session, listCollectionOptions, cancellationToken).FirstOrDefault(); - effectiveEncryptedFields = currrentCollectionInfo - ?.GetValue("options", defaultValue: null) - ?.AsBsonDocument - ?.GetValue("encryptedFields", defaultValue: null) - ?.ToBsonDocument(); - } - } - var messageEncoderSettings = GetMessageEncoderSettings(); return DropCollectionOperation.CreateEncryptedDropCollectionOperationIfConfigured( collectionNamespace, @@ -745,42 +687,10 @@ private IWriteOperation<BsonDocument> CreateDropCollectionOperation(string name, }); } - private async Task<IWriteOperation<BsonDocument>> CreateDropCollectionOperationAsync(string name, DropCollectionOptions options, IClientSessionHandle session, CancellationToken cancellationToken) - { - var collectionNamespace = new CollectionNamespace(_databaseNamespace, name); - - options = options ?? 
new DropCollectionOptions(); - - var encryptedFieldsMap = _client.Settings?.AutoEncryptionOptions?.EncryptedFieldsMap; - if (!EncryptedCollectionHelper.TryGetEffectiveEncryptedFields(collectionNamespace, options.EncryptedFields, encryptedFieldsMap, out var effectiveEncryptedFields)) - { - if (encryptedFieldsMap != null) - { - var listCollectionOptions = new ListCollectionsOptions() { Filter = $"{{ name : '{collectionNamespace.CollectionName}' }}" }; - var currentCollectionsInfo = await ListCollectionsAsync(session, listCollectionOptions, cancellationToken).ConfigureAwait(false); - var currentCollectionInfo = await currentCollectionsInfo.FirstOrDefaultAsync(cancellationToken).ConfigureAwait(false); - effectiveEncryptedFields = currentCollectionInfo - ?.GetValue("options", defaultValue: null) - ?.AsBsonDocument - ?.GetValue("encryptedFields", defaultValue: null) - ?.ToBsonDocument(); - } - } - - var messageEncoderSettings = GetMessageEncoderSettings(); - return DropCollectionOperation.CreateEncryptedDropCollectionOperationIfConfigured( - collectionNamespace, - effectiveEncryptedFields, - messageEncoderSettings, - (dco) => - { - dco.WriteConcern = _settings.WriteConcern; - }); - } - - private ListCollectionsOperation CreateListCollectionNamesOperation(ListCollectionNamesOptions options, RenderArgs<BsonDocument> renderArgs) + private ListCollectionsOperation CreateListCollectionNamesOperation(ListCollectionNamesOptions options) { var messageEncoderSettings = GetMessageEncoderSettings(); + var renderArgs = GetRenderArgs(BsonDocumentSerializer.Instance); return new ListCollectionsOperation(_databaseNamespace, messageEncoderSettings) { AuthorizedCollections = options?.AuthorizedCollections, @@ -791,8 +701,9 @@ private ListCollectionsOperation CreateListCollectionNamesOperation(ListCollecti }; } - private ListCollectionsOperation CreateListCollectionsOperation(ListCollectionsOptions options, RenderArgs<BsonDocument> renderArgs) + private ListCollectionsOperation 
CreateListCollectionsOperation(ListCollectionsOptions options) { + var renderArgs = GetRenderArgs(BsonDocumentSerializer.Instance); var messageEncoderSettings = GetMessageEncoderSettings(); return new ListCollectionsOperation(_databaseNamespace, messageEncoderSettings) { @@ -803,23 +714,10 @@ private ListCollectionsOperation CreateListCollectionsOperation(ListCollectionsO }; } - private IReadBinding CreateReadBinding(IClientSessionHandle session, ReadPreference readPreference) - { - if (session.IsInTransaction && readPreference.ReadPreferenceMode != ReadPreferenceMode.Primary) - { - throw new InvalidOperationException("Read preference in a transaction must be primary."); - } - - return ChannelPinningHelper.CreateReadBinding(_cluster, session.WrappedCoreSession.Fork(), readPreference); - } - - private IWriteBindingHandle CreateReadWriteBinding(IClientSessionHandle session) - { - return ChannelPinningHelper.CreateReadWriteBinding(_cluster, session.WrappedCoreSession.Fork()); - } - private RenameCollectionOperation CreateRenameCollectionOperation(string oldName, string newName, RenameCollectionOptions options) { + options ??= new RenameCollectionOptions(); + var messageEncoderSettings = GetMessageEncoderSettings(); return new RenameCollectionOperation( new CollectionNamespace(_databaseNamespace, oldName), @@ -843,9 +741,10 @@ private ReadCommandOperation<TResult> CreateRunCommandOperation<TResult>(Command private ChangeStreamOperation<TResult> CreateChangeStreamOperation<TResult>( PipelineDefinition<ChangeStreamDocument<BsonDocument>, TResult> pipeline, - ChangeStreamOptions options, - ExpressionTranslationOptions translationOptions) + ChangeStreamOptions options) { + var translationOptions = _client.Settings.TranslationOptions; + return ChangeStreamHelper.CreateChangeStreamOperation( this, pipeline, @@ -856,53 +755,93 @@ private ChangeStreamOperation<TResult> CreateChangeStreamOperation<TResult>( translationOptions); } - private IEnumerable<string> 
ExtractCollectionNames(IEnumerable<BsonDocument> collections) + private OperationContext CreateOperationContext(IClientSessionHandle session, TimeSpan? timeout, CancellationToken cancellationToken) { - return collections.Select(collection => collection["name"].AsString); + var operationContext = session.WrappedCoreSession.CurrentTransaction?.OperationContext; + if (operationContext != null && timeout != null) + { + throw new InvalidOperationException("Cannot specify per operation timeout inside transaction."); + } + + return operationContext?.Fork() ?? new OperationContext(timeout ?? _settings.Timeout, cancellationToken); } - private T ExecuteReadOperation<T>(IClientSessionHandle session, IReadOperation<T> operation, CancellationToken cancellationToken) + private TResult ExecuteReadOperation<TResult>(IClientSessionHandle session, IReadOperation<TResult> operation, TimeSpan? timeout, CancellationToken cancellationToken) + => ExecuteReadOperation(session, operation, null, timeout, cancellationToken); + + private TResult ExecuteReadOperation<TResult>(IClientSessionHandle session, IReadOperation<TResult> operation, ReadPreference explicitReadPreference, TimeSpan? timeout, CancellationToken cancellationToken) { - var readPreference = ReadPreferenceResolver.GetEffectiveReadPreference(session, null, _settings.ReadPreference); - return ExecuteReadOperation(session, operation, readPreference, cancellationToken); + var readPreference = explicitReadPreference ?? 
session.GetEffectiveReadPreference(_settings.ReadPreference); + using var operationContext = CreateOperationContext(session, timeout, cancellationToken); + return _operationExecutor.ExecuteReadOperation(operationContext, session, operation, readPreference, true); } - private T ExecuteReadOperation<T>(IClientSessionHandle session, IReadOperation<T> operation, ReadPreference readPreference, CancellationToken cancellationToken) + private Task<TResult> ExecuteReadOperationAsync<TResult>(IClientSessionHandle session, IReadOperation<TResult> operation, TimeSpan? timeout, CancellationToken cancellationToken) + => ExecuteReadOperationAsync(session, operation, null, timeout, cancellationToken); + + private async Task<TResult> ExecuteReadOperationAsync<TResult>(IClientSessionHandle session, IReadOperation<TResult> operation, ReadPreference explicitReadPreference, TimeSpan? timeout, CancellationToken cancellationToken) { - using (var binding = CreateReadBinding(session, readPreference)) - { - return _operationExecutor.ExecuteReadOperation(binding, operation, cancellationToken); - } + var readPreference = explicitReadPreference ?? session.GetEffectiveReadPreference(_settings.ReadPreference); + using var operationContext = CreateOperationContext(session, timeout, cancellationToken); + return await _operationExecutor.ExecuteReadOperationAsync(operationContext, session, operation, readPreference, true).ConfigureAwait(false); } - private Task<T> ExecuteReadOperationAsync<T>(IClientSessionHandle session, IReadOperation<T> operation, CancellationToken cancellationToken) + private TResult ExecuteWriteOperation<TResult>(IClientSessionHandle session, IWriteOperation<TResult> operation, TimeSpan? 
timeout, CancellationToken cancellationToken) { - var readPreference = ReadPreferenceResolver.GetEffectiveReadPreference(session, null, _settings.ReadPreference); - return ExecuteReadOperationAsync(session, operation, readPreference, cancellationToken); + using var operationContext = CreateOperationContext(session, timeout, cancellationToken); + return _operationExecutor.ExecuteWriteOperation(operationContext, session, operation, true); } - private async Task<T> ExecuteReadOperationAsync<T>(IClientSessionHandle session, IReadOperation<T> operation, ReadPreference readPreference, CancellationToken cancellationToken) + private async Task<TResult> ExecuteWriteOperationAsync<TResult>(IClientSessionHandle session, IWriteOperation<TResult> operation, TimeSpan? timeout, CancellationToken cancellationToken) { - using (var binding = CreateReadBinding(session, readPreference)) - { - return await _operationExecutor.ExecuteReadOperationAsync(binding, operation, cancellationToken).ConfigureAwait(false); - } + using var operationContext = CreateOperationContext(session, timeout, cancellationToken); + return await _operationExecutor.ExecuteWriteOperationAsync(operationContext, session, operation, true).ConfigureAwait(false); + } + + private IEnumerable<string> ExtractCollectionNames(IEnumerable<BsonDocument> collections) + { + return collections.Select(collection => collection["name"].AsString); } - private T ExecuteWriteOperation<T>(IClientSessionHandle session, IWriteOperation<T> operation, CancellationToken cancellationToken) + private BsonDocument GetEffectiveEncryptedFields(IClientSessionHandle session, CollectionNamespace collectionNamespace, DropCollectionOptions options, CancellationToken cancellationToken) { - using (var binding = CreateReadWriteBinding(session)) + var encryptedFieldsMap = _client.Settings?.AutoEncryptionOptions?.EncryptedFieldsMap; + if (!EncryptedCollectionHelper.TryGetEffectiveEncryptedFields(collectionNamespace, options?.EncryptedFields, 
encryptedFieldsMap, out var effectiveEncryptedFields)) { - return _operationExecutor.ExecuteWriteOperation(binding, operation, cancellationToken); + if (encryptedFieldsMap != null) + { + var listCollectionOptions = new ListCollectionsOptions() { Filter = $"{{ name : '{collectionNamespace.CollectionName}' }}" }; + var currentCollectionInfo = ListCollections(session, listCollectionOptions, cancellationToken: cancellationToken).FirstOrDefault(); + effectiveEncryptedFields = currentCollectionInfo + ?.GetValue("options", defaultValue: null) + ?.AsBsonDocument + ?.GetValue("encryptedFields", defaultValue: null) + ?.ToBsonDocument(); + } } + + return effectiveEncryptedFields; } - private async Task<T> ExecuteWriteOperationAsync<T>(IClientSessionHandle session, IWriteOperation<T> operation, CancellationToken cancellationToken) + private async Task<BsonDocument> GetEffectiveEncryptedFieldsAsync(IClientSessionHandle session, CollectionNamespace collectionNamespace, DropCollectionOptions options, CancellationToken cancellationToken) { - using (var binding = CreateReadWriteBinding(session)) + var encryptedFieldsMap = _client.Settings?.AutoEncryptionOptions?.EncryptedFieldsMap; + if (!EncryptedCollectionHelper.TryGetEffectiveEncryptedFields(collectionNamespace, options?.EncryptedFields, encryptedFieldsMap, out var effectiveEncryptedFields)) { - return await _operationExecutor.ExecuteWriteOperationAsync(binding, operation, cancellationToken).ConfigureAwait(false); + if (encryptedFieldsMap != null) + { + var listCollectionOptions = new ListCollectionsOptions() { Filter = $"{{ name : '{collectionNamespace.CollectionName}' }}" }; + var currentCollectionsInfo = await ListCollectionsAsync(session, listCollectionOptions, cancellationToken: cancellationToken).ConfigureAwait(false); + var currentCollectionInfo = await currentCollectionsInfo.FirstOrDefaultAsync(cancellationToken: cancellationToken).ConfigureAwait(false); + effectiveEncryptedFields = currentCollectionInfo + 
?.GetValue("options", defaultValue: null) + ?.AsBsonDocument + ?.GetValue("encryptedFields", defaultValue: null) + ?.ToBsonDocument(); + } } + + return effectiveEncryptedFields; } private MessageEncoderSettings GetMessageEncoderSettings() @@ -932,37 +871,5 @@ private RenderArgs<TDocument> GetRenderArgs<TDocument>(IBsonSerializer<TDocument translationOptions = translationOptions.AddMissingOptionsFrom(_client.Settings.TranslationOptions); return new RenderArgs<TDocument>(documentSerializer, _settings.SerializerRegistry, translationOptions: translationOptions); } - - private void UsingImplicitSession(Action<IClientSessionHandle> func, CancellationToken cancellationToken) - { - using (var session = _operationExecutor.StartImplicitSession(cancellationToken)) - { - func(session); - } - } - - private TResult UsingImplicitSession<TResult>(Func<IClientSessionHandle, TResult> func, CancellationToken cancellationToken) - { - using (var session = _operationExecutor.StartImplicitSession(cancellationToken)) - { - return func(session); - } - } - - private async Task UsingImplicitSessionAsync(Func<IClientSessionHandle, Task> funcAsync, CancellationToken cancellationToken) - { - using (var session = await _operationExecutor.StartImplicitSessionAsync(cancellationToken).ConfigureAwait(false)) - { - await funcAsync(session).ConfigureAwait(false); - } - } - - private async Task<TResult> UsingImplicitSessionAsync<TResult>(Func<IClientSessionHandle, Task<TResult>> funcAsync, CancellationToken cancellationToken) - { - using (var session = await _operationExecutor.StartImplicitSessionAsync(cancellationToken).ConfigureAwait(false)) - { - return await funcAsync(session).ConfigureAwait(false); - } - } } } diff --git a/src/MongoDB.Driver/MongoDatabaseSettings.cs b/src/MongoDB.Driver/MongoDatabaseSettings.cs index b82ea52828d..e88fe90ed6c 100644 --- a/src/MongoDB.Driver/MongoDatabaseSettings.cs +++ b/src/MongoDB.Driver/MongoDatabaseSettings.cs @@ -16,7 +16,6 @@ using System; using 
System.Collections.Generic; using System.Text; -using MongoDB.Bson; using MongoDB.Bson.Serialization; using MongoDB.Driver.Core.Misc; @@ -31,6 +30,7 @@ public class MongoDatabaseSettings private Setting<ReadConcern> _readConcern; private Setting<UTF8Encoding> _readEncoding; private Setting<ReadPreference> _readPreference; + private TimeSpan? _timeout; private Setting<WriteConcern> _writeConcern; private Setting<UTF8Encoding> _writeEncoding; @@ -106,6 +106,20 @@ public IBsonSerializerRegistry SerializerRegistry get { return BsonSerializer.SerializerRegistry; } } + /// <summary> + /// Gets or sets the per-operation timeout + /// </summary> + // TODO: CSOT: Make it public when CSOT will be ready for GA + internal TimeSpan? Timeout + { + get { return _timeout; } + set + { + if (_isFrozen) { throw new InvalidOperationException("MongoDatabaseSettings is frozen."); } + _timeout = Ensure.IsNullOrValidTimeout(value, nameof(Timeout)); + } + } + /// <summary> /// Gets or sets the WriteConcern to use. /// </summary> @@ -147,6 +161,7 @@ public MongoDatabaseSettings Clone() clone._readConcern = _readConcern.Clone(); clone._readEncoding = _readEncoding.Clone(); clone._readPreference = _readPreference.Clone(); + clone._timeout = _timeout; clone._writeConcern = _writeConcern.Clone(); clone._writeEncoding = _writeEncoding.Clone(); return clone; @@ -176,6 +191,7 @@ public override bool Equals(object obj) _readConcern.Value == rhs._readConcern.Value && object.Equals(_readEncoding, rhs._readEncoding) && object.Equals(_readPreference.Value, rhs._readPreference.Value) && + _timeout == rhs._timeout && _writeConcern.Value == rhs._writeConcern.Value && object.Equals(_writeEncoding, rhs._writeEncoding); } @@ -229,6 +245,7 @@ public override int GetHashCode() hash = 37 * hash + ((_readConcern.Value == null) ? 0 : _readConcern.GetHashCode()); hash = 37 * hash + ((_readEncoding.Value == null) ? 0 : _readEncoding.GetHashCode()); hash = 37 * hash + ((_readPreference.Value == null) ? 
0 : _readPreference.Value.GetHashCode()); + hash = 37 * hash + (_timeout?.GetHashCode() ?? 0); hash = 37 * hash + ((_writeConcern.Value == null) ? 0 : _writeConcern.Value.GetHashCode()); hash = 37 * hash + ((_writeEncoding.Value == null) ? 0 : _writeEncoding.GetHashCode()); return hash; @@ -252,6 +269,10 @@ public override string ToString() parts.Add(string.Format("ReadEncoding={0}", (_readEncoding.Value == null) ? "null" : "UTF8Encoding")); } parts.Add(string.Format("ReadPreference={0}", _readPreference.Value)); + if (_timeout.HasValue) + { + parts.Add(string.Format("Timeout={0}", _timeout.Value)); + } parts.Add(string.Format("WriteConcern={0}", _writeConcern.Value)); if (_writeEncoding.HasBeenSet) { @@ -275,6 +296,10 @@ internal void ApplyDefaultValues(IInheritableMongoClientSettings clientSettings) { ReadPreference = clientSettings.ReadPreference; } + if (!_timeout.HasValue) + { + Timeout = clientSettings.Timeout; + } if (!_writeConcern.HasBeenSet) { WriteConcern = clientSettings.WriteConcern; diff --git a/src/MongoDB.Driver/MongoIdentityEvidence.cs b/src/MongoDB.Driver/MongoIdentityEvidence.cs index 17456390644..e987ac5ec7e 100644 --- a/src/MongoDB.Driver/MongoIdentityEvidence.cs +++ b/src/MongoDB.Driver/MongoIdentityEvidence.cs @@ -69,7 +69,7 @@ internal MongoIdentityEvidence() /// Returns a hash code for this instance. /// </summary> /// <returns> - /// A hash code for this instance, suitable for use in hashing algorithms and data structures like a hash table. + /// A hash code for this instance, suitable for use in hashing algorithms and data structures like a hash table. 
/// </returns> public abstract override int GetHashCode(); } diff --git a/src/MongoDB.Driver/MongoIndexManagerBase.cs b/src/MongoDB.Driver/MongoIndexManagerBase.cs index 0ac93ee98ff..5cfbaad1d72 100644 --- a/src/MongoDB.Driver/MongoIndexManagerBase.cs +++ b/src/MongoDB.Driver/MongoIndexManagerBase.cs @@ -319,7 +319,8 @@ private CreateManyIndexesOptions ToCreateManyIndexesOptions(CreateOneIndexOption return new CreateManyIndexesOptions { CommitQuorum = options?.CommitQuorum, - MaxTime = options?.MaxTime + MaxTime = options?.MaxTime, + Timeout = options?.Timeout, }; } } diff --git a/src/MongoDB.Driver/MongoUrl.cs b/src/MongoDB.Driver/MongoUrl.cs index 2c1446fd57c..3af85a2f830 100644 --- a/src/MongoDB.Driver/MongoUrl.cs +++ b/src/MongoDB.Driver/MongoUrl.cs @@ -64,6 +64,10 @@ public class MongoUrl : IEquatable<MongoUrl> private readonly bool? _retryReads; private readonly bool? _retryWrites; private readonly TimeSpan _localThreshold; + private readonly string _proxyHost; + private readonly int? _proxyPort; + private readonly string _proxyUsername; + private readonly string _proxyPassword; private readonly ConnectionStringScheme _scheme; private readonly IEnumerable<MongoServerAddress> _servers; private readonly ServerMonitoringMode? _serverMonitoringMode; @@ -71,6 +75,7 @@ public class MongoUrl : IEquatable<MongoUrl> private readonly TimeSpan _socketTimeout; private readonly int? _srvMaxHosts; private readonly string _srvServiceName; + private readonly TimeSpan? 
_timeout; private readonly bool _tlsDisableCertificateRevocationCheck; private readonly string _username; private readonly bool _useTls; @@ -117,6 +122,10 @@ internal MongoUrl(MongoUrlBuilder builder) _maxConnectionPoolSize = builder.MaxConnectionPoolSize; _minConnectionPoolSize = builder.MinConnectionPoolSize; _password = builder.Password; + _proxyHost = builder.ProxyHost; + _proxyPort = builder.ProxyPort; + _proxyUsername = builder.ProxyUsername; + _proxyPassword = builder.ProxyPassword; _readConcernLevel = builder.ReadConcernLevel; _readPreference = builder.ReadPreference; _replicaSetName = builder.ReplicaSetName; @@ -129,6 +138,7 @@ internal MongoUrl(MongoUrlBuilder builder) _socketTimeout = builder.SocketTimeout; _srvMaxHosts = builder.SrvMaxHosts; _srvServiceName = builder.SrvServiceName; + _timeout = builder.Timeout; _tlsDisableCertificateRevocationCheck = builder.TlsDisableCertificateRevocationCheck; _username = builder.Username; _useTls = builder.UseTls; @@ -358,6 +368,26 @@ public string Password get { return _password; } } + /// <summary> + /// Gets the proxy host. + /// </summary> + public string ProxyHost => _proxyHost; + + /// <summary> + /// Gets the proxy port. + /// </summary> + public int? ProxyPort => _proxyPort; + + /// <summary> + /// Gets the proxy username. + /// </summary> + public string ProxyUsername => _proxyUsername; + + /// <summary> + /// Gets the proxy password. + /// </summary> + public string ProxyPassword => _proxyPassword; + /// <summary> /// Gets the read concern level. /// </summary> @@ -462,6 +492,12 @@ public string SrvServiceName get { return _srvServiceName; } } + /// <summary> + /// Gets the per-operation timeout + /// </summary> + // TODO: CSOT: Make it public when CSOT will be ready for GA + internal TimeSpan? Timeout => _timeout; + /// <summary> /// Gets whether or not to disable checking certificate revocation status during the TLS handshake. 
/// </summary> @@ -751,11 +787,5 @@ public override string ToString() { return _url; } - - // private methods - private bool AnyWriteConcernSettingsAreSet() - { - return _fsync != null || _journal != null || _w != null || _wTimeout != null; - } } } diff --git a/src/MongoDB.Driver/MongoUrlBuilder.cs b/src/MongoDB.Driver/MongoUrlBuilder.cs index 3858228cbf6..4e82bfd5865 100644 --- a/src/MongoDB.Driver/MongoUrlBuilder.cs +++ b/src/MongoDB.Driver/MongoUrlBuilder.cs @@ -17,7 +17,6 @@ using System.Collections.Generic; using System.Linq; using System.Text; -using System.Threading; using MongoDB.Bson.IO; using MongoDB.Driver.Core.Compression; using MongoDB.Driver.Core.Configuration; @@ -60,6 +59,10 @@ public class MongoUrlBuilder private string _replicaSetName; private bool? _retryReads; private bool? _retryWrites; + private string _proxyHost; + private int? _proxyPort; + private string _proxyUsername; + private string _proxyPassword; private ConnectionStringScheme _scheme; private IEnumerable<MongoServerAddress> _servers; private ServerMonitoringMode? _serverMonitoringMode; @@ -67,6 +70,7 @@ public class MongoUrlBuilder private TimeSpan _socketTimeout; private int? _srvMaxHosts; private string _srvServiceName; + private TimeSpan? _timeout; private bool? 
_tlsDisableCertificateRevocationCheck; private string _username; private bool _useTls; @@ -104,6 +108,10 @@ public MongoUrlBuilder() _maxConnectionPoolSize = MongoDefaults.MaxConnectionPoolSize; _minConnectionPoolSize = MongoDefaults.MinConnectionPoolSize; _password = null; + _proxyHost = null; + _proxyPort = null; + _proxyUsername = null; + _proxyPassword = null; _readConcernLevel = null; _readPreference = null; _replicaSetName = null; @@ -116,6 +124,7 @@ public MongoUrlBuilder() _socketTimeout = MongoDefaults.SocketTimeout; _srvMaxHosts = null; _srvServiceName = MongoInternalDefaults.MongoClientSettings.SrvServiceName; + _timeout = null; _username = null; _useTls = false; _w = null; @@ -296,7 +305,7 @@ public TimeSpan HeartbeatTimeout get { return _heartbeatTimeout; } set { - if (value < TimeSpan.Zero && value != Timeout.InfiniteTimeSpan) + if (value < TimeSpan.Zero && value != System.Threading.Timeout.InfiniteTimeSpan) { throw new ArgumentOutOfRangeException("value", "HeartbeatTimeout must be greater than or equal to zero."); } @@ -438,6 +447,55 @@ public string Password set { _password = value; } } + /// <summary> + /// Gets or sets the proxy host. + /// </summary> + public string ProxyHost + { + get => _proxyHost; + set + { + _proxyHost = Ensure.IsNotNullOrEmpty(value, nameof(ProxyHost)); + } + } + + /// <summary> + /// Gets or sets the proxy port. + /// </summary> + /// <exception cref="ArgumentOutOfRangeException"></exception> + public int? ProxyPort + { + get => _proxyPort; + set + { + _proxyPort = Ensure.IsNullOrBetween(value, 1, 65535, nameof(ProxyPort)); + } + } + + /// <summary> + /// Gets or sets the proxy username. + /// </summary> + public string ProxyUsername + { + get => _proxyUsername; + set + { + _proxyUsername = Ensure.IsNotNullOrEmpty(value, nameof(ProxyUsername)); + } + } + + /// <summary> + /// Gets or sets the proxy password. 
+ /// </summary> + public string ProxyPassword + { + get => _proxyPassword; + set + { + _proxyPassword = Ensure.IsNotNullOrEmpty(value, nameof(ProxyPassword)); + } + } + /// <summary> /// Gets or sets the read concern level. /// </summary> @@ -592,6 +650,19 @@ public string SrvServiceName } } + /// <summary> + /// Gets or sets the per-operation timeout + /// </summary> + // TODO: CSOT: Make it public when CSOT will be ready for GA + internal TimeSpan? Timeout + { + get { return _timeout; } + set + { + _timeout = Ensure.IsNullOrValidTimeout(value, nameof(Timeout)); + } + } + /// <summary> /// Gets or sets whether to disable certificate revocation checking during the TLS handshake. /// </summary> @@ -956,6 +1027,10 @@ public override string ToString() { query.AppendFormat("socketTimeout={0}&", FormatTimeSpan(_socketTimeout)); } + if (_timeout.HasValue) + { + query.AppendFormat("timeout={0}&", _timeout == System.Threading.Timeout.InfiniteTimeSpan ? "0" : FormatTimeSpan(_timeout.Value)); + } #pragma warning disable 618 if (_waitQueueMultiple != 0.0 && _waitQueueMultiple != MongoDefaults.WaitQueueMultiple) #pragma warning restore 618 @@ -980,6 +1055,22 @@ public override string ToString() { query.AppendFormat("retryWrites={0}&", JsonConvert.ToString(_retryWrites.Value)); } + if(!string.IsNullOrEmpty(_proxyHost)) + { + query.AppendFormat("proxyHost={0}&", _proxyHost); + } + if (_proxyPort.HasValue) + { + query.AppendFormat("proxyPort={0}&", _proxyPort); + } + if (!string.IsNullOrEmpty(_proxyUsername)) + { + query.AppendFormat("proxyUsername={0}&", _proxyUsername); + } + if (!string.IsNullOrEmpty(_proxyPassword)) + { + query.AppendFormat("proxyPassword={0}&", _proxyPassword); + } if (_srvMaxHosts.HasValue) { query.AppendFormat("srvMaxHosts={0}&", _srvMaxHosts); @@ -1026,6 +1117,10 @@ private void InitializeFromConnectionString(ConnectionString connectionString) _maxConnectionPoolSize = connectionString.MaxPoolSize.GetValueOrDefault(MongoDefaults.MaxConnectionPoolSize); 
_minConnectionPoolSize = connectionString.MinPoolSize.GetValueOrDefault(MongoDefaults.MinConnectionPoolSize); _password = connectionString.Password; + _proxyHost = connectionString.ProxyHost; + _proxyPort = connectionString.ProxyPort; + _proxyUsername = connectionString.ProxyUsername; + _proxyPassword = connectionString.ProxyPassword; _readConcernLevel = connectionString.ReadConcernLevel; if (connectionString.ReadPreference.HasValue || connectionString.ReadPreferenceTags != null || connectionString.MaxStaleness.HasValue) { @@ -1045,6 +1140,7 @@ private void InitializeFromConnectionString(ConnectionString connectionString) _socketTimeout = connectionString.SocketTimeout.GetValueOrDefault(MongoDefaults.SocketTimeout); _srvMaxHosts = connectionString.SrvMaxHosts; _srvServiceName = connectionString.SrvServiceName ?? MongoInternalDefaults.MongoClientSettings.SrvServiceName; + _timeout = connectionString.Timeout; _tlsDisableCertificateRevocationCheck = connectionString.TlsDisableCertificateRevocationCheck; _username = connectionString.Username; _useTls = connectionString.Tls.GetValueOrDefault(false); @@ -1065,11 +1161,6 @@ private void InitializeFromConnectionString(ConnectionString connectionString) _wTimeout = connectionString.WTimeout; } - private bool AnyWriteConcernSettingsAreSet() - { - return _fsync != null || _journal != null || _w != null || _wTimeout != null; - } - private string FormatTimeSpan(TimeSpan value) { const int msInOneSecond = 1000; // milliseconds diff --git a/src/MongoDB.Driver/OperationContext.cs b/src/MongoDB.Driver/OperationContext.cs new file mode 100644 index 00000000000..7d0ce583df8 --- /dev/null +++ b/src/MongoDB.Driver/OperationContext.cs @@ -0,0 +1,217 @@ +/* Copyright 2010-present MongoDB Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +using System; +using System.Threading; +using System.Threading.Tasks; +using MongoDB.Driver.Core.Misc; + +namespace MongoDB.Driver +{ + internal sealed class OperationContext : IDisposable + { + // TODO: this static field is temporary here and will be removed in a future PRs in scope of CSOT. + public static readonly OperationContext NoTimeout = new(null, CancellationToken.None); + + private CancellationTokenSource _remainingTimeoutCancellationTokenSource; + private CancellationTokenSource _combinedCancellationTokenSource; + + public OperationContext(TimeSpan? timeout, CancellationToken cancellationToken) + : this(SystemClock.Instance, SystemClock.Instance.GetTimestamp(), timeout, cancellationToken) + { + } + + internal OperationContext(IClock clock, TimeSpan? timeout, CancellationToken cancellationToken) + : this(clock, clock.GetTimestamp(), timeout, cancellationToken) + { + } + + internal OperationContext(IClock clock, long initialTimestamp, TimeSpan? 
timeout, CancellationToken cancellationToken) + { + Clock = Ensure.IsNotNull(clock, nameof(clock)); + InitialTimestamp = initialTimestamp; + Timeout = Ensure.IsNullOrInfiniteOrGreaterThanOrEqualToZero(timeout, nameof(timeout)); + CancellationToken = cancellationToken; + RootContext = this; + } + + public CancellationToken CancellationToken { get; } + + public OperationContext RootContext { get; private init; } + + public TimeSpan RemainingTimeout + { + get + { + if (Timeout == null || Timeout == System.Threading.Timeout.InfiniteTimeSpan) + { + return System.Threading.Timeout.InfiniteTimeSpan; + } + + var result = Timeout.Value - Elapsed; + if (result < TimeSpan.Zero) + { + result = TimeSpan.Zero; + } + + return result; + } + } + + [Obsolete("Do not use this property, unless it's needed to avoid breaking changes in public API")] + public CancellationToken CombinedCancellationToken + { + get + { + if (_combinedCancellationTokenSource != null) + { + return _combinedCancellationTokenSource.Token; + } + + if (RemainingTimeout == System.Threading.Timeout.InfiniteTimeSpan) + { + return CancellationToken; + } + + _remainingTimeoutCancellationTokenSource = new CancellationTokenSource(RemainingTimeout); + _combinedCancellationTokenSource = CancellationTokenSource.CreateLinkedTokenSource(CancellationToken, _remainingTimeoutCancellationTokenSource.Token); + return _combinedCancellationTokenSource.Token; + } + } + private long InitialTimestamp { get; } + + private IClock Clock { get; } + + public TimeSpan Elapsed + { + get + { + var totalSeconds = (Clock.GetTimestamp() - InitialTimestamp) / (double)Clock.Frequency; + return TimeSpan.FromSeconds(totalSeconds); + } + } + + public TimeSpan? 
Timeout { get; } + + public void Dispose() + { + _remainingTimeoutCancellationTokenSource?.Dispose(); + _combinedCancellationTokenSource?.Dispose(); + } + + public OperationContext Fork() => + new (Clock, InitialTimestamp, Timeout, CancellationToken) + { + RootContext = RootContext + }; + + public bool IsTimedOut() + { + // Dotnet APIs like task.WaitAsync truncating the timeout to milliseconds. + // We should truncate the remaining timeout to the milliseconds, in order to maintain the consistent state: + // if operationContext.WaitTaskAsync() failed with TimeoutException, we want IsTimedOut() returns true. + return (int)RemainingTimeout.TotalMilliseconds == 0; + } + + public bool IsCancelledOrTimedOut() => IsTimedOut() || CancellationToken.IsCancellationRequested; + + public void ThrowIfTimedOutOrCanceled() + { + CancellationToken.ThrowIfCancellationRequested(); + if (IsTimedOut()) + { + throw new TimeoutException(); + } + } + + public void WaitTask(Task task) + { + if (task.IsCompleted) + { + task.GetAwaiter().GetResult(); // re-throws exception if any + return; + } + + var timeout = RemainingTimeout; + if (timeout == TimeSpan.Zero) + { + throw new TimeoutException(); + } + + try + { + if (!task.Wait((int)timeout.TotalMilliseconds, CancellationToken)) + { + CancellationToken.ThrowIfCancellationRequested(); + throw new TimeoutException(); + } + } + catch (AggregateException e) + { + if (e.InnerExceptions.Count == 1) + { + throw e.InnerExceptions[0]; + } + + throw; + } + } + + public async Task WaitTaskAsync(Task task) + { + if (task.IsCompleted) + { + await task.ConfigureAwait(false); // re-throws exception if any + return; + } + + var timeout = RemainingTimeout; + if (timeout == TimeSpan.Zero) + { + throw new TimeoutException(); + } + + try + { + await task.WaitAsync(timeout, CancellationToken).ConfigureAwait(false); + } + catch (TaskCanceledException) + { + CancellationToken.ThrowIfCancellationRequested(); + throw; + } + } + + public OperationContext 
WithTimeout(TimeSpan timeout) + { + Ensure.IsInfiniteOrGreaterThanOrEqualToZero(timeout, nameof(timeout)); + + var remainingTimeout = RemainingTimeout; + if (timeout == System.Threading.Timeout.InfiniteTimeSpan) + { + timeout = remainingTimeout; + } + else if (remainingTimeout != System.Threading.Timeout.InfiniteTimeSpan && remainingTimeout < timeout) + { + timeout = remainingTimeout; + } + + return new OperationContext(Clock, timeout, CancellationToken) + { + RootContext = RootContext + }; + } + } +} diff --git a/src/MongoDB.Driver/OperationContextExtensions.cs b/src/MongoDB.Driver/OperationContextExtensions.cs new file mode 100644 index 00000000000..0c8d4cc2375 --- /dev/null +++ b/src/MongoDB.Driver/OperationContextExtensions.cs @@ -0,0 +1,36 @@ +/* Copyright 2010-present MongoDB Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +using System; +using MongoDB.Driver.Core.Misc; + +namespace MongoDB.Driver +{ + internal static class OperationContextExtensions + { + public static bool IsRootContextTimeoutConfigured(this OperationContext operationContext) + { + Ensure.IsNotNull(operationContext, nameof(operationContext)); + + return operationContext.RootContext.Timeout.HasValue; + } + + public static TimeSpan RemainingTimeoutOrDefault(this OperationContext operationContext, TimeSpan defaultValue) + { + Ensure.IsNotNull(operationContext, nameof(operationContext)); + return operationContext.Timeout == null ? 
defaultValue : operationContext.RemainingTimeout; + } + } +} diff --git a/src/MongoDB.Driver/OperationExecutor.cs b/src/MongoDB.Driver/OperationExecutor.cs index cc521cdf762..929c28b6063 100644 --- a/src/MongoDB.Driver/OperationExecutor.cs +++ b/src/MongoDB.Driver/OperationExecutor.cs @@ -13,50 +13,135 @@ * limitations under the License. */ -using System.Threading; +using System; using System.Threading.Tasks; +using MongoDB.Driver.Core; using MongoDB.Driver.Core.Bindings; +using MongoDB.Driver.Core.Misc; using MongoDB.Driver.Core.Operations; namespace MongoDB.Driver { internal sealed class OperationExecutor : IOperationExecutor { - private readonly MongoClient _client; + private readonly IMongoClient _client; + private bool _isDisposed; - public OperationExecutor(MongoClient client) + public OperationExecutor(IMongoClient client) { _client = client; } - public TResult ExecuteReadOperation<TResult>(IReadBinding binding, IReadOperation<TResult> operation, CancellationToken cancellationToken) + public void Dispose() { - return operation.Execute(binding, cancellationToken); + _isDisposed = true; } - public async Task<TResult> ExecuteReadOperationAsync<TResult>(IReadBinding binding, IReadOperation<TResult> operation, CancellationToken cancellationToken) + public TResult ExecuteReadOperation<TResult>( + OperationContext operationContext, + IClientSessionHandle session, + IReadOperation<TResult> operation, + ReadPreference readPreference, + bool allowChannelPinning) { - return await operation.ExecuteAsync(binding, cancellationToken).ConfigureAwait(false); + Ensure.IsNotNull(operationContext, nameof(operationContext)); + Ensure.IsNotNull(session, nameof(session)); + Ensure.IsNotNull(operation, nameof(operation)); + Ensure.IsNotNull(readPreference, nameof(readPreference)); + ThrowIfDisposed(); + + using var binding = CreateReadBinding(session, readPreference, allowChannelPinning); + return operation.Execute(operationContext, binding); + } + + public async Task<TResult> 
ExecuteReadOperationAsync<TResult>( + OperationContext operationContext, + IClientSessionHandle session, + IReadOperation<TResult> operation, + ReadPreference readPreference, + bool allowChannelPinning) + { + Ensure.IsNotNull(operationContext, nameof(operationContext)); + Ensure.IsNotNull(session, nameof(session)); + Ensure.IsNotNull(operation, nameof(operation)); + Ensure.IsNotNull(readPreference, nameof(readPreference)); + ThrowIfDisposed(); + + using var binding = CreateReadBinding(session, readPreference, allowChannelPinning); + return await operation.ExecuteAsync(operationContext, binding).ConfigureAwait(false); + } + + public TResult ExecuteWriteOperation<TResult>( + OperationContext operationContext, + IClientSessionHandle session, + IWriteOperation<TResult> operation, + bool allowChannelPinning) + { + Ensure.IsNotNull(operationContext, nameof(operationContext)); + Ensure.IsNotNull(session, nameof(session)); + Ensure.IsNotNull(operation, nameof(operation)); + ThrowIfDisposed(); + + using var binding = CreateReadWriteBinding(session, allowChannelPinning); + return operation.Execute(operationContext, binding); + } + + public async Task<TResult> ExecuteWriteOperationAsync<TResult>( + OperationContext operationContext, + IClientSessionHandle session, + IWriteOperation<TResult> operation, + bool allowChannelPinning) + { + Ensure.IsNotNull(operationContext, nameof(operationContext)); + Ensure.IsNotNull(session, nameof(session)); + Ensure.IsNotNull(operation, nameof(operation)); + ThrowIfDisposed(); + + using var binding = CreateReadWriteBinding(session, allowChannelPinning); + return await operation.ExecuteAsync(operationContext, binding).ConfigureAwait(false); } - public TResult ExecuteWriteOperation<TResult>(IWriteBinding binding, IWriteOperation<TResult> operation, CancellationToken cancellationToken) + public IClientSessionHandle StartImplicitSession() { - return operation.Execute(binding, cancellationToken); + ThrowIfDisposed(); + var options = new 
ClientSessionOptions { CausalConsistency = false, Snapshot = false }; + var coreSession = _client.GetClusterInternal().StartSession(options.ToCore(isImplicit: true)); + return new ClientSessionHandle(_client, options, coreSession); } - public async Task<TResult> ExecuteWriteOperationAsync<TResult>(IWriteBinding binding, IWriteOperation<TResult> operation, CancellationToken cancellationToken) + private IReadBindingHandle CreateReadBinding(IClientSessionHandle session, ReadPreference readPreference, bool allowChannelPinning) { - return await operation.ExecuteAsync(binding, cancellationToken).ConfigureAwait(false); + if (session.IsInTransaction && readPreference.ReadPreferenceMode != ReadPreferenceMode.Primary) + { + throw new InvalidOperationException("Read preference in a transaction must be primary."); + } + + if (allowChannelPinning) + { + return ChannelPinningHelper.CreateReadBinding(_client.GetClusterInternal(), session.WrappedCoreSession.Fork(), readPreference); + } + + var binding = new ReadPreferenceBinding(_client.GetClusterInternal(), readPreference, session.WrappedCoreSession.Fork()); + return new ReadBindingHandle(binding); } - public IClientSessionHandle StartImplicitSession(CancellationToken cancellationToken) + private IReadWriteBindingHandle CreateReadWriteBinding(IClientSessionHandle session, bool allowChannelPinning) { - return _client.StartImplicitSession(cancellationToken); + if (allowChannelPinning) + { + return ChannelPinningHelper.CreateReadWriteBinding(_client.GetClusterInternal(), session.WrappedCoreSession.Fork()); + } + + var binding = new WritableServerBinding(_client.GetClusterInternal(), session.WrappedCoreSession.Fork()); + return new ReadWriteBindingHandle(binding); } - public Task<IClientSessionHandle> StartImplicitSessionAsync(CancellationToken cancellationToken) + private void ThrowIfDisposed() { - return _client.StartImplicitSessionAsync(cancellationToken); + if (_isDisposed) + { + throw new 
ObjectDisposedException(nameof(OperationExecutor)); + } } } } diff --git a/src/MongoDB.Driver/ReadPreferenceResolver.cs b/src/MongoDB.Driver/ReadPreferenceResolver.cs deleted file mode 100644 index 6fbc9ed27f8..00000000000 --- a/src/MongoDB.Driver/ReadPreferenceResolver.cs +++ /dev/null @@ -1,42 +0,0 @@ -/* Copyright 2018-present MongoDB Inc. -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ - -namespace MongoDB.Driver -{ - internal static class ReadPreferenceResolver - { - public static ReadPreference GetEffectiveReadPreference( - IClientSessionHandle session, - ReadPreference explicitReadPreference, - ReadPreference defaultReadPreference) - { - if (explicitReadPreference != null) - { - return explicitReadPreference; - } - - if (session.IsInTransaction) - { - var transactionReadPreference = session.WrappedCoreSession.CurrentTransaction.TransactionOptions.ReadPreference; - if (transactionReadPreference != null) - { - return transactionReadPreference; - } - } - - return defaultReadPreference ?? ReadPreference.Primary; - } - } -} diff --git a/src/MongoDB.Driver/RenameCollectionOptions.cs b/src/MongoDB.Driver/RenameCollectionOptions.cs index e319db23f4a..5fccf089ac7 100644 --- a/src/MongoDB.Driver/RenameCollectionOptions.cs +++ b/src/MongoDB.Driver/RenameCollectionOptions.cs @@ -13,6 +13,9 @@ * limitations under the License. 
*/ +using System; +using MongoDB.Driver.Core.Misc; + namespace MongoDB.Driver { /// <summary> @@ -22,6 +25,7 @@ public class RenameCollectionOptions { // fields private bool? _dropTarget; + private TimeSpan? _timeout; // properties /// <summary> @@ -32,5 +36,15 @@ public bool? DropTarget get { return _dropTarget; } set { _dropTarget = value; } } + + /// <summary> + /// Gets or sets the operation timeout. + /// </summary> + // TODO: CSOT: Make it public when CSOT will be ready for GA + internal TimeSpan? Timeout + { + get => _timeout; + set => _timeout = Ensure.IsNullOrValidTimeout(value, nameof(Timeout)); + } } } diff --git a/src/MongoDB.Driver/ReplaceOptions.cs b/src/MongoDB.Driver/ReplaceOptions.cs index 78757fa355d..57d63b3b4d8 100644 --- a/src/MongoDB.Driver/ReplaceOptions.cs +++ b/src/MongoDB.Driver/ReplaceOptions.cs @@ -15,6 +15,7 @@ using System; using MongoDB.Bson; +using MongoDB.Driver.Core.Misc; namespace MongoDB.Driver { @@ -49,7 +50,8 @@ internal static ReplaceOptions From(UpdateOptions updateOptions) Collation = updateOptions.Collation, Hint = updateOptions.Hint, IsUpsert = updateOptions.IsUpsert, - Let = updateOptions.Let + Let = updateOptions.Let, + Timeout = updateOptions.Timeout }; } } @@ -62,6 +64,7 @@ internal static ReplaceOptions From(UpdateOptions updateOptions) private BsonValue _hint; private bool _isUpsert; private BsonDocument _let; + private TimeSpan? _timeout; // properties /// <summary> @@ -117,6 +120,16 @@ public BsonDocument Let get { return _let; } set { _let = value; } } + + /// <summary> + /// Gets or sets the operation timeout. + /// </summary> + // TODO: CSOT: Make it public when CSOT will be ready for GA + internal TimeSpan? 
Timeout + { + get => _timeout; + set => _timeout = Ensure.IsNullOrValidTimeout(value, nameof(Timeout)); + } } /// <summary> diff --git a/src/MongoDB.Driver/Search/SearchAutocompleteTokenOrder.cs b/src/MongoDB.Driver/Search/SearchAutocompleteTokenOrder.cs index 780698e2d93..fbe8745c51c 100644 --- a/src/MongoDB.Driver/Search/SearchAutocompleteTokenOrder.cs +++ b/src/MongoDB.Driver/Search/SearchAutocompleteTokenOrder.cs @@ -24,7 +24,7 @@ public enum SearchAutocompleteTokenOrder /// Indicates that tokens in the query can appear in any order in the documents. /// </summary> Any, - + /// <summary> /// Indicates that tokens in the query must appear adjacent to each other or in the order /// specified in the query in the documents. diff --git a/src/MongoDB.Driver/Search/SearchDefinitionBuilder.cs b/src/MongoDB.Driver/Search/SearchDefinitionBuilder.cs index 21f51069093..2f813f65211 100644 --- a/src/MongoDB.Driver/Search/SearchDefinitionBuilder.cs +++ b/src/MongoDB.Driver/Search/SearchDefinitionBuilder.cs @@ -139,7 +139,7 @@ public SearchDefinition<TDocument> Equals<TField>( TField value, SearchScoreDefinition<TDocument> score = null) => Equals(new ExpressionFieldDefinition<TDocument, TField>(path), value, score); - + /// <summary> /// Creates a search definition that queries for documents where at least one element in an indexed array field is equal /// to the specified value. @@ -340,7 +340,7 @@ public SearchDefinition<TDocument> In<TField>( IEnumerable<TField> values, SearchScoreDefinition<TDocument> score = null) => In(new ExpressionFieldDefinition<TDocument>(path), values, score); - + /// <summary> /// Creates a search definition that queries for documents where the value of the field equals to any of specified values. 
/// </summary> @@ -639,7 +639,7 @@ public SearchDefinition<TDocument> Range<TField>( SearchScoreDefinition<TDocument> score = null) where TField : struct, IComparable<TField> => Range(new ExpressionFieldDefinition<TDocument>(path), range, score); - + /// <summary> /// Creates a search definition that queries for documents where a field is in the specified range. /// </summary> @@ -672,9 +672,9 @@ public SearchDefinition<TDocument> Range<TField>( path, new SearchRangeV2<TField>( range.Min.HasValue ? new(range.Min.Value, range.IsMinInclusive) : null, - range.Max.HasValue ? new(range.Max.Value, range.IsMaxInclusive) : null), + range.Max.HasValue ? new(range.Max.Value, range.IsMaxInclusive) : null), score); - + /// <summary> /// Creates a search definition that queries for documents where a field is in the specified range. /// </summary> @@ -688,7 +688,7 @@ public SearchDefinition<TDocument> Range<TField>( SearchRangeV2<TField> range, SearchScoreDefinition<TDocument> score = null) => Range(new ExpressionFieldDefinition<TDocument>(path), range, score); - + /// <summary> /// Creates a search definition that queries for documents where a field is in the specified range. /// </summary> @@ -702,7 +702,7 @@ public SearchDefinition<TDocument> Range<TField>( SearchRangeV2<TField> range, SearchScoreDefinition<TDocument> score = null) => Range(new ExpressionFieldDefinition<TDocument>(path), range, score); - + /// <summary> /// Creates a search definition that queries for documents where a field is in the specified range. 
/// </summary> diff --git a/src/MongoDB.Driver/Search/SearchPathDefinition.cs b/src/MongoDB.Driver/Search/SearchPathDefinition.cs index 7ef9058f6ef..1faa079d5d0 100644 --- a/src/MongoDB.Driver/Search/SearchPathDefinition.cs +++ b/src/MongoDB.Driver/Search/SearchPathDefinition.cs @@ -77,7 +77,7 @@ public static implicit operator SearchPathDefinition<TDocument>(List<FieldDefini new MultiSearchPathDefinition<TDocument>(fields); /// <summary> - /// Performs an implicit conversion from an array of field names to + /// Performs an implicit conversion from an array of field names to /// <see cref="SearchPathDefinition{TDocument}"/>. /// </summary> /// <param name="fieldNames">The array of field names.</param> @@ -88,7 +88,7 @@ public static implicit operator SearchPathDefinition<TDocument>(string[] fieldNa new MultiSearchPathDefinition<TDocument>(fieldNames.Select(fieldName => new StringFieldDefinition<TDocument>(fieldName))); /// <summary> - /// Performs an implicit conversion from an array of field names to + /// Performs an implicit conversion from an array of field names to /// <see cref="SearchPathDefinition{TDocument}"/>. /// </summary> /// <param name="fieldNames">The list of field names.</param> diff --git a/src/MongoDB.Driver/Search/SearchRangeV2.cs b/src/MongoDB.Driver/Search/SearchRangeV2.cs index ceb0cd14ef7..005f896de4f 100644 --- a/src/MongoDB.Driver/Search/SearchRangeV2.cs +++ b/src/MongoDB.Driver/Search/SearchRangeV2.cs @@ -16,7 +16,7 @@ namespace MongoDB.Driver.Search { /// <summary> - /// Object that specifies the boundaries for a range query. + /// Object that specifies the boundaries for a range query. /// </summary> /// <typeparam name="TValue">The type of the range value.</typeparam> public struct SearchRangeV2<TValue> @@ -43,7 +43,7 @@ public SearchRangeV2(Bound<TValue> min, Bound<TValue> max) /// <summary>Gets the lower bound of the range.</summary> public Bound<TValue> Min { get; } } - + /// <summary> /// Represents a bound value. 
/// </summary> @@ -54,7 +54,7 @@ public sealed class Bound<TValue> /// Gets the bound value. /// </summary> public TValue Value { get; } - + /// <summary> /// Gets whether the bound is inclusive or not. /// </summary> @@ -71,7 +71,7 @@ public Bound(TValue value, bool inclusive = false) Inclusive = inclusive; } } - + /// <summary> /// A builder for a SearchRangeV2. /// </summary> diff --git a/src/MongoDB.Driver/ServerVersion.cs b/src/MongoDB.Driver/ServerVersion.cs index b938a350d00..b7b56e419a5 100644 --- a/src/MongoDB.Driver/ServerVersion.cs +++ b/src/MongoDB.Driver/ServerVersion.cs @@ -152,7 +152,12 @@ public enum ServerVersion /// <summary> /// Server version 8.2. /// </summary> - Server82 + Server82, + + /// <summary> + /// Server version 8.3. + /// </summary> + Server83 // note: keep Server.cs and WireVersion.cs in sync as well as the extension methods below } @@ -189,7 +194,8 @@ public static ServerVersion ToServerVersion(this int wireVersion) WireVersion.Server80 => ServerVersion.Server80, WireVersion.Server81 => ServerVersion.Server81, WireVersion.Server82 => ServerVersion.Server82, - _ => throw new ArgumentException($"Invalid write version: {wireVersion}.", nameof(wireVersion)) + WireVersion.Server83 => ServerVersion.Server83, + _ => throw new ArgumentException($"Invalid wire version: {wireVersion}.", nameof(wireVersion)) }; } @@ -224,6 +230,7 @@ public static int ToWireVersion(this ServerVersion? 
serverVersion) ServerVersion.Server80 => WireVersion.Server80, ServerVersion.Server81 => WireVersion.Server81, ServerVersion.Server82 => WireVersion.Server82, + ServerVersion.Server83 => WireVersion.Server83, _ => throw new ArgumentException($"Invalid server version: {serverVersion}.", nameof(serverVersion)) }; } diff --git a/src/MongoDB.Driver/SortDefinition.cs b/src/MongoDB.Driver/SortDefinition.cs index b66bda799a9..a7d6a8bfaed 100644 --- a/src/MongoDB.Driver/SortDefinition.cs +++ b/src/MongoDB.Driver/SortDefinition.cs @@ -47,6 +47,9 @@ public abstract class SortDefinition<TDocument> /// <returns>A <see cref="BsonDocument"/>.</returns> public abstract BsonDocument Render(RenderArgs<TDocument> args); + // TODO: remove this and refactor Render to return a BsonValue in 4.0 + internal virtual BsonValue RenderAsBsonValue(RenderArgs<TDocument> args) => Render(args); + /// <summary> /// Performs an implicit conversion from <see cref="BsonDocument"/> to <see cref="SortDefinition{TDocument}"/>. /// </summary> diff --git a/src/MongoDB.Driver/SortDefinitionBuilder.cs b/src/MongoDB.Driver/SortDefinitionBuilder.cs index bec17d70ef3..a6c35841a9a 100644 --- a/src/MongoDB.Driver/SortDefinitionBuilder.cs +++ b/src/MongoDB.Driver/SortDefinitionBuilder.cs @@ -131,7 +131,18 @@ public static SortDefinition<TDocument> MetaTextScore<TDocument>(this SortDefini public sealed class SortDefinitionBuilder<TDocument> { /// <summary> - /// Creates an ascending sort. + /// Creates an ascending sort on a value rather than on a field of a document. For example, "$sort : 1". + /// This is used when sorting primitive values like strings or numbers, but can also be used to sort whole documents. + /// </summary> + /// <returns>A value ascending sort.</returns> + public SortDefinition<TDocument> Ascending() + { + return new ValueDirectionalSortDefinition<TDocument>(SortDirection.Ascending); + } + + /// <summary> + /// Creates an ascending sort based on a specific field within the document. 
For example, "$sort : { field : 1 }". + /// This is used when values are documents, and you want to sort by a particular field's value. /// </summary> /// <param name="field">The field.</param> /// <returns>An ascending sort.</returns> @@ -141,7 +152,8 @@ public SortDefinition<TDocument> Ascending(FieldDefinition<TDocument> field) } /// <summary> - /// Creates an ascending sort. + /// Creates an ascending sort based on a specific field within the document. For example, "$sort : { field : 1 }". + /// This is used when values are documents, and you want to sort by a particular field's value. /// </summary> /// <param name="field">The field.</param> /// <returns>An ascending sort.</returns> @@ -171,7 +183,18 @@ public SortDefinition<TDocument> Combine(IEnumerable<SortDefinition<TDocument>> } /// <summary> - /// Creates a descending sort. + /// Creates a descending sort on a value rather than on a field of a document. For example, "$sort : -1". + /// This is used when sorting primitive values like strings or numbers, but can also be used to sort whole documents. + /// </summary> + /// <returns>A value descending sort.</returns> + public SortDefinition<TDocument> Descending() + { + return new ValueDirectionalSortDefinition<TDocument>(SortDirection.Descending); + } + + /// <summary> + /// Creates a descending sort based on a specific field within the document. For example, "$sort: { field: -1 }". + /// This is used when values are documents, and you want to sort by a particular field's value. /// </summary> /// <param name="field">The field.</param> /// <returns>A descending sort.</returns> @@ -181,7 +204,8 @@ public SortDefinition<TDocument> Descending(FieldDefinition<TDocument> field) } /// <summary> - /// Creates a descending sort. + /// Creates a descending sort based on a specific field within the document. For example, "$sort: { field: -1 }". + /// This is used when values are documents, and you want to sort by a particular field's value. 
/// </summary> /// <param name="field">The field.</param> /// <returns>A descending sort.</returns> @@ -232,6 +256,11 @@ internal sealed class CombinedSortDefinition<TDocument> : SortDefinition<TDocume public CombinedSortDefinition(IEnumerable<SortDefinition<TDocument>> sorts) { _sorts = Ensure.IsNotNull(sorts, nameof(sorts)).ToList(); + + if (_sorts.Any(sort => sort is ValueDirectionalSortDefinition<TDocument>)) + { + throw new InvalidOperationException("Value-based sort cannot be combined with other sorts. When sorting by the entire element value, no other sorting criteria can be applied."); + } } public override BsonDocument Render(RenderArgs<TDocument> args) @@ -272,20 +301,25 @@ public override BsonDocument Render(RenderArgs<TDocument> args) { var renderedField = _field.Render(args); - BsonValue value; - switch (_direction) - { - case SortDirection.Ascending: - value = 1; - break; - case SortDirection.Descending: - value = -1; - break; - default: - throw new InvalidOperationException("Unknown value for " + typeof(SortDirection) + "."); - } + return new BsonDocument(renderedField.FieldName, _direction.Render()); + } + } - return new BsonDocument(renderedField.FieldName, value); + internal sealed class ValueDirectionalSortDefinition<TDocument> : SortDefinition<TDocument> + { + private readonly SortDirection _direction; + + public ValueDirectionalSortDefinition(SortDirection direction) + { + _direction = direction; + } + + public override BsonDocument Render(RenderArgs<TDocument> args) + { + throw new InvalidOperationException( + "Value-based sort cannot be rendered as a document. 
You might be trying to use a value-based sort where a field-based sort is expected."); } + + internal override BsonValue RenderAsBsonValue(RenderArgs<TDocument> args) => _direction.Render(); } } diff --git a/src/MongoDB.Driver/SortDirectionExtensions.cs b/src/MongoDB.Driver/SortDirectionExtensions.cs new file mode 100644 index 00000000000..c6b43d4ddc0 --- /dev/null +++ b/src/MongoDB.Driver/SortDirectionExtensions.cs @@ -0,0 +1,31 @@ +/* Copyright 2010-present MongoDB Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +using System; +using MongoDB.Bson; + +namespace MongoDB.Driver +{ + internal static class SortDirectionExtensions + { + internal static BsonValue Render(this SortDirection direction) => + direction switch + { + SortDirection.Ascending => 1, + SortDirection.Descending => -1, + _ => throw new InvalidOperationException($"Invalid sort direction: {direction}.") + }; + } +} \ No newline at end of file diff --git a/src/MongoDB.Driver/TransactionExecutor.cs b/src/MongoDB.Driver/TransactionExecutor.cs index cb4e5daf9a8..32f94861a2e 100644 --- a/src/MongoDB.Driver/TransactionExecutor.cs +++ b/src/MongoDB.Driver/TransactionExecutor.cs @@ -1,4 +1,4 @@ -/* Copyright 2019-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -18,7 +18,6 @@ using System.Threading.Tasks; using MongoDB.Driver.Core.Bindings; using MongoDB.Driver.Core.Misc; -using MongoDB.Driver.Support; namespace MongoDB.Driver { @@ -27,7 +26,6 @@ internal static class TransactionExecutor // constants private const string TransientTransactionErrorLabel = "TransientTransactionError"; private const string UnknownTransactionCommitResultLabel = "UnknownTransactionCommitResult"; - private const int MaxTimeMSExpiredErrorCode = 50; private static readonly TimeSpan __transactionTimeout = TimeSpan.FromSeconds(120); public static TResult ExecuteWithRetries<TResult>( @@ -37,13 +35,15 @@ public static TResult ExecuteWithRetries<TResult>( IClock clock, CancellationToken cancellationToken) { - var startTime = clock.UtcNow; + var transactionTimeout = transactionOptions?.Timeout ?? clientSession.Options.DefaultTransactionOptions?.Timeout; + using var operationContext = new OperationContext(clock, transactionTimeout, cancellationToken); while (true) { clientSession.StartTransaction(transactionOptions); + clientSession.WrappedCoreSession.CurrentTransaction.OperationContext = operationContext; - var callbackOutcome = ExecuteCallback(clientSession, callback, startTime, clock, cancellationToken); + var callbackOutcome = ExecuteCallback(operationContext, clientSession, callback, cancellationToken); if (callbackOutcome.ShouldRetryTransaction) { continue; @@ -53,7 +53,7 @@ public static TResult ExecuteWithRetries<TResult>( return callbackOutcome.Result; // assume callback intentionally ended the transaction } - var transactionHasBeenCommitted = CommitWithRetries(clientSession, startTime, clock, cancellationToken); + var transactionHasBeenCommitted = CommitWithRetries(operationContext, clientSession, cancellationToken); if (transactionHasBeenCommitted) { return callbackOutcome.Result; @@ -68,12 +68,15 @@ public static async Task<TResult> ExecuteWithRetriesAsync<TResult>( IClock clock, CancellationToken cancellationToken) { - var startTime = 
clock.UtcNow; + TimeSpan? transactionTimeout = transactionOptions?.Timeout ?? clientSession.Options.DefaultTransactionOptions?.Timeout; + using var operationContext = new OperationContext(clock, transactionTimeout, cancellationToken); + while (true) { clientSession.StartTransaction(transactionOptions); + clientSession.WrappedCoreSession.CurrentTransaction.OperationContext = operationContext; - var callbackOutcome = await ExecuteCallbackAsync(clientSession, callbackAsync, startTime, clock, cancellationToken).ConfigureAwait(false); + var callbackOutcome = await ExecuteCallbackAsync(operationContext, clientSession, callbackAsync, cancellationToken).ConfigureAwait(false); if (callbackOutcome.ShouldRetryTransaction) { continue; @@ -83,7 +86,7 @@ public static async Task<TResult> ExecuteWithRetriesAsync<TResult>( return callbackOutcome.Result; // assume callback intentionally ended the transaction } - var transactionHasBeenCommitted = await CommitWithRetriesAsync(clientSession, startTime, clock, cancellationToken).ConfigureAwait(false); + var transactionHasBeenCommitted = await CommitWithRetriesAsync(operationContext, clientSession, cancellationToken).ConfigureAwait(false); if (transactionHasBeenCommitted) { return callbackOutcome.Result; @@ -91,12 +94,13 @@ public static async Task<TResult> ExecuteWithRetriesAsync<TResult>( } } - private static bool HasTimedOut(DateTime startTime, DateTime currentTime) + private static bool HasTimedOut(OperationContext operationContext) { - return (currentTime - startTime) >= __transactionTimeout; + return operationContext.IsTimedOut() || + (operationContext.RootContext.Timeout == null && operationContext.RootContext.Elapsed > __transactionTimeout); } - private static CallbackOutcome<TResult> ExecuteCallback<TResult>(IClientSessionHandle clientSession, Func<IClientSessionHandle, CancellationToken, TResult> callback, DateTime startTime, IClock clock, CancellationToken cancellationToken) + private static CallbackOutcome<TResult> 
ExecuteCallback<TResult>(OperationContext operationContext, IClientSessionHandle clientSession, Func<IClientSessionHandle, CancellationToken, TResult> callback, CancellationToken cancellationToken) { try { @@ -107,10 +111,16 @@ private static CallbackOutcome<TResult> ExecuteCallback<TResult>(IClientSessionH { if (IsTransactionInStartingOrInProgressState(clientSession)) { - clientSession.AbortTransaction(cancellationToken); + AbortTransactionOptions abortOptions = null; + if (operationContext.IsRootContextTimeoutConfigured()) + { + abortOptions = new AbortTransactionOptions(operationContext.RootContext.Timeout); + } + + clientSession.AbortTransaction(abortOptions, cancellationToken); } - if (HasErrorLabel(ex, TransientTransactionErrorLabel) && !HasTimedOut(startTime, clock.UtcNow)) + if (HasErrorLabel(ex, TransientTransactionErrorLabel) && !HasTimedOut(operationContext)) { return new CallbackOutcome<TResult>.WithShouldRetryTransaction(); } @@ -119,7 +129,7 @@ private static CallbackOutcome<TResult> ExecuteCallback<TResult>(IClientSessionH } } - private static async Task<CallbackOutcome<TResult>> ExecuteCallbackAsync<TResult>(IClientSessionHandle clientSession, Func<IClientSessionHandle, CancellationToken, Task<TResult>> callbackAsync, DateTime startTime, IClock clock, CancellationToken cancellationToken) + private static async Task<CallbackOutcome<TResult>> ExecuteCallbackAsync<TResult>(OperationContext operationContext, IClientSessionHandle clientSession, Func<IClientSessionHandle, CancellationToken, Task<TResult>> callbackAsync, CancellationToken cancellationToken) { try { @@ -130,10 +140,16 @@ private static async Task<CallbackOutcome<TResult>> ExecuteCallbackAsync<TResult { if (IsTransactionInStartingOrInProgressState(clientSession)) { - await clientSession.AbortTransactionAsync(cancellationToken).ConfigureAwait(false); + AbortTransactionOptions abortOptions = null; + if (operationContext.IsRootContextTimeoutConfigured()) + { + abortOptions = new 
AbortTransactionOptions(operationContext.RootContext.Timeout); + } + + await clientSession.AbortTransactionAsync(abortOptions, cancellationToken).ConfigureAwait(false); } - if (HasErrorLabel(ex, TransientTransactionErrorLabel) && !HasTimedOut(startTime, clock.UtcNow)) + if (HasErrorLabel(ex, TransientTransactionErrorLabel) && !HasTimedOut(operationContext)) { return new CallbackOutcome<TResult>.WithShouldRetryTransaction(); } @@ -142,24 +158,29 @@ private static async Task<CallbackOutcome<TResult>> ExecuteCallbackAsync<TResult } } - private static bool CommitWithRetries(IClientSessionHandle clientSession, DateTime startTime, IClock clock, CancellationToken cancellationToken) + private static bool CommitWithRetries(OperationContext operationContext, IClientSessionHandle clientSession, CancellationToken cancellationToken) { while (true) { try { - clientSession.CommitTransaction(cancellationToken); + CommitTransactionOptions commitOptions = null; + if (operationContext.IsRootContextTimeoutConfigured()) + { + commitOptions = new CommitTransactionOptions(operationContext.RemainingTimeout); + } + + clientSession.CommitTransaction(commitOptions, cancellationToken); return true; } catch (Exception ex) { - var now = clock.UtcNow; // call UtcNow once since we need to facilitate predictable mocking - if (ShouldRetryCommit(ex, startTime, now)) + if (ShouldRetryCommit(operationContext, ex)) { continue; } - if (HasErrorLabel(ex, TransientTransactionErrorLabel) && !HasTimedOut(startTime, now)) + if (HasErrorLabel(ex, TransientTransactionErrorLabel) && !HasTimedOut(operationContext)) { return false; // the transaction will be retried } @@ -169,24 +190,29 @@ private static bool CommitWithRetries(IClientSessionHandle clientSession, DateTi } } - private static async Task<bool> CommitWithRetriesAsync(IClientSessionHandle clientSession, DateTime startTime, IClock clock, CancellationToken cancellationToken) + private static async Task<bool> CommitWithRetriesAsync(OperationContext 
operationContext, IClientSessionHandle clientSession, CancellationToken cancellationToken) { while (true) { try { - await clientSession.CommitTransactionAsync(cancellationToken).ConfigureAwait(false); + CommitTransactionOptions commitOptions = null; + if (operationContext.IsRootContextTimeoutConfigured()) + { + commitOptions = new CommitTransactionOptions(operationContext.RemainingTimeout); + } + + await clientSession.CommitTransactionAsync(commitOptions, cancellationToken).ConfigureAwait(false); return true; } catch (Exception ex) { - var now = clock.UtcNow; // call UtcNow once since we need to facilitate predictable mocking - if (ShouldRetryCommit(ex, startTime, now)) + if (ShouldRetryCommit(operationContext, ex)) { continue; } - if (HasErrorLabel(ex, TransientTransactionErrorLabel) && !HasTimedOut(startTime, now)) + if (HasErrorLabel(ex, TransientTransactionErrorLabel) && !HasTimedOut(operationContext)) { return false; // the transaction will be retried } @@ -211,7 +237,7 @@ private static bool HasErrorLabel(Exception ex, string errorLabel) private static bool IsMaxTimeMSExpiredException(Exception ex) { if (ex is MongoExecutionTimeoutException timeoutException && - timeoutException.Code == MaxTimeMSExpiredErrorCode) + timeoutException.Code == (int)ServerErrorCode.MaxTimeMSExpired) { return true; } @@ -222,7 +248,7 @@ private static bool IsMaxTimeMSExpiredException(Exception ex) if (writeConcernError != null) { var code = writeConcernError.GetValue("code", -1).ToInt32(); - if (code == MaxTimeMSExpiredErrorCode) + if (code == (int)ServerErrorCode.MaxTimeMSExpired) { return true; } @@ -246,11 +272,11 @@ private static bool IsTransactionInStartingOrInProgressState(IClientSessionHandl } } - private static bool ShouldRetryCommit(Exception ex, DateTime startTime, DateTime now) + private static bool ShouldRetryCommit(OperationContext operationContext, Exception ex) { return HasErrorLabel(ex, UnknownTransactionCommitResultLabel) && - !HasTimedOut(startTime, now) && + 
!HasTimedOut(operationContext) && !IsMaxTimeMSExpiredException(ex); } diff --git a/src/MongoDB.Driver/UpdateDefinitionBuilder.cs b/src/MongoDB.Driver/UpdateDefinitionBuilder.cs index be95f9a306c..9f2bac9d8ff 100644 --- a/src/MongoDB.Driver/UpdateDefinitionBuilder.cs +++ b/src/MongoDB.Driver/UpdateDefinitionBuilder.cs @@ -1685,7 +1685,7 @@ public override BsonValue Render(RenderArgs<TDocument> args) if (_sort != null) { - document["$push"][renderedField.FieldName]["$sort"] = _sort.Render(args.WithNewDocumentType((IBsonSerializer<TItem>)itemSerializer)); + document["$push"][renderedField.FieldName]["$sort"] = _sort.RenderAsBsonValue(args.WithNewDocumentType((IBsonSerializer<TItem>)itemSerializer)); } return document; diff --git a/src/MongoDB.Driver/UpdateOptions.cs b/src/MongoDB.Driver/UpdateOptions.cs index 165a81fb00b..6b849bc624c 100644 --- a/src/MongoDB.Driver/UpdateOptions.cs +++ b/src/MongoDB.Driver/UpdateOptions.cs @@ -13,8 +13,10 @@ * limitations under the License. */ +using System; using System.Collections.Generic; using MongoDB.Bson; +using MongoDB.Driver.Core.Misc; namespace MongoDB.Driver { @@ -31,6 +33,7 @@ public class UpdateOptions private BsonValue _hint; private bool _isUpsert; private BsonDocument _let; + private TimeSpan? _timeout; // properties /// <summary> @@ -98,6 +101,16 @@ public BsonDocument Let get { return _let; } set { _let = value; } } + + /// <summary> + /// Gets or sets the operation timeout. + /// </summary> + // TODO: CSOT: Make it public when CSOT will be ready for GA + internal TimeSpan? 
Timeout + { + get => _timeout; + set => _timeout = Ensure.IsNullOrValidTimeout(value, nameof(Timeout)); + } } /// <summary> diff --git a/src/MongoDB.Driver/WriteModel.cs b/src/MongoDB.Driver/WriteModel.cs index c517899d304..ea1ed187205 100644 --- a/src/MongoDB.Driver/WriteModel.cs +++ b/src/MongoDB.Driver/WriteModel.cs @@ -32,7 +32,7 @@ public abstract class WriteModel<TDocument> // API, so there is type safety in how they got allowed // into the system, meaning that even though // some things below seem unsafe, they are in a roundabout - // way. In addition, we know that there will always + // way. In addition, we know that there will always // be one level of BsonDocumentWrapper for everything, even // when the type is already a BsonDocument :(. diff --git a/src/MongoDB.Shared/Hasher.cs b/src/MongoDB.Shared/Hasher.cs index 580a0621d28..7543095d80f 100644 --- a/src/MongoDB.Shared/Hasher.cs +++ b/src/MongoDB.Shared/Hasher.cs @@ -21,7 +21,7 @@ namespace MongoDB.Shared { internal class Hasher { - #region static + #region static // public static methods [MethodImpl(MethodImplOptions.AggressiveInlining)] diff --git a/tests/AstrolabeWorkloadExecutor/AstrolabeWorkloadExecutor.csproj b/tests/AstrolabeWorkloadExecutor/AstrolabeWorkloadExecutor.csproj index cd2dfe8d18c..da78dc4945f 100644 --- a/tests/AstrolabeWorkloadExecutor/AstrolabeWorkloadExecutor.csproj +++ b/tests/AstrolabeWorkloadExecutor/AstrolabeWorkloadExecutor.csproj @@ -17,10 +17,6 @@ <Description>Astrolabe workload executor.</Description> </PropertyGroup> - <ItemGroup> - <PackageReference Include="Microsoft.NETFramework.ReferenceAssemblies" PrivateAssets="All" Version="1.0.0" /> - </ItemGroup> - <ItemGroup> <ProjectReference Include="../MongoDB.Driver.Tests/MongoDB.Driver.Tests.csproj" /> </ItemGroup> diff --git a/tests/AtlasConnectivity.Tests/AtlasConnectivity.Tests.csproj b/tests/AtlasConnectivity.Tests/AtlasConnectivity.Tests.csproj index 46c5d10a98d..f5ef150c64b 100644 --- 
a/tests/AtlasConnectivity.Tests/AtlasConnectivity.Tests.csproj +++ b/tests/AtlasConnectivity.Tests/AtlasConnectivity.Tests.csproj @@ -11,10 +11,6 @@ <Description>Atlas connectivity tests.</Description> </PropertyGroup> - <ItemGroup> - <PackageReference Include="Microsoft.NETFramework.ReferenceAssemblies" PrivateAssets="All" Version="1.0.0" /> - </ItemGroup> - <PropertyGroup> <NoWarn> 1701;1702; <!--https://github.com/dotnet/roslyn/issues/19640--> diff --git a/tests/AtlasConnectivity.Tests/ConnectivityTests.cs b/tests/AtlasConnectivity.Tests/ConnectivityTests.cs index b5729dd8d4c..d778f83249c 100644 --- a/tests/AtlasConnectivity.Tests/ConnectivityTests.cs +++ b/tests/AtlasConnectivity.Tests/ConnectivityTests.cs @@ -23,6 +23,7 @@ namespace AtlasConnectivity.Tests { + [Trait("Category", "Integration")] public class ConnectivityTests : LoggableTestClass { // public constructors @@ -38,13 +39,11 @@ public ConnectivityTests(ITestOutputHelper testOutputHelper) [InlineData("ATLAS_FREE")] [InlineData("ATLAS_TLS11")] [InlineData("ATLAS_TLS12")] - [InlineData("ATLAS_SERVERLESS")] [InlineData("ATLAS_SRV_REPL")] [InlineData("ATLAS_SRV_SHRD")] [InlineData("ATLAS_SRV_FREE")] [InlineData("ATLAS_SRV_TLS11")] [InlineData("ATLAS_SRV_TLS12")] - [InlineData("ATLAS_SRV_SERVERLESS")] public void Connection_to_Atlas_should_work(string environmentVariableName) { var connectionString = Environment.GetEnvironmentVariable(environmentVariableName); diff --git a/tests/BuildProps/Tests.Build.props b/tests/BuildProps/Tests.Build.props index a14112ae334..20c57ed0aba 100644 --- a/tests/BuildProps/Tests.Build.props +++ b/tests/BuildProps/Tests.Build.props @@ -43,6 +43,7 @@ <ItemGroup> <PackageReference Include="FluentAssertions" Version="4.12.0" /> <PackageReference Include="Microsoft.CodeAnalysis.FxCopAnalyzers" Version="2.6.2" /> + <PackageReference Include="Microsoft.NETFramework.ReferenceAssemblies" PrivateAssets="All" Version="1.0.0" /> <PackageReference 
Include="Moq" Version="4.9.0" /> <PackageReference Include="xunit" Version="2.4.2" /> <PackageReference Include="JunitXml.TestLogger" Version="2.1.81" /> diff --git a/tests/MongoDB.Bson.TestHelpers/MongoDB.Bson.TestHelpers.csproj b/tests/MongoDB.Bson.TestHelpers/MongoDB.Bson.TestHelpers.csproj index 94f37f02a4b..e37dbff7c1b 100644 --- a/tests/MongoDB.Bson.TestHelpers/MongoDB.Bson.TestHelpers.csproj +++ b/tests/MongoDB.Bson.TestHelpers/MongoDB.Bson.TestHelpers.csproj @@ -2,6 +2,7 @@ <Import Project="..\BuildProps\Tests.Build.props" /> <PropertyGroup> + <IsTestProject>false</IsTestProject> <CodeAnalysisRuleSet>..\..\MongoDBLegacyTest.ruleset</CodeAnalysisRuleSet> </PropertyGroup> @@ -11,10 +12,6 @@ <Description>Helper classes applicable to test projects that reference MongoDB.Bson.</Description> </PropertyGroup> - <ItemGroup> - <PackageReference Include="Microsoft.NETFramework.ReferenceAssemblies" PrivateAssets="All" Version="1.0.0" /> - </ItemGroup> - <ItemGroup> <ProjectReference Include="..\..\src\MongoDB.Bson\MongoDB.Bson.csproj" /> <ProjectReference Include="..\MongoDB.TestHelpers\MongoDB.TestHelpers.csproj" /> diff --git a/tests/MongoDB.Bson.TestHelpers/Threading/ThreadPerTaskScheduler.cs b/tests/MongoDB.Bson.TestHelpers/Threading/ThreadPerTaskScheduler.cs index 001c0fe3a67..707122c0785 100644 --- a/tests/MongoDB.Bson.TestHelpers/Threading/ThreadPerTaskScheduler.cs +++ b/tests/MongoDB.Bson.TestHelpers/Threading/ThreadPerTaskScheduler.cs @@ -23,21 +23,21 @@ namespace MongoDB.Bson.TestHelpers // https://github.com/dotnet/samples/blob/main/csharp/parallel/ParallelExtensionsExtras/TaskSchedulers/ThreadPerTaskkScheduler.cs public sealed class ThreadPerTaskScheduler : TaskScheduler { - /// <summary>Gets the tasks currently scheduled to this scheduler.</summary> - /// <remarks>This will always return an empty enumerable, as tasks are launched as soon as they're queued.</remarks> + /// <summary>Gets the tasks currently scheduled to 
this scheduler.</summary> + /// <remarks>This will always return an empty enumerable, as tasks are launched as soon as they're queued.</remarks> protected override IEnumerable<Task> GetScheduledTasks() { return Enumerable.Empty<Task>(); } - /// <summary>Starts a new thread to process the provided task.</summary> - /// <param name="task">The task to be executed.</param> + /// <summary>Starts a new thread to process the provided task.</summary> + /// <param name="task">The task to be executed.</param> protected override void QueueTask(Task task) { new Thread(() => TryExecuteTask(task)) { IsBackground = true }.Start(); } - /// <summary>Runs the provided task on the current thread.</summary> - /// <param name="task">The task to be executed.</param> - /// <param name="taskWasPreviouslyQueued">Ignored.</param> - /// <returns>Whether the task could be executed on the current thread.</returns> + /// <summary>Runs the provided task on the current thread.</summary> + /// <param name="task">The task to be executed.</param> + /// <param name="taskWasPreviouslyQueued">Ignored.</param> + /// <returns>Whether the task could be executed on the current thread.</returns> protected override bool TryExecuteTaskInline(Task task, bool taskWasPreviouslyQueued) { return TryExecuteTask(task); diff --git a/tests/MongoDB.Bson.TestHelpers/Threading/ThreadingUtilities.cs b/tests/MongoDB.Bson.TestHelpers/Threading/ThreadingUtilities.cs index b0b97b6b809..a7cd44e5ce4 100644 --- a/tests/MongoDB.Bson.TestHelpers/Threading/ThreadingUtilities.cs +++ b/tests/MongoDB.Bson.TestHelpers/Threading/ThreadingUtilities.cs @@ -29,7 +29,7 @@ public static void ExecuteOnNewThreads(int threadsCount, Action<int> action, int if (exceptions.Any()) { - throw exceptions.First(); + throw new AggregateException(exceptions); } } diff --git a/tests/MongoDB.Bson.Tests/IO/BinaryPrimitivesCompatTests.cs b/tests/MongoDB.Bson.Tests/IO/BinaryPrimitivesCompatTests.cs new file mode 100644 index 00000000000..50500f314d3 --- 
/dev/null +++ b/tests/MongoDB.Bson.Tests/IO/BinaryPrimitivesCompatTests.cs @@ -0,0 +1,89 @@ +/* Copyright 2010-present MongoDB Inc. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +using System; +using Xunit; +using FluentAssertions; +using MongoDB.Bson.IO; + +namespace MongoDB.Bson.Tests.IO +{ + public class BinaryPrimitivesCompatTests + { + [Fact] + public void ReadSingleLittleEndian_should_read_correctly() + { + var bytes = new byte[] { 0x00, 0x00, 0x80, 0x3F }; // 1.0f in little endian + var result = BinaryPrimitivesCompat.ReadSingleLittleEndian(bytes); + result.Should().Be(1.0f); + } + + [Fact] + public void ReadSingleLittleEndian_should_throw_on_insufficient_length() + { + var shortBuffer = new byte[3]; + var exception = Record.Exception(() => + BinaryPrimitivesCompat.ReadSingleLittleEndian(shortBuffer)); + + var e = exception.Should().BeOfType<ArgumentOutOfRangeException>().Subject; + e.ParamName.Should().Be("Length"); + } + + [Fact] + public void WriteSingleLittleEndian_should_throw_on_insufficient_length() + { + var shortBuffer = new byte[3]; + var exception = Record.Exception(() => + BinaryPrimitivesCompat.WriteSingleLittleEndian(shortBuffer, 1.23f)); + + var e = exception.Should().BeOfType<ArgumentOutOfRangeException>().Subject; + e.ParamName.Should().Be("Length"); + } + + [Fact] + public void WriteSingleLittleEndian_should_write_correctly() + { + Span<byte> buffer = new byte[4]; + 
BinaryPrimitivesCompat.WriteSingleLittleEndian(buffer, 1.0f); + buffer.ToArray().Should().Equal(0x00, 0x00, 0x80, 0x3F); // 1.0f little-endian + } + + [Theory] + [InlineData(0f)] + [InlineData(1.0f)] + [InlineData(-1.5f)] + [InlineData(float.MaxValue)] + [InlineData(float.MinValue)] + [InlineData(float.NaN)] + [InlineData(float.PositiveInfinity)] + [InlineData(float.NegativeInfinity)] + public void WriteAndReadSingleLittleEndian_should_roundtrip_correctly(float value) + { + Span<byte> buffer = new byte[4]; + + BinaryPrimitivesCompat.WriteSingleLittleEndian(buffer, value); + float result = BinaryPrimitivesCompat.ReadSingleLittleEndian(buffer); + + if (float.IsNaN(value)) + { + Assert.True(float.IsNaN(result)); + } + else + { + Assert.Equal(value, result); + } + } + } +} diff --git a/tests/MongoDB.Bson.Tests/Jira/CSharp515Tests.cs b/tests/MongoDB.Bson.Tests/Jira/CSharp515Tests.cs index 27de1f7abaa..04350931f01 100644 --- a/tests/MongoDB.Bson.Tests/Jira/CSharp515Tests.cs +++ b/tests/MongoDB.Bson.Tests/Jira/CSharp515Tests.cs @@ -45,7 +45,7 @@ public S(IList<T> list) } private static readonly string __discriminatorAssemblyName = "MongoDB.Bson.Tests"; - private string _jsonTemplate = ("{ '_id' : 1, 'R' : #V, 'S' : #V, 'RS' : { '_t' : 'S`1', '_v' : #V }, 'OR' : { '_t' : 'System.Collections.ObjectModel.ReadOnlyCollection`1[System.Int32]', '_v' : #V }, 'OS' : { '_t' : 'MongoDB.Bson.Tests.Jira.CSharp515.CSharp515Tests+S`1[System.Int32], " + __discriminatorAssemblyName + "', '_v' : #V } }").Replace("'", "\""); + private string _jsonTemplate = ("{ '_id' : 1, 'R' : #V, 'S' : #V, 'RS' : { '_t' : 'S<Int32>', '_v' : #V }, 'OR' : { '_t' : 'System.Collections.ObjectModel.ReadOnlyCollection`1[System.Int32]', '_v' : #V }, 'OS' : { '_t' : 'MongoDB.Bson.Tests.Jira.CSharp515.CSharp515Tests+S`1[System.Int32], " + __discriminatorAssemblyName + "', '_v' : #V } }").Replace("'", "\""); [Fact] public void TestNull() diff --git a/tests/MongoDB.Bson.Tests/MongoDB.Bson.Tests.csproj 
b/tests/MongoDB.Bson.Tests/MongoDB.Bson.Tests.csproj index bdf1a1d2d06..ee5cfeb07f8 100644 --- a/tests/MongoDB.Bson.Tests/MongoDB.Bson.Tests.csproj +++ b/tests/MongoDB.Bson.Tests/MongoDB.Bson.Tests.csproj @@ -11,10 +11,6 @@ <Description>MongoDB.Bson tests.</Description> </PropertyGroup> - <ItemGroup> - <PackageReference Include="Microsoft.NETFramework.ReferenceAssemblies" PrivateAssets="All" Version="1.0.0" /> - </ItemGroup> - <PropertyGroup> <NoWarn> 1701;1702; <!--https://github.com/dotnet/roslyn/issues/19640--> @@ -31,10 +27,10 @@ </NoWarn> </PropertyGroup> - <ItemGroup> + <ItemGroup Condition="'$(TargetFramework)' == 'net472'"> <Reference Include="Microsoft.CSharp" /> </ItemGroup> - + <ItemGroup> <PackageReference Include="xunit.runner.visualstudio" Version="2.4.0" /> <PackageReference Include="Microsoft.NET.Test.Sdk" Version="17.13.0" /> diff --git a/tests/MongoDB.Bson.Tests/Serialization/BsonSerializerTests.cs b/tests/MongoDB.Bson.Tests/Serialization/BsonSerializerTests.cs index 4be63a8fffc..fdb58fcba24 100644 --- a/tests/MongoDB.Bson.Tests/Serialization/BsonSerializerTests.cs +++ b/tests/MongoDB.Bson.Tests/Serialization/BsonSerializerTests.cs @@ -135,7 +135,7 @@ public void TestSerializeOrder() Assert.True(bson.SequenceEqual(rehydrated.ToBson())); } - public class InventoryItem + public class InventoryItem : ISupportInitialize { public int Price { get; set; } diff --git a/tests/MongoDB.Bson.Tests/Serialization/Conventions/StandardDiscriminatorConventionTests.cs b/tests/MongoDB.Bson.Tests/Serialization/Conventions/StandardDiscriminatorConventionTests.cs index f1d633bd724..25fabd6b479 100644 --- a/tests/MongoDB.Bson.Tests/Serialization/Conventions/StandardDiscriminatorConventionTests.cs +++ b/tests/MongoDB.Bson.Tests/Serialization/Conventions/StandardDiscriminatorConventionTests.cs @@ -31,7 +31,7 @@ public void TestConstructorThrowsWhenElementNameContainsNulls() [Fact] public void 
TestConstructorThrowsWhenElementNameIsNull() { - Assert.Throws<ArgumentNullException>(() => new ScalarDiscriminatorConvention(null)); + Assert.Throws<ArgumentException>(() => new ScalarDiscriminatorConvention(null)); } [Fact] diff --git a/tests/MongoDB.Bson.Tests/Serialization/Serializers/BinaryVectorSerializerTests.cs b/tests/MongoDB.Bson.Tests/Serialization/Serializers/BinaryVectorSerializerTests.cs index 4394c626c76..0335d4efa50 100644 --- a/tests/MongoDB.Bson.Tests/Serialization/Serializers/BinaryVectorSerializerTests.cs +++ b/tests/MongoDB.Bson.Tests/Serialization/Serializers/BinaryVectorSerializerTests.cs @@ -365,10 +365,16 @@ private BsonBinaryData SerializeToBinaryData<TCollection>(TCollection collection private static (T[], byte[] VectorBson) GetTestData<T>(BinaryVectorDataType dataType, int elementsCount, byte bitsPadding) where T : struct { - var elementsSpan = new ReadOnlySpan<T>(Enumerable.Range(0, elementsCount).Select(i => Convert.ChangeType(i, typeof(T)).As<T>()).ToArray()); - byte[] vectorBsonData = [(byte)dataType, bitsPadding, .. MemoryMarshal.Cast<T, byte>(elementsSpan)]; - - return (elementsSpan.ToArray(), vectorBsonData); + var elementsSpan = new ReadOnlySpan<T>( + Enumerable.Range(0, elementsCount) + .Select(i => Convert.ChangeType(i, typeof(T)).As<T>()) + .ToArray()); + var elementsBytesLittleEndian = BitConverter.IsLittleEndian + ? MemoryMarshal.Cast<T, byte>(elementsSpan) + : BigEndianToLittleEndian(elementsSpan, dataType); + + byte[] vectorBsonData = [(byte)dataType, bitsPadding, .. 
elementsBytesLittleEndian]; + return (elementsSpan.ToArray(), vectorBsonData); } private static (BinaryVector<T>, byte[] VectorBson) GetTestDataBinaryVector<T>(BinaryVectorDataType dataType, int elementsCount, byte bitsPadding) @@ -409,10 +415,31 @@ private static IBsonSerializer CreateBinaryVectorSerializer<T>(BinaryVectorDataT return serializer; } + private static byte[] BigEndianToLittleEndian<T>(ReadOnlySpan<T> span, BinaryVectorDataType dataType) where T : struct + { + // Types that do NOT need conversion safe on BE + if (dataType == BinaryVectorDataType.Int8 || dataType == BinaryVectorDataType.PackedBit) + { + return MemoryMarshal.Cast<T, byte>(span).ToArray(); + } + + var elementSize = Marshal.SizeOf<T>(); + byte[] result = new byte[span.Length * elementSize]; + + for (int i = 0; i < span.Length; i++) + { + byte[] bytes = BitConverter.GetBytes((dynamic)span[i]); + Array.Reverse(bytes); // Ensure LE order + Buffer.BlockCopy(bytes, 0, result, i * elementSize, elementSize); + } + + return result; + } + public class BinaryVectorNoAttributeHolder { public BinaryVectorInt8 ValuesInt8 { get; set; } - + public BinaryVectorPackedBit ValuesPackedBit { get; set; } public BinaryVectorFloat32 ValuesFloat { get; set; } diff --git a/tests/MongoDB.Bson.Tests/Serialization/Serializers/DateOnlySerializerTests.cs b/tests/MongoDB.Bson.Tests/Serialization/Serializers/DateOnlySerializerTests.cs index e4c4eb50e91..2d838af67ca 100644 --- a/tests/MongoDB.Bson.Tests/Serialization/Serializers/DateOnlySerializerTests.cs +++ b/tests/MongoDB.Bson.Tests/Serialization/Serializers/DateOnlySerializerTests.cs @@ -342,8 +342,8 @@ public void Serializer_should_be_registered() [Theory] [ParameterAttributeData] public void WithRepresentation_should_return_expected_result( - [Values(BsonType.Document, BsonType.DateTime, BsonType.Document, BsonType.String)] BsonType oldRepresentation, - [Values(BsonType.Document, BsonType.DateTime, BsonType.Document, BsonType.String)] BsonType newRepresentation) + 
[Values(BsonType.Document, BsonType.DateTime, BsonType.String)] BsonType oldRepresentation, + [Values(BsonType.Document, BsonType.DateTime, BsonType.String)] BsonType newRepresentation) { var subject = new DateOnlySerializer(oldRepresentation); @@ -375,4 +375,4 @@ private class TestClass } } #endif -} \ No newline at end of file +} diff --git a/tests/MongoDB.Bson.Tests/Serialization/Serializers/DiscriminatorTests.cs b/tests/MongoDB.Bson.Tests/Serialization/Serializers/DiscriminatorTests.cs index 78fe458a3aa..def1a5e027b 100644 --- a/tests/MongoDB.Bson.Tests/Serialization/Serializers/DiscriminatorTests.cs +++ b/tests/MongoDB.Bson.Tests/Serialization/Serializers/DiscriminatorTests.cs @@ -13,7 +13,9 @@ * limitations under the License. */ +using System.Collections.Generic; using System.Linq; +using FluentAssertions; using MongoDB.Bson.Serialization; using MongoDB.Bson.Serialization.Attributes; using MongoDB.Bson.TestHelpers; @@ -65,6 +67,79 @@ private class H : G { } + // BaseDocument and derived classes are used for tests with generic types + // It's necessary to specify the derived specific types with BsonKnownTypes for the deserialization to work. 
+ [BsonKnownTypes(typeof(DerivedDocument<int>))] + [BsonKnownTypes(typeof(DerivedDocument<List<Dictionary<string, int>>>))] + [BsonKnownTypes(typeof(DerivedDocumentDouble<int, string>))] + abstract class BaseDocument; + + class DerivedDocument<T> : BaseDocument + { + [BsonId] + public int Id { get; set; } + + public T Value { get; set; } + } + + class DerivedDocumentDouble<T1, T2> : BaseDocument + { + [BsonId] + public int Id { get; set; } + + public T1 Value1 { get; set; } + + public T2 Value2 { get; set; } + } + + [Fact] + public void TestDeserializeGenericType() + { + var serialized = """{ "_t" : "DerivedDocument<Int32>", "_id" : 1, "Value" : 42 }"""; + var rehydrated = BsonSerializer.Deserialize<BaseDocument>(serialized); + rehydrated.Should().BeOfType<DerivedDocument<int>>(); + } + + [Fact] + public void TestDeserializeGenericTypeWithNestedType() + { + var serialized = """{ "_t" : "DerivedDocument<List<Dictionary<String, Int32>>>", "_id" : 1, "Value" : [{ "key" : 1 }] }"""; + var rehydrated = BsonSerializer.Deserialize<BaseDocument>(serialized); + rehydrated.Should().BeOfType<DerivedDocument<List<Dictionary<string, int>>>>(); + } + + [Fact] + public void TestDeserializeGenericTypeWithTwoTypes() + { + var serialized = """{ "_t" : "DerivedDocumentDouble<Int32, String>", "_id" : 1, "Value1" : 42, "Value2" : "hello" }"""; + var rehydrated = BsonSerializer.Deserialize<BaseDocument>(serialized); + rehydrated.Should().BeOfType<DerivedDocumentDouble<int,string>>(); + } + + [Fact] + public void TestSerializeGenericType() + { + var document = new DerivedDocument<int> { Id = 1, Value = 42 }; + var serialized = document.ToJson(typeof(BaseDocument)); + serialized.Should().Be("""{ "_t" : "DerivedDocument<Int32>", "_id" : 1, "Value" : 42 }"""); + } + + [Fact] + public void TestSerializeGenericTypeWithNestedType() + { + var document = new DerivedDocument<List<Dictionary<string, int>>> { Id = 1, Value = [new() { { "key", 1 } }] }; + var serialized = 
document.ToJson(typeof(BaseDocument)); + serialized.Should().Be("""{ "_t" : "DerivedDocument<List<Dictionary<String, Int32>>>", "_id" : 1, "Value" : [{ "key" : 1 }] }"""); + } + + [Fact] + public void TestSerializeGenericTypeWithTwoTypes() + { + var document = new DerivedDocumentDouble<int, string> { Id = 1, Value1 = 42, Value2 = "hello"}; + var serialized = document.ToJson(typeof(BaseDocument)); + serialized.Should().Be("""{ "_t" : "DerivedDocumentDouble<Int32, String>", "_id" : 1, "Value1" : 42, "Value2" : "hello" }"""); + } + [Fact] public void TestSerializeObjectasObject() { diff --git a/tests/MongoDB.Bson.Tests/Serialization/Serializers/ReadOnlyDictionarySerializerTests.cs b/tests/MongoDB.Bson.Tests/Serialization/Serializers/ReadOnlyDictionarySerializerTests.cs index 1a8d840ddde..5c4dadfd29a 100644 --- a/tests/MongoDB.Bson.Tests/Serialization/Serializers/ReadOnlyDictionarySerializerTests.cs +++ b/tests/MongoDB.Bson.Tests/Serialization/Serializers/ReadOnlyDictionarySerializerTests.cs @@ -185,7 +185,7 @@ public void TestNominalTypeReadOnlyDictionarySubclassActualTypeReadOnlyDictionar Assert.True(bson.SequenceEqual(rehydrated.ToBson())); } - // Tests where nominal type is a class that implements IReadOnlyDictionary + // Tests where nominal type is a class that implements IReadOnlyDictionary [Fact] public void TestNominalTypeCustomIReadOnlyDictionaryActualTypeCustomIReadOnlyDictionary() diff --git a/tests/MongoDB.Bson.Tests/Serialization/Serializers/TimeOnlySerializerTests.cs b/tests/MongoDB.Bson.Tests/Serialization/Serializers/TimeOnlySerializerTests.cs index 1b1940eb8cb..ea344347f56 100644 --- a/tests/MongoDB.Bson.Tests/Serialization/Serializers/TimeOnlySerializerTests.cs +++ b/tests/MongoDB.Bson.Tests/Serialization/Serializers/TimeOnlySerializerTests.cs @@ -72,7 +72,7 @@ public void Constructor_with_representation_should_return_expected_result( [Values(BsonType.String, BsonType.Int64, BsonType.Int32, BsonType.Double)] BsonType representation, 
[Values(TimeOnlyUnits.Ticks, TimeOnlyUnits.Hours, TimeOnlyUnits.Minutes, TimeOnlyUnits.Seconds, - TimeOnlyUnits.Milliseconds, TimeOnlyUnits.Microseconds, TimeOnlyUnits.Ticks, TimeOnlyUnits.Nanoseconds)] + TimeOnlyUnits.Milliseconds, TimeOnlyUnits.Microseconds, TimeOnlyUnits.Nanoseconds)] TimeOnlyUnits units) { var subject = new TimeOnlySerializer(representation, units); @@ -476,4 +476,4 @@ private class TestClass } } #endif -} \ No newline at end of file +} diff --git a/tests/MongoDB.Driver.Encryption.Tests/BasicTests.cs b/tests/MongoDB.Driver.Encryption.Tests/BasicTests.cs index b717995ef77..8dc2663aed8 100644 --- a/tests/MongoDB.Driver.Encryption.Tests/BasicTests.cs +++ b/tests/MongoDB.Driver.Encryption.Tests/BasicTests.cs @@ -677,7 +677,7 @@ private static (CryptContext.StateCode stateProcessed, Binary binaryProduced, Bs private static CryptContext StartExplicitEncryptionContextWithKeyId(CryptClient client, byte[] keyId, string encryptionAlgorithm, byte[] message) { - return client.StartExplicitEncryptionContext(keyId, keyAltName: null, queryType: null, contentionFactor: null, encryptionAlgorithm, message, rangeOptions: null); + return client.StartExplicitEncryptionContext(keyId, keyAltName: null, queryType: null, contentionFactor: null, encryptionAlgorithm, message, rangeOptions: null, textOptions: null); } static IEnumerable<string> FindTestDirectories() diff --git a/tests/MongoDB.Driver.Encryption.Tests/HashCallbackTests.cs b/tests/MongoDB.Driver.Encryption.Tests/HashCallbackTests.cs index 1b011b6f7c1..9c93f14f4c7 100644 --- a/tests/MongoDB.Driver.Encryption.Tests/HashCallbackTests.cs +++ b/tests/MongoDB.Driver.Encryption.Tests/HashCallbackTests.cs @@ -26,7 +26,7 @@ public void HashTest() { var inputHex = "74657374206f66206d6163"; var expectedHex = "9ff3e52fa31c9e0fa0b08e19c40591553ea64b73709633271975bfab2db9d980"; - + var inputBytes = CallbackUtils.GetBytesFromHex(inputHex); var expectedBytes = CallbackUtils.GetBytesFromHex(expectedHex); diff --git 
a/tests/MongoDB.Driver.Encryption.Tests/MongoDB.Driver.Encryption.Tests.csproj b/tests/MongoDB.Driver.Encryption.Tests/MongoDB.Driver.Encryption.Tests.csproj index 42e6b24a7a1..2634984884a 100644 --- a/tests/MongoDB.Driver.Encryption.Tests/MongoDB.Driver.Encryption.Tests.csproj +++ b/tests/MongoDB.Driver.Encryption.Tests/MongoDB.Driver.Encryption.Tests.csproj @@ -1,11 +1,7 @@ <Project Sdk="Microsoft.NET.Sdk"> - <Import Project="..\BuildProps\Tests.Build.props" /> + <Import Project="..\BuildProps\Tests.Build.props" /> <PropertyGroup> - <TargetFrameworks>net472;netcoreapp3.1</TargetFrameworks> - <TargetFrameworks Condition="'$(OS)' != 'Windows_NT'">netcoreapp3.1;net6.0</TargetFrameworks> - - <Platforms>AnyCPU</Platforms> <IsPackable>false</IsPackable> <SignAssembly>true</SignAssembly> <AssemblyOriginatorKeyFile>..\..\MongoDB.Driver.snk</AssemblyOriginatorKeyFile> @@ -21,8 +17,8 @@ </PropertyGroup> <ItemGroup> - <None Include="$(CMakeCurrentSourceDir)/xunit.runner.json"> - <CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory> + <None Update="xunit.runner.json"> + <CopyToOutputDirectory>Always</CopyToOutputDirectory> </None> </ItemGroup> diff --git a/tests/MongoDB.Driver.Encryption.Tests/xunit.runner.json b/tests/MongoDB.Driver.Encryption.Tests/xunit.runner.json index 991c5d665a4..df84b2cfa98 100644 --- a/tests/MongoDB.Driver.Encryption.Tests/xunit.runner.json +++ b/tests/MongoDB.Driver.Encryption.Tests/xunit.runner.json @@ -1,6 +1,6 @@ { - "$schema": "https://xunit.github.io/schema/current/xunit.runner.schema.json", - - "appDomain": "denied", - "shadowCopy": false + "longRunningTestSeconds": 10, + "parallelizeAssembly": false, + "parallelizeTestCollections": false, + "shadowCopy": false } diff --git a/tests/MongoDB.Driver.Examples/Aws/AwsAuthenticationExamples.cs b/tests/MongoDB.Driver.Examples/Aws/AwsAuthenticationExamples.cs index 8537d73e500..bba5b707e9d 100644 --- 
a/tests/MongoDB.Driver.Examples/Aws/AwsAuthenticationExamples.cs +++ b/tests/MongoDB.Driver.Examples/Aws/AwsAuthenticationExamples.cs @@ -53,6 +53,7 @@ namespace MongoDB.Driver.Examples.Aws /// 4. To work with EC2 container credentials from EC2 instance metadata make sure a test is launched on EC2 env and AWS_CONTAINER_CREDENTIALS_* is not set /// 5. To work with Aws WebIdentityToken make sure that AWS_WEB_IDENTITY_TOKEN_FILE, AWS_ROLE_ARN and AWS_ROLE_SESSION_NAME are configured /// </summary> + [Trait("Category", "Integration")] public class AwsAuthenticationExamples { private static readonly string __connectionStringHosts = "<host_address>"; diff --git a/tests/MongoDB.Driver.Examples/CausalConsistencyExamples.cs b/tests/MongoDB.Driver.Examples/CausalConsistencyExamples.cs index da6a023a678..a2c1e5a069b 100644 --- a/tests/MongoDB.Driver.Examples/CausalConsistencyExamples.cs +++ b/tests/MongoDB.Driver.Examples/CausalConsistencyExamples.cs @@ -22,6 +22,7 @@ namespace MongoDB.Driver.Examples { + [Trait("Category", "Integration")] public class CausalConsistencyExamples { [Fact] diff --git a/tests/MongoDB.Driver.Examples/ChangeStreamExamples.cs b/tests/MongoDB.Driver.Examples/ChangeStreamExamples.cs index d3731364c1f..730dfe80d1f 100644 --- a/tests/MongoDB.Driver.Examples/ChangeStreamExamples.cs +++ b/tests/MongoDB.Driver.Examples/ChangeStreamExamples.cs @@ -25,6 +25,7 @@ namespace MongoDB.Driver.Examples { + [Trait("Category", "Integration")] public class ChangeStreamExamples { [Fact] diff --git a/tests/MongoDB.Driver.Examples/ClientEncryptionExamples.cs b/tests/MongoDB.Driver.Examples/ClientEncryptionExamples.cs index ae7560e42a6..a2c7a54051f 100644 --- a/tests/MongoDB.Driver.Examples/ClientEncryptionExamples.cs +++ b/tests/MongoDB.Driver.Examples/ClientEncryptionExamples.cs @@ -25,6 +25,7 @@ namespace MongoDB.Driver.Examples { + [Trait("Category", "Integration")] public class ClientEncryptionExamples { private const string LocalMasterKey = 
"Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk"; diff --git a/tests/MongoDB.Driver.Examples/ClientSideEncryption2Examples.cs b/tests/MongoDB.Driver.Examples/ClientSideEncryption2Examples.cs index 0782fad3fd1..2524e5cb378 100644 --- a/tests/MongoDB.Driver.Examples/ClientSideEncryption2Examples.cs +++ b/tests/MongoDB.Driver.Examples/ClientSideEncryption2Examples.cs @@ -27,6 +27,7 @@ namespace MongoDB.Driver.Examples { + [Trait("Category", "Integration")] public class ClientSideEncryption2Examples { private const string LocalMasterKey = "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk"; diff --git a/tests/MongoDB.Driver.Examples/DocumentationExamples.cs b/tests/MongoDB.Driver.Examples/DocumentationExamples.cs index 4ce1ca6f8a8..5903574dcd0 100644 --- a/tests/MongoDB.Driver.Examples/DocumentationExamples.cs +++ b/tests/MongoDB.Driver.Examples/DocumentationExamples.cs @@ -23,6 +23,7 @@ namespace MongoDB.Driver.Examples { + [Trait("Category", "Integration")] public class DocumentationExamples { private readonly IMongoClient client; @@ -41,7 +42,7 @@ public DocumentationExamples() [Fact] public void Example_1() { - // db.inventory.insertOne( { item: "canvas", qty: 100, tags: ["cotton"], size: { h: 28, w: 35.5, uom: "cm" } } ) + // db.inventory.insertOne( { item: "canvas", qty: 100, tags: ["cotton"], size: { h: 28, w: 35.5, uom: "cm" } } ) // Start Example 1 var document = new BsonDocument @@ -76,10 +77,10 @@ public void Example_2() [Fact] public void Example_3() { - // db.inventory.insertMany([ + // db.inventory.insertMany([ // { item: "journal", qty: 25, tags: ["blank", "red"], size: { h: 14, w: 21, uom: "cm" } }, // { item: "mat", qty: 85, tags: ["gray"], size: { h: 27.9, w: 35.5, uom: "cm" } }, - // { item: "mousepad", qty: 25, tags: ["gel", "blue"], size: { h: 19, w: 22.85, uom: "cm" } } ]) + // { item: 
"mousepad", qty: 25, tags: ["gel", "blue"], size: { h: 19, w: 22.85, uom: "cm" } } ]) // Start Example 3 var documents = new BsonDocument[] @@ -125,7 +126,7 @@ public void Example_6() // { item: "notebook", qty: 50, size: { h: 8.5, w: 11, uom: "in" }, status: "A" }, // { item: "paper", qty: 100, size: { h: 8.5, w: 11, uom: "in" }, status: "D" }, // { item: "planner", qty: 75, size: { h: 22.85, w: 30, uom: "cm" }, status: "D" }, - // { item: "postcard", qty: 45, size: { h: 10, w: 15.25, uom: "cm" }, status: "A" } ]) + // { item: "postcard", qty: 45, size: { h: 10, w: 15.25, uom: "cm" }, status: "A" } ]) // Start Example 6 var documents = new BsonDocument[] @@ -265,7 +266,7 @@ public void Example_13() [Fact] public void Example_14() { - // db.inventory.insertMany( [ + // db.inventory.insertMany( [ // { item: "journal", qty: 25, size: { h: 14, w: 21, uom: "cm" }, status: "A" }, // { item: "notebook", qty: 50, size: { h: 8.5, w: 11, uom: "in" }, status: "A" }, // { item: "paper", qty: 100, size: { h: 8.5, w: 11, uom: "in" }, status: "D" }, @@ -392,7 +393,7 @@ public void Example_19() [Fact] public void Example_20() { - // db.inventory.insertMany([ + // db.inventory.insertMany([ // { item: "journal", qty: 25, tags: ["blank", "red"], dim_cm: [ 14, 21 ] }, // { item: "notebook", qty: 50, tags: ["red", "blank"], dim_cm: [ 14, 21 ] }, // { item: "paper", qty: 100, tags: ["red", "blank", "plain"], dim_cm: [ 14, 21 ] }, @@ -559,7 +560,7 @@ public void Example_28() [Fact] public void Example_29() { - // db.inventory.insertMany( [ + // db.inventory.insertMany( [ // { item: "journal", instock: [ { warehouse: "A", qty: 5 }, { warehouse: "C", qty: 15 } ] }, // { item: "notebook", instock: [ { warehouse: "C", qty: 5 } ] }, // { item: "paper", instock: [ { warehouse: "A", qty: 60 }, { warehouse: "B", qty: 15 } ] }, @@ -795,7 +796,7 @@ public void Example_41() [Fact] public void Example_42() { - // db.inventory.insertMany( [ + // db.inventory.insertMany( [ // { item: "journal", 
status: "A", size: { h: 14, w: 21, uom: "cm" }, instock: [ { warehouse: "A", qty: 5 } ] }, // { item: "notebook", status: "A", size: { h: 8.5, w: 11, uom: "in" }, instock: [ { warehouse: "C", qty: 5 } ] }, // { item: "paper", status: "D", size: { h: 8.5, w: 11, uom: "in" }, instock: [ { warehouse: "A", qty: 60 } ] }, @@ -992,7 +993,7 @@ public void Example_50() [Fact] public void Example_51() { - // db.inventory.insertMany( [ + // db.inventory.insertMany( [ // { item: "canvas", qty: 100, size: { h: 28, w: 35.5, uom: "cm" }, status: "A" }, // { item: "journal", qty: 25, size: { h: 14, w: 21, uom: "cm" }, status: "A" }, // { item: "mat", qty: 85, size: { h: 27.9, w: 35.5, uom: "cm" }, status: "A" }, @@ -1002,7 +1003,7 @@ public void Example_51() // { item: "planner", qty: 75, size: { h: 22.85, w: 30, uom: "cm" }, status: "D" }, // { item: "postcard", qty: 45, size: { h: 10, w: 15.25, uom: "cm" }, status: "A" }, // { item: "sketchbook", qty: 80, size: { h: 14, w: 21, uom: "cm" }, status: "A" }, - // { item: "sketch pad", qty: 95, size: { h: 22.85, w: 30.5, uom: "cm" }, status: "A" } ]); + // { item: "sketch pad", qty: 95, size: { h: 22.85, w: 30.5, uom: "cm" }, status: "A" } ]); // Start Example 51 var documents = new[] @@ -1149,12 +1150,12 @@ public void Example_54() [Fact] public void Example_55() { - // db.inventory.insertMany( [ + // db.inventory.insertMany( [ // { item: "journal", qty: 25, size: { h: 14, w: 21, uom: "cm" }, status: "A" }, // { item: "notebook", qty: 50, size: { h: 8.5, w: 11, uom: "in" }, status: "P" }, // { item: "paper", qty: 100, size: { h: 8.5, w: 11, uom: "in" }, status: "D" }, // { item: "planner", qty: 75, size: { h: 22.85, w: 30, uom: "cm" }, status: "D" }, - // { item: "postcard", qty: 45, size: { h: 10, w: 15.25, uom: "cm" }, status: "A" }, ]); + // { item: "postcard", qty: 45, size: { h: 10, w: 15.25, uom: "cm" }, status: "A" }, ]); // Start Example 55 var documents = new[] @@ -1252,7 +1253,7 @@ public void Aggregation_Example_1() { 
RequireServer.Check(); - //db.sales.aggregate([ + //db.sales.aggregate([ // { $match : { "items.fruit":"banana" } }, // { $sort : { "date" : 1 } } //]) @@ -1347,7 +1348,7 @@ public void Aggregation_Example_3() // } //}, //{ - // $project: { day: "$_id.day", revenue: 1, items_sold: 1, + // $project: { day: "$_id.day", revenue: 1, items_sold: 1, // discount: { $cond: { if : { $lte: ["$revenue", 250] }, then: 25, else : 0 }} // } //}]) @@ -1421,7 +1422,7 @@ public void Aggregation_Example_4() // $project : { // "_id" : 0, // "name" : 1, - // airlines : { + // airlines : { // $filter : { // input : "$airlines", // as : "airline", diff --git a/tests/MongoDB.Driver.Examples/ExplicitEncryptionExamples.cs b/tests/MongoDB.Driver.Examples/ExplicitEncryptionExamples.cs index ae948248240..3676919afae 100644 --- a/tests/MongoDB.Driver.Examples/ExplicitEncryptionExamples.cs +++ b/tests/MongoDB.Driver.Examples/ExplicitEncryptionExamples.cs @@ -25,6 +25,7 @@ namespace MongoDB.Driver.Examples { + [Trait("Category", "Integration")] public class ExplicitEncryptionExamples { private const string LocalMasterKey = "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk"; diff --git a/tests/MongoDB.Driver.Examples/MongoDB.Driver.Examples.csproj b/tests/MongoDB.Driver.Examples/MongoDB.Driver.Examples.csproj index 3efd3ca9de4..c993cf89187 100644 --- a/tests/MongoDB.Driver.Examples/MongoDB.Driver.Examples.csproj +++ b/tests/MongoDB.Driver.Examples/MongoDB.Driver.Examples.csproj @@ -11,10 +11,6 @@ <Description>MongoDB.Driver examples.</Description> </PropertyGroup> - <ItemGroup> - <PackageReference Include="Microsoft.NETFramework.ReferenceAssemblies" PrivateAssets="All" Version="1.0.0" /> - </ItemGroup> - <ItemGroup> <PackageReference Include="xunit.runner.visualstudio" Version="2.4.0" /> <PackageReference Include="Microsoft.NET.Test.Sdk" Version="17.13.0" /> diff --git 
a/tests/MongoDB.Driver.Examples/PrimerTestFixture.cs b/tests/MongoDB.Driver.Examples/PrimerTestFixture.cs index e6982eafa17..65288622f30 100644 --- a/tests/MongoDB.Driver.Examples/PrimerTestFixture.cs +++ b/tests/MongoDB.Driver.Examples/PrimerTestFixture.cs @@ -18,9 +18,11 @@ using System.IO; using System.Reflection; using MongoDB.Bson; +using Xunit; namespace MongoDB.Driver.Examples { + [Trait("Category", "Integration")] public abstract class PrimerTestFixture { protected static IMongoClient __client; diff --git a/tests/MongoDB.Driver.Examples/StableApiExamples.cs b/tests/MongoDB.Driver.Examples/StableApiExamples.cs index 56bcb2900e8..8d636aa63e0 100644 --- a/tests/MongoDB.Driver.Examples/StableApiExamples.cs +++ b/tests/MongoDB.Driver.Examples/StableApiExamples.cs @@ -19,6 +19,7 @@ namespace MongoDB.Driver.Examples { + [Trait("Category", "Integration")] public class StableApiExamples { [Fact] diff --git a/tests/MongoDB.Driver.Examples/TransactionExamplesForDocs/WithTransactionExample1.cs b/tests/MongoDB.Driver.Examples/TransactionExamplesForDocs/WithTransactionExample1.cs index 9251a5da4e7..66da76dc1a0 100644 --- a/tests/MongoDB.Driver.Examples/TransactionExamplesForDocs/WithTransactionExample1.cs +++ b/tests/MongoDB.Driver.Examples/TransactionExamplesForDocs/WithTransactionExample1.cs @@ -23,6 +23,7 @@ namespace MongoDB.Driver.Examples.TransactionExamplesForDocs { + [Trait("Category", "Integration")] public class WithTransactionExample1 { [Fact] diff --git a/tests/MongoDB.Driver.TestHelpers/Core/CoreTestConfiguration.cs b/tests/MongoDB.Driver.TestHelpers/Core/CoreTestConfiguration.cs index 4a50ba002d5..8919c1d79c9 100644 --- a/tests/MongoDB.Driver.TestHelpers/Core/CoreTestConfiguration.cs +++ b/tests/MongoDB.Driver.TestHelpers/Core/CoreTestConfiguration.cs @@ -29,6 +29,8 @@ using MongoDB.Driver.Core.Operations; using MongoDB.Driver.Core.Servers; using MongoDB.Driver.Core.WireProtocol.Messages.Encoders; +using MongoDB.Driver.TestHelpers; +using Xunit.Sdk; 
namespace MongoDB.Driver { @@ -46,13 +48,10 @@ public static class CoreTestConfiguration private static MessageEncoderSettings __messageEncoderSettings = new MessageEncoderSettings(); private static Lazy<int> __numberOfMongoses = new Lazy<int>(GetNumberOfMongoses, isThreadSafe: true); private static Lazy<ServerApi> __serverApi = new Lazy<ServerApi>(GetServerApi, isThreadSafe: true); - private static Lazy<bool> __serverless = new Lazy<bool>(GetServerless, isThreadSafe: true); private static Lazy<SemanticVersion> __serverVersion = new Lazy<SemanticVersion>(GetServerVersion, isThreadSafe: true); private static Lazy<string> __storageEngine = new Lazy<string>(GetStorageEngine, isThreadSafe: true); private static TraceSource __traceSource; - public static TimeSpan DefaultTestTimeout { get; } = TimeSpan.FromMinutes(3); - // static properties internal static IClusterInternal Cluster { @@ -106,11 +105,6 @@ public static ServerApi ServerApi get { return __serverApi.Value; } } - public static bool Serverless - { - get { return __serverless.Value; } - } - public static SemanticVersion ServerVersion => __serverVersion.Value; public static int MaxWireVersion => __maxWireVersion.Value; @@ -132,7 +126,16 @@ public static ClusterBuilder ConfigureCluster(ClusterBuilder builder) { builder = builder .ConfigureWithConnectionString(__connectionString.Value, __serverApi.Value) - .ConfigureCluster(c => c.With(serverSelectionTimeout: __defaultServerSelectionTimeout.Value)); + .ConfigureCluster(c => c.With(serverSelectionTimeout: __defaultServerSelectionTimeout.Value)) + .ConfigureServer(s => + { + if (Debugger.IsAttached) + { + s = s.With(heartbeatTimeout: TimeSpan.FromDays(1), serverMonitoringMode: ServerMonitoringMode.Poll); + } + + return s; + });; if (__connectionString.Value.Tls.HasValue && __connectionString.Value.Tls.Value && @@ -290,13 +293,6 @@ private static ServerApi GetServerApi() return new ServerApi(ServerApiVersion.V1); } - private static bool GetServerless() - { - var 
serverless = Environment.GetEnvironmentVariable("SERVERLESS"); - - return serverless?.ToLower() == "true"; - } - public static DatabaseNamespace GetDatabaseNamespaceForTestClass(Type testClassType) { var databaseName = TruncateDatabaseNameIfTooLong(__databaseNamespace.Value.DatabaseName + "-" + testClassType.Name); @@ -314,7 +310,7 @@ private static int GetMaxWireVersion() { var command = new BsonDocument("hello", 1); var operation = new ReadCommandOperation<BsonDocument>(DatabaseNamespace.Admin, command, BsonDocumentSerializer.Instance, __messageEncoderSettings); - var response = operation.Execute(binding, CancellationToken.None); + var response = operation.Execute(OperationContext.NoTimeout, binding); return response["maxWireVersion"].AsInt32; } } @@ -326,7 +322,7 @@ private static SemanticVersion GetServerVersion() { var command = new BsonDocument("buildinfo", 1); var operation = new ReadCommandOperation<BsonDocument>(DatabaseNamespace.Admin, command, BsonDocumentSerializer.Instance, __messageEncoderSettings); - var response = operation.Execute(binding, CancellationToken.None); + var response = operation.Execute(OperationContext.NoTimeout, binding); return SemanticVersion.Parse(response["version"].AsString); } } @@ -338,12 +334,25 @@ public static BsonDocument GetServerParameters() { var command = new BsonDocument("getParameter", new BsonString("*")); var operation = new ReadCommandOperation<BsonDocument>(DatabaseNamespace.Admin, command, BsonDocumentSerializer.Instance, __messageEncoderSettings); - var serverParameters = operation.Execute(binding, CancellationToken.None); + var serverParameters = operation.Execute(OperationContext.NoTimeout, binding); return serverParameters; } } + public static bool ShouldSkipMongocryptdTests_SERVER_106469() => + RequirePlatform.GetCurrentOperatingSystem() == SupportedOperatingSystem.Windows && + ServerVersion >= new SemanticVersion(8, 1, 9999); + + public static void SkipMongocryptdTests_SERVER_106469(bool checkForSharedLib = 
false) + { + if (ShouldSkipMongocryptdTests_SERVER_106469() && + (!checkForSharedLib || GetCryptSharedLibPath() == null)) + { + throw new SkipException("Test skipped because of SERVER-106469."); + } + } + internal static ICoreSessionHandle StartSession() { return StartSession(__cluster.Value); @@ -404,7 +413,7 @@ private static void DropDatabase() using (var session = StartSession()) using (var binding = CreateReadWriteBinding(session)) { - operation.Execute(binding, CancellationToken.None); + operation.Execute(OperationContext.NoTimeout, binding); } } @@ -415,7 +424,7 @@ private static IEnumerable<BsonDocument> FindDocuments(IClusterInternal cluster, { var operation = new FindOperation<BsonDocument>(collectionNamespace, BsonDocumentSerializer.Instance, __messageEncoderSettings); - return operation.Execute(binding, CancellationToken.None).ToList(); + return operation.Execute(OperationContext.NoTimeout, binding).ToList(); } } @@ -453,8 +462,7 @@ private static string GetStorageEngine() switch (clusterType) { case ClusterType.LoadBalanced: - case var _ when Serverless: - // Load balancing and serverless are only supported for servers higher than 50 + // Load balancing only supported for servers higher than 50 result = "wiredTiger"; break; case ClusterType.Sharded: @@ -485,7 +493,7 @@ private static string GetStorageEngine() break; } - return result ?? "mmapv1"; + return result ?? 
"wiredTiger"; string GetStorageEngineForCluster(IClusterInternal cluster) { @@ -495,7 +503,7 @@ string GetStorageEngineForCluster(IClusterInternal cluster) { var operation = new ReadCommandOperation<BsonDocument>(DatabaseNamespace.Admin, command, BsonDocumentSerializer.Instance, __messageEncoderSettings); - var response = operation.Execute(binding, CancellationToken.None); + var response = operation.Execute(OperationContext.NoTimeout, binding); if (response.TryGetValue("storageEngine", out var storageEngine) && storageEngine.AsBsonDocument.TryGetValue("name", out var name)) { return name.AsString; diff --git a/tests/MongoDB.Driver.TestHelpers/Core/FailPoint.cs b/tests/MongoDB.Driver.TestHelpers/Core/FailPoint.cs index c9f6c9f9a4e..0122eb8447b 100644 --- a/tests/MongoDB.Driver.TestHelpers/Core/FailPoint.cs +++ b/tests/MongoDB.Driver.TestHelpers/Core/FailPoint.cs @@ -88,7 +88,7 @@ public static FailPoint ConfigureAlwaysOn(IClusterInternal cluster, ICoreSession private static IServer GetWriteableServer(IClusterInternal cluster) { var selector = WritableServerSelector.Instance; - return cluster.SelectServer(selector, CancellationToken.None); + return cluster.SelectServer(OperationContext.NoTimeout, selector); } private static void MakeFailPointApplicationNameTestableIfConfigured(BsonDocument command, bool async) @@ -186,7 +186,7 @@ private void ExecuteCommand(BsonDocument command, bool waitForConnected) BsonDocumentSerializer.Instance, new MessageEncoderSettings()); - operation.Execute(_binding, CancellationToken.None); + operation.Execute(OperationContext.NoTimeout, _binding); } } } diff --git a/tests/MongoDB.Driver.TestHelpers/Core/MockClusterableServerFactory.cs b/tests/MongoDB.Driver.TestHelpers/Core/MockClusterableServerFactory.cs index d865da3f833..9bc9a17f6a9 100644 --- a/tests/MongoDB.Driver.TestHelpers/Core/MockClusterableServerFactory.cs +++ b/tests/MongoDB.Driver.TestHelpers/Core/MockClusterableServerFactory.cs @@ -93,11 +93,11 @@ public IClusterableServer 
CreateServer(ClusterType clusterType, ClusterId cluste mockConnectionPool.Setup(p => p.Generation).Returns(valueFunction: () => poolGeneration); Action acquireConnectionCallback = () => { connectionGeneration = poolGeneration; }; mockConnectionPool - .Setup(p => p.AcquireConnection(It.IsAny<CancellationToken>())) + .Setup(p => p.AcquireConnection(It.IsAny<OperationContext>())) .Callback(acquireConnectionCallback) .Returns(mockConnection.Object); mockConnectionPool - .Setup(p => p.AcquireConnectionAsync(It.IsAny<CancellationToken>())) + .Setup(p => p.AcquireConnectionAsync(It.IsAny<OperationContext>())) .Callback(acquireConnectionCallback) .ReturnsAsync(mockConnection.Object); mockConnectionPool.Setup(p => p.Clear(It.IsAny<bool>())).Callback(() => { ++poolGeneration; }); @@ -186,7 +186,7 @@ public void PublishDescription(ServerDescription description) var maxWireVersion = description.MaxWireVersion; var server = (Server)result.Server; var helloResult = new HelloResult(new BsonDocument { { "compressors", new BsonArray() }, { "maxWireVersion", maxWireVersion } }); - var mockConnection = Mock.Get(server._connectionPool().AcquireConnection(CancellationToken.None)); + var mockConnection = Mock.Get(server._connectionPool().AcquireConnection(OperationContext.NoTimeout)); mockConnection.SetupGet(c => c.Description) .Returns(new ConnectionDescription(new ConnectionId(description.ServerId, 0), helloResult)); } diff --git a/tests/MongoDB.Driver.TestHelpers/Core/MockConnection.cs b/tests/MongoDB.Driver.TestHelpers/Core/MockConnection.cs index 2e35c86d5ff..b2830dc1f1f 100644 --- a/tests/MongoDB.Driver.TestHelpers/Core/MockConnection.cs +++ b/tests/MongoDB.Driver.TestHelpers/Core/MockConnection.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -40,7 +40,6 @@ internal sealed class MockConnection : IConnectionHandle private DateTime _openedAtUtc; private readonly Queue<ActionQueueItem> _replyActions; private readonly List<RequestMessage> _sentMessages; - private bool? _wasReadTimeoutChanged; private readonly Action<ConnectionOpeningEvent> _openingEventHandler; private readonly Action<ConnectionOpenedEvent> _openedEventHandler; @@ -139,8 +138,6 @@ public bool IsExpired public ConnectionSettings Settings => _connectionSettings; - public bool? WasReadTimeoutChanged => _wasReadTimeoutChanged; - // methods public void Dispose() { @@ -184,7 +181,7 @@ public List<RequestMessage> GetSentMessages() return _sentMessages; } - public void Open(CancellationToken cancellationToken) + public void Open(OperationContext operationContext) { _openingEventHandler?.Invoke(new ConnectionOpeningEvent(_connectionId, _connectionSettings, null)); @@ -196,7 +193,7 @@ public void Open(CancellationToken cancellationToken) _openedEventHandler?.Invoke(new ConnectionOpenedEvent(_connectionId, _connectionSettings, TimeSpan.FromTicks(1), null)); } - public Task OpenAsync(CancellationToken cancellationToken) + public Task OpenAsync(OperationContext operationContext) { _openingEventHandler?.Invoke(new ConnectionOpeningEvent(_connectionId, _connectionSettings, null)); @@ -210,42 +207,33 @@ public Task OpenAsync(CancellationToken cancellationToken) return Task.CompletedTask; } - public void Reauthenticate(CancellationToken cancellationToken) - { - _replyActions.Dequeue().GetEffectiveMessage(); - } + public void Reauthenticate(OperationContext operationContext) + => _replyActions.Dequeue().GetEffectiveMessage(); - public async Task ReauthenticateAsync(CancellationToken cancellationToken) - { - await _replyActions.Dequeue().GetEffectiveMessageAsync().ConfigureAwait(false); - } + public Task ReauthenticateAsync(OperationContext operationContext) + => _replyActions.Dequeue().GetEffectiveMessageAsync(); - public ResponseMessage 
ReceiveMessage(int responseTo, IMessageEncoderSelector encoderSelector, MessageEncoderSettings messageEncoderSettings, CancellationToken cancellationToken) + public ResponseMessage ReceiveMessage(OperationContext operationContext, int responseTo, IMessageEncoderSelector encoderSelector, MessageEncoderSettings messageEncoderSettings) { var action = _replyActions.Dequeue(); return (ResponseMessage)action.GetEffectiveMessage(); } - public async Task<ResponseMessage> ReceiveMessageAsync(int responseTo, IMessageEncoderSelector encoderSelector, MessageEncoderSettings messageEncoderSettings, CancellationToken cancellationToken) + public async Task<ResponseMessage> ReceiveMessageAsync(OperationContext operationContext, int responseTo, IMessageEncoderSelector encoderSelector, MessageEncoderSettings messageEncoderSettings) { var action = _replyActions.Dequeue(); return (ResponseMessage)await action.GetEffectiveMessageAsync().ConfigureAwait(false); } - public void SendMessages(IEnumerable<RequestMessage> messages, MessageEncoderSettings messageEncoderSettings, CancellationToken cancellationToken) + public void SendMessage(OperationContext operationContext, RequestMessage message, MessageEncoderSettings messageEncoderSettings) { - _sentMessages.AddRange(messages); + _sentMessages.Add(message); } - public Task SendMessagesAsync(IEnumerable<RequestMessage> messages, MessageEncoderSettings messageEncoderSettings, CancellationToken cancellationToken) + public Task SendMessageAsync(OperationContext operationContext, RequestMessage message, MessageEncoderSettings messageEncoderSettings) { - _sentMessages.AddRange(messages); - return Task.FromResult<object>(null); - } - - public void SetReadTimeout(TimeSpan timeout) - { - _wasReadTimeoutChanged = true; + _sentMessages.Add(message); + return Task.CompletedTask; } // nested type diff --git a/tests/MongoDB.Driver.TestHelpers/Core/XunitExtensions/RequireServer.cs b/tests/MongoDB.Driver.TestHelpers/Core/XunitExtensions/RequireServer.cs 
index 9ea6c5de4e1..1efd22a60ac 100644 --- a/tests/MongoDB.Driver.TestHelpers/Core/XunitExtensions/RequireServer.cs +++ b/tests/MongoDB.Driver.TestHelpers/Core/XunitExtensions/RequireServer.cs @@ -18,7 +18,7 @@ using MongoDB.Bson; using MongoDB.Driver.Core.Clusters; using MongoDB.Driver.Core.Misc; -using Xunit; +using MongoDB.Driver.Encryption; using Xunit.Sdk; namespace MongoDB.Driver.Core.TestHelpers.XunitExtensions @@ -129,18 +129,6 @@ public RequireServer RunOn(BsonArray requirements) throw new SkipException($"Test skipped because cluster does not meet runOn requirements: {requirements}."); } - public RequireServer Serverless(bool require = true) - { - var isServerless = CoreTestConfiguration.Serverless; - - if (isServerless == require) - { - return this; - } - - throw new SkipException("Test skipped because serverless is " + (require ? "required" : "not required") + "."); - } - public RequireServer StableServer(bool stable = true) { var serverVersion = CoreTestConfiguration.ServerVersion; @@ -210,7 +198,7 @@ public RequireServer StorageEngines(params string[] storageEngines) public RequireServer Tls(bool required = true) { - var usingTls = CoreTestConfiguration.ConnectionString.Tls; + var usingTls = CoreTestConfiguration.ConnectionString.Tls ?? 
false; if (usingTls == required) { return this; @@ -315,18 +303,13 @@ private bool IsRequirementSatisfied(BsonElement requirement) } case "authMechanism": var actualValue = CoreTestConfiguration.GetServerParameters().GetValue("authenticationMechanisms").AsBsonArray; - var requiredValue = requirement.Value.AsString; - return actualValue.Contains(requiredValue); + return actualValue.Contains(requirement.Value.AsString); case "serverless": var serverlessValue = requirement.Value.AsString; switch (serverlessValue) { - case "allow": - return true; case "forbid": - return CoreTestConfiguration.Serverless == false; - case "require": - return CoreTestConfiguration.Serverless == true; + return true; default: throw new FormatException($"Invalid runOn requirement serverless field value: '{requirement.Value}'."); } @@ -345,12 +328,33 @@ private bool IsRequirementSatisfied(BsonElement requirement) var actualClusterType = CoreTestConfiguration.Cluster.Description.Type; var runOnClusterTypes = requirement.Value.AsBsonArray.Select(topology => MapTopologyToClusterType(topology.AsString)).ToList(); return runOnClusterTypes.Contains(actualClusterType); - case "csfle": return Feature.ClientSideEncryption.IsSupported(CoreTestConfiguration.MaxWireVersion); + case "csfle": + return IsCsfleRequirementSatisfied(requirement); default: throw new FormatException($"Unrecognized requirement field: '{requirement.Name}'."); } } + private bool IsCsfleRequirementSatisfied(BsonElement requirement) + { + var isCsfleSupported = Feature.ClientSideEncryption.IsSupported(CoreTestConfiguration.MaxWireVersion); + if (!isCsfleSupported) + { + return false; + } + + var requiredValue = requirement.Value; + if (!requiredValue.IsBsonDocument) + { + return true; + } + + // Check if minimum libmongocrypt version requirement is met + var minLibmongocryptVersion = SemanticVersion.Parse(requiredValue["minLibmongocryptVersion"].AsString); + var actualLibmongocryptVersion = SemanticVersion.Parse(Library.Version); + 
return SemanticVersionCompareToAsReleased(actualLibmongocryptVersion, minLibmongocryptVersion) >= 0; + } + private ClusterType MapTopologyToClusterType(string topology) { switch (topology) diff --git a/tests/MongoDB.Driver.TestHelpers/DriverTestConfiguration.cs b/tests/MongoDB.Driver.TestHelpers/DriverTestConfiguration.cs index a0d0a2a8a4c..2a710c6ee8b 100644 --- a/tests/MongoDB.Driver.TestHelpers/DriverTestConfiguration.cs +++ b/tests/MongoDB.Driver.TestHelpers/DriverTestConfiguration.cs @@ -24,7 +24,6 @@ using MongoDB.Driver.Core.Clusters; using MongoDB.Driver.Core.Configuration; using MongoDB.Driver.Core.Connections; -using MongoDB.Driver.Core.Logging; using MongoDB.Driver.Core.Servers; using MongoDB.Driver.Encryption; @@ -184,6 +183,13 @@ public static MongoClientSettings GetClientSettings() { serverSelectionTimeoutString = "30000"; } + + if (System.Diagnostics.Debugger.IsAttached) + { + clientSettings.HeartbeatTimeout = TimeSpan.FromDays(1); + clientSettings.ServerMonitoringMode = ServerMonitoringMode.Poll; + } + clientSettings.ServerSelectionTimeout = TimeSpan.FromMilliseconds(int.Parse(serverSelectionTimeoutString)); clientSettings.ClusterConfigurator = cb => CoreTestConfiguration.ConfigureLogging(cb); clientSettings.ServerApi = CoreTestConfiguration.ServerApi; @@ -197,8 +203,8 @@ public static ConnectionDescription GetConnectionDescription() { var cluster = Client.GetClusterInternal(); using (var binding = new ReadWriteBindingHandle(new WritableServerBinding(cluster, NoCoreSession.NewHandle()))) - using (var channelSource = binding.GetWriteChannelSource(default)) - using (var channel = channelSource.GetChannel(default)) + using (var channelSource = binding.GetWriteChannelSource(OperationContext.NoTimeout)) + using (var channel = channelSource.GetChannel(OperationContext.NoTimeout)) { return channel.ConnectionDescription; } diff --git a/tests/MongoDB.Driver.TestHelpers/IntegrationTest.cs b/tests/MongoDB.Driver.TestHelpers/IntegrationTest.cs index 
c0424339d5f..a1408959b6c 100644 --- a/tests/MongoDB.Driver.TestHelpers/IntegrationTest.cs +++ b/tests/MongoDB.Driver.TestHelpers/IntegrationTest.cs @@ -15,12 +15,11 @@ using System; using MongoDB.Driver.Core.TestHelpers.XunitExtensions; -using MongoDB.TestHelpers.XunitExtensions; using Xunit; namespace MongoDB.Driver.Tests { - [IntegrationTest] + [Trait("Category", "Integration")] public abstract class IntegrationTest<TFixture> : IClassFixture<TFixture> where TFixture : MongoDatabaseFixture { diff --git a/tests/MongoDB.Driver.TestHelpers/MongoDB.Driver.TestHelpers.csproj b/tests/MongoDB.Driver.TestHelpers/MongoDB.Driver.TestHelpers.csproj index 491068dd8bd..e1faee5876f 100644 --- a/tests/MongoDB.Driver.TestHelpers/MongoDB.Driver.TestHelpers.csproj +++ b/tests/MongoDB.Driver.TestHelpers/MongoDB.Driver.TestHelpers.csproj @@ -2,6 +2,7 @@ <Import Project="..\BuildProps\Tests.Build.props" /> <PropertyGroup> + <IsTestProject>false</IsTestProject> <CodeAnalysisRuleSet>..\..\MongoDBTest.ruleset</CodeAnalysisRuleSet> </PropertyGroup> @@ -13,7 +14,6 @@ <ItemGroup> <PackageReference Include="Microsoft.Diagnostics.Runtime" Version="2.0.226801" /> - <PackageReference Include="Microsoft.NETFramework.ReferenceAssemblies" PrivateAssets="All" Version="1.0.0" /> </ItemGroup> <ItemGroup> diff --git a/tests/MongoDB.Driver.Tests/AggregateFluentFacetTests.cs b/tests/MongoDB.Driver.Tests/AggregateFluentFacetTests.cs index c248cdc75cc..71c13c2577b 100644 --- a/tests/MongoDB.Driver.Tests/AggregateFluentFacetTests.cs +++ b/tests/MongoDB.Driver.Tests/AggregateFluentFacetTests.cs @@ -23,11 +23,11 @@ using MongoDB.Bson.Serialization.Serializers; using MongoDB.Bson.TestHelpers; using MongoDB.Driver.Core.TestHelpers.XunitExtensions; -using MongoDB.Driver.Linq; using Xunit; namespace MongoDB.Driver.Tests { + [Trait("Category", "Integration")] public class AggregateFluentFacetTests { #region static diff --git a/tests/MongoDB.Driver.Tests/AggregateFluentGraphLookupWithAirportsCollectionTests.cs 
b/tests/MongoDB.Driver.Tests/AggregateFluentGraphLookupWithAirportsCollectionTests.cs index 29142383c79..3bccb211750 100644 --- a/tests/MongoDB.Driver.Tests/AggregateFluentGraphLookupWithAirportsCollectionTests.cs +++ b/tests/MongoDB.Driver.Tests/AggregateFluentGraphLookupWithAirportsCollectionTests.cs @@ -26,6 +26,7 @@ namespace MongoDB.Driver.Tests { + [Trait("Category", "Integration")] public class AggregateFluentGraphLookupWithAirportCollectionTests { #region static diff --git a/tests/MongoDB.Driver.Tests/AggregateFluentGraphLookupWithEmployeeCollectionTests.cs b/tests/MongoDB.Driver.Tests/AggregateFluentGraphLookupWithEmployeeCollectionTests.cs index f2df2ed9e72..291b675ccc2 100644 --- a/tests/MongoDB.Driver.Tests/AggregateFluentGraphLookupWithEmployeeCollectionTests.cs +++ b/tests/MongoDB.Driver.Tests/AggregateFluentGraphLookupWithEmployeeCollectionTests.cs @@ -27,6 +27,7 @@ namespace MongoDB.Driver.Tests { + [Trait("Category", "Integration")] public class AggregateFluentGraphLookupWithEmployeeCollectionTests { #region static diff --git a/tests/MongoDB.Driver.Tests/AggregateFluentTests.cs b/tests/MongoDB.Driver.Tests/AggregateFluentTests.cs index ea4c6c9a3b2..405ec767829 100644 --- a/tests/MongoDB.Driver.Tests/AggregateFluentTests.cs +++ b/tests/MongoDB.Driver.Tests/AggregateFluentTests.cs @@ -31,6 +31,7 @@ namespace MongoDB.Driver.Tests { + [Trait("Category", "Integration")] public class AggregateFluentTests { [Theory] diff --git a/tests/MongoDB.Driver.Tests/AggregateGraphLookupEnumerableFromOrToTests.cs b/tests/MongoDB.Driver.Tests/AggregateGraphLookupEnumerableFromOrToTests.cs index fee02d81a62..87c3af7d176 100644 --- a/tests/MongoDB.Driver.Tests/AggregateGraphLookupEnumerableFromOrToTests.cs +++ b/tests/MongoDB.Driver.Tests/AggregateGraphLookupEnumerableFromOrToTests.cs @@ -16,12 +16,12 @@ using System.Collections.Generic; using FluentAssertions; using MongoDB.Bson; -using MongoDB.Driver.Core.Misc; using MongoDB.Driver.Core.TestHelpers.XunitExtensions; 
using Xunit; namespace MongoDB.Driver.Tests { + [Trait("Category", "Integration")] public class AggregateGraphLookupEnumerableFromOrToTests { // public methods diff --git a/tests/MongoDB.Driver.Tests/AsyncCursorTests.cs b/tests/MongoDB.Driver.Tests/AsyncCursorTests.cs index e9864e8d1c8..72fe1a39f88 100644 --- a/tests/MongoDB.Driver.Tests/AsyncCursorTests.cs +++ b/tests/MongoDB.Driver.Tests/AsyncCursorTests.cs @@ -30,6 +30,7 @@ namespace MongoDB.Driver.Tests { + [Trait("Category", "Integration")] public class AsyncCursorTests { //public methods diff --git a/tests/MongoDB.Driver.Tests/Authentication/AuthenticationHelperTests.cs b/tests/MongoDB.Driver.Tests/Authentication/AuthenticationHelperTests.cs index 7c8d275e041..90938099faf 100644 --- a/tests/MongoDB.Driver.Tests/Authentication/AuthenticationHelperTests.cs +++ b/tests/MongoDB.Driver.Tests/Authentication/AuthenticationHelperTests.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,10 +13,9 @@ * limitations under the License. 
*/ -using System.Linq; using System.Net; using System.Security; -using System.Threading; +using System.Threading.Tasks; using FluentAssertions; using MongoDB.Bson; using MongoDB.Driver.Authentication; @@ -51,7 +50,7 @@ public void MongoPasswordDigest_should_create_the_correct_hash(string username, [Theory] [ParameterAttributeData] - public void Authenticate_should_invoke_authenticators_when_they_exist( + public async Task Authenticate_should_invoke_authenticators_when_they_exist( [Values(false, true)] bool async) { @@ -69,21 +68,21 @@ public void Authenticate_should_invoke_authenticators_when_they_exist( if (async) { - AuthenticationHelper.AuthenticateAsync(mockConnection.Object, description, authenticator, CancellationToken.None).GetAwaiter().GetResult(); + await AuthenticationHelper.AuthenticateAsync(OperationContext.NoTimeout, mockConnection.Object, description, authenticator); - mockAuthenticator.Verify(a => a.AuthenticateAsync(mockConnection.Object, description, CancellationToken.None), Times.Once); + mockAuthenticator.Verify(a => a.AuthenticateAsync(It.IsAny<OperationContext>(), mockConnection.Object, description), Times.Once); } else { - AuthenticationHelper.Authenticate(mockConnection.Object, description, authenticator, CancellationToken.None); + AuthenticationHelper.Authenticate(OperationContext.NoTimeout, mockConnection.Object, description, authenticator); - mockAuthenticator.Verify(a => a.Authenticate(mockConnection.Object, description, CancellationToken.None), Times.Once); + mockAuthenticator.Verify(a => a.Authenticate(It.IsAny<OperationContext>(), mockConnection.Object, description), Times.Once); } } [Theory] [ParameterAttributeData] - public void Authenticate_should_not_invoke_authenticator_when_connected_to_an_arbiter( + public async Task Authenticate_should_not_invoke_authenticator_when_connected_to_an_arbiter( [Values(false, true)] bool async) { @@ -101,15 +100,15 @@ public void Authenticate_should_not_invoke_authenticator_when_connected_to_an_ar 
if (async) { - AuthenticationHelper.AuthenticateAsync(mockConnection.Object, description, authenticator, CancellationToken.None).GetAwaiter().GetResult(); + await AuthenticationHelper.AuthenticateAsync(OperationContext.NoTimeout, mockConnection.Object, description, authenticator); - mockAuthenticator.Verify(a => a.AuthenticateAsync(It.IsAny<IConnection>(), It.IsAny<ConnectionDescription>(), It.IsAny<CancellationToken>()), Times.Never); + mockAuthenticator.Verify(a => a.AuthenticateAsync(It.IsAny<OperationContext>(), It.IsAny<IConnection>(), It.IsAny<ConnectionDescription>()), Times.Never); } else { - AuthenticationHelper.Authenticate(mockConnection.Object, description, authenticator, CancellationToken.None); + AuthenticationHelper.Authenticate(OperationContext.NoTimeout, mockConnection.Object, description, authenticator); - mockAuthenticator.Verify(a => a.Authenticate(It.IsAny<IConnection>(), It.IsAny<ConnectionDescription>(), It.IsAny<CancellationToken>()), Times.Never); + mockAuthenticator.Verify(a => a.Authenticate(It.IsAny<OperationContext>(), It.IsAny<IConnection>(), It.IsAny<ConnectionDescription>()), Times.Never); } } } diff --git a/tests/MongoDB.Driver.Tests/Authentication/MongoAWSAuthenticatorTests.cs b/tests/MongoDB.Driver.Tests/Authentication/MongoAWSAuthenticatorTests.cs index ed668f0687b..b0a84d4d31e 100644 --- a/tests/MongoDB.Driver.Tests/Authentication/MongoAWSAuthenticatorTests.cs +++ b/tests/MongoDB.Driver.Tests/Authentication/MongoAWSAuthenticatorTests.cs @@ -1,4 +1,4 @@ -/* Copyright 2020–present MongoDB Inc. +/* Copyright 2010–present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -17,6 +17,7 @@ using System.Collections.Generic; using System.Net; using System.Threading; +using System.Threading.Tasks; using FluentAssertions; using MongoDB.Bson; using MongoDB.Driver.Authentication; @@ -61,7 +62,7 @@ static MongoAWSAuthenticatorTests() [Theory] [ParameterAttributeData] - public void Authenticate_should_have_expected_result( + public async Task Authenticate_should_have_expected_result( [Values(false, true)] bool async) { var dateTime = DateTime.UtcNow; @@ -115,11 +116,11 @@ public void Authenticate_should_have_expected_result( if (async) { - subject.AuthenticateAsync(connection, __descriptionCommandWireProtocol, CancellationToken.None).GetAwaiter().GetResult(); + await subject.AuthenticateAsync(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol); } else { - subject.Authenticate(connection, __descriptionCommandWireProtocol, CancellationToken.None); + subject.Authenticate(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol); } SpinWait.SpinUntil(() => connection.GetSentMessages().Count >= 2, TimeSpan.FromSeconds(5)).Should().BeTrue(); @@ -139,7 +140,7 @@ public void Authenticate_should_have_expected_result( [Theory] [ParameterAttributeData] - public void Authenticate_should_send_serverApi_with_command_wire_protocol( + public async Task Authenticate_should_send_serverApi_with_command_wire_protocol( [Values(false, true)] bool useServerApi, [Values(false, true)] bool async) { @@ -195,11 +196,11 @@ public void Authenticate_should_send_serverApi_with_command_wire_protocol( if (async) { - subject.AuthenticateAsync(connection, __descriptionCommandWireProtocol, CancellationToken.None).GetAwaiter().GetResult(); + await subject.AuthenticateAsync(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol); } else { - subject.Authenticate(connection, __descriptionCommandWireProtocol, CancellationToken.None); + subject.Authenticate(OperationContext.NoTimeout, connection, 
__descriptionCommandWireProtocol); } SpinWait.SpinUntil(() => connection.GetSentMessages().Count >= 2, TimeSpan.FromSeconds(5)).Should().BeTrue(); @@ -217,7 +218,7 @@ public void Authenticate_should_send_serverApi_with_command_wire_protocol( [Theory] [ParameterAttributeData] - public void Authenticate_with_loadBalancedConnection_should_use_command_wire_protocol( + public async Task Authenticate_with_loadBalancedConnection_should_use_command_wire_protocol( [Values(false, true)] bool async) { var dateTime = DateTime.UtcNow; @@ -271,11 +272,11 @@ public void Authenticate_with_loadBalancedConnection_should_use_command_wire_pro if (async) { - subject.AuthenticateAsync(connection, __descriptionCommandWireProtocol, CancellationToken.None).GetAwaiter().GetResult(); + await subject.AuthenticateAsync(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol); } else { - subject.Authenticate(connection, __descriptionCommandWireProtocol, CancellationToken.None); + subject.Authenticate(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol); } SpinWait.SpinUntil(() => connection.GetSentMessages().Count >= 2, TimeSpan.FromSeconds(5)).Should().BeTrue(); @@ -293,7 +294,7 @@ public void Authenticate_with_loadBalancedConnection_should_use_command_wire_pro [Theory] [ParameterAttributeData] - public void Authenticate_should_throw_an_AuthenticationException_when_authentication_fails( + public async Task Authenticate_should_throw_an_AuthenticationException_when_authentication_fails( [Values(false, true)] bool async) { var subject = CreateAwsSaslAuthenticator(null, RandomByteGenerator.Instance, SystemClock.Instance, null); @@ -303,22 +304,16 @@ public void Authenticate_should_throw_an_AuthenticationException_when_authentica connection.EnqueueCommandResponseMessage(commandResponse); connection.Description = __descriptionCommandWireProtocol; - Exception exception; - if (async) - { - exception = Record.Exception(() => subject.AuthenticateAsync(connection, 
__descriptionCommandWireProtocol, CancellationToken.None).GetAwaiter().GetResult()); - } - else - { - exception = Record.Exception(() => subject.Authenticate(connection, __descriptionCommandWireProtocol, CancellationToken.None)); - } + var exception = async ? + await Record.ExceptionAsync(() => subject.AuthenticateAsync(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol)) : + Record.Exception(() => subject.Authenticate(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol)) ; exception.Should().BeOfType<MongoAuthenticationException>(); } [Theory] [ParameterAttributeData] - public void Authenticate_should_throw_when_server_provides_invalid_host( + public async Task Authenticate_should_throw_when_server_provides_invalid_host( [Values("", "abc..def")] string host, [Values(false, true)] bool async) { @@ -346,15 +341,9 @@ public void Authenticate_should_throw_when_server_provides_invalid_host( connection.EnqueueCommandResponseMessage(saslContinueCommandResponse); connection.Description = __descriptionCommandWireProtocol; - Exception exception; - if (async) - { - exception = Record.Exception(() => subject.AuthenticateAsync(connection, __descriptionCommandWireProtocol, CancellationToken.None).GetAwaiter().GetResult()); - } - else - { - exception = Record.Exception(() => subject.Authenticate(connection, __descriptionCommandWireProtocol, CancellationToken.None)); - } + var exception = async ? 
+ await Record.ExceptionAsync(() => subject.AuthenticateAsync(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol)) : + Record.Exception(() => subject.Authenticate(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol)) ; exception.Should().BeOfType<MongoAuthenticationException>(); exception.Message.Should().Be("Server returned an invalid sts host."); @@ -362,7 +351,7 @@ public void Authenticate_should_throw_when_server_provides_invalid_host( [Theory] [ParameterAttributeData] - public void Authenticate_should_throw_when_server_provides_invalid_nonce( + public async Task Authenticate_should_throw_when_server_provides_invalid_nonce( [Values(false, true)] bool async) { var clientNonce = RandomByteGenerator.Instance.Generate(ClientNonceLength); @@ -387,15 +376,9 @@ public void Authenticate_should_throw_when_server_provides_invalid_nonce( connection.EnqueueCommandResponseMessage(saslStartCommandResponse); connection.Description = __descriptionCommandWireProtocol; - Exception exception; - if (async) - { - exception = Record.Exception(() => subject.AuthenticateAsync(connection, __descriptionCommandWireProtocol, CancellationToken.None).GetAwaiter().GetResult()); - } - else - { - exception = Record.Exception(() => subject.Authenticate(connection, __descriptionCommandWireProtocol, CancellationToken.None)); - } + var exception = async ? 
+ await Record.ExceptionAsync(() => subject.AuthenticateAsync(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol)) : + Record.Exception(() => subject.Authenticate(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol)) ; exception.Should().BeOfType<MongoAuthenticationException>(); exception.Message.Should().Be("Server sent an invalid nonce."); @@ -403,7 +386,7 @@ public void Authenticate_should_throw_when_server_provides_invalid_nonce( [Theory] [ParameterAttributeData] - public void Authenticate_should_throw_when_server_provides_unexpected_field( + public async Task Authenticate_should_throw_when_server_provides_unexpected_field( [Values(false, true)] bool async) { var clientNonce = RandomByteGenerator.Instance.Generate(ClientNonceLength); @@ -432,15 +415,9 @@ public void Authenticate_should_throw_when_server_provides_unexpected_field( connection.EnqueueCommandResponseMessage(saslContinueCommandResponse); connection.Description = __descriptionCommandWireProtocol; - Exception exception; - if (async) - { - exception = Record.Exception(() => subject.AuthenticateAsync(connection, __descriptionCommandWireProtocol, CancellationToken.None).GetAwaiter().GetResult()); - } - else - { - exception = Record.Exception(() => subject.Authenticate(connection, __descriptionCommandWireProtocol, CancellationToken.None)); - } + var exception = async ? 
+ await Record.ExceptionAsync(() => subject.AuthenticateAsync(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol)) : + Record.Exception(() => subject.Authenticate(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol)) ; exception.Should().BeOfType<MongoAuthenticationException>(); exception.Message.Should().Be("Server returned unexpected fields: u."); @@ -448,7 +425,7 @@ public void Authenticate_should_throw_when_server_provides_unexpected_field( [Theory] [ParameterAttributeData] - public void Authenticate_with_session_token_should_have_expected_result( + public async Task Authenticate_with_session_token_should_have_expected_result( [Values(false, true)] bool async) { var dateTime = DateTime.UtcNow; @@ -505,11 +482,11 @@ public void Authenticate_with_session_token_should_have_expected_result( if (async) { - subject.AuthenticateAsync(connection, __descriptionCommandWireProtocol, CancellationToken.None).GetAwaiter().GetResult(); + await subject.AuthenticateAsync(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol); } else { - subject.Authenticate(connection, __descriptionCommandWireProtocol, CancellationToken.None); + subject.Authenticate(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol); } SpinWait.SpinUntil(() => connection.GetSentMessages().Count >= 2, TimeSpan.FromSeconds(5)).Should().BeTrue(); diff --git a/tests/MongoDB.Driver.Tests/Authentication/MongoDBX509AuthenticatorTests.cs b/tests/MongoDB.Driver.Tests/Authentication/MongoDBX509AuthenticatorTests.cs index 451f8cddadd..4a093dc1492 100644 --- a/tests/MongoDB.Driver.Tests/Authentication/MongoDBX509AuthenticatorTests.cs +++ b/tests/MongoDB.Driver.Tests/Authentication/MongoDBX509AuthenticatorTests.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,6 +16,7 @@ using System; using System.Net; using System.Threading; +using System.Threading.Tasks; using FluentAssertions; using MongoDB.Bson; using MongoDB.Driver.Authentication; @@ -43,18 +44,18 @@ public class MongoDBX509AuthenticatorTests .Add(OppressiveLanguageConstants.LegacyHelloResponseIsWritablePrimaryFieldName, 1) .Add("maxWireVersion", WireVersion.Server47))); - [Theory] - [InlineData("")] - public void Constructor_should_throw_an_ArgumentException_when_username_is_empty(string username) + [Fact] + public void Constructor_should_throw_an_ArgumentException_when_username_is_empty() { - Action act = () => new MongoDBX509Authenticator(username, serverApi: null); + var exception = Record.Exception(() => new MongoDBX509Authenticator("", serverApi: null)); - act.ShouldThrow<ArgumentException>(); + exception.Should().BeOfType<ArgumentException>().Subject + .ParamName.Should().Be("username"); } [Theory] [ParameterAttributeData] - public void Authenticate_should_send_serverApi_with_command_wire_protocol( + public async Task Authenticate_should_send_serverApi_with_command_wire_protocol( [Values(false, true)] bool useServerApi, [Values(false, true)] bool async) { @@ -71,11 +72,11 @@ public void Authenticate_should_send_serverApi_with_command_wire_protocol( if (async) { - subject.AuthenticateAsync(connection, __descriptionCommandWireProtocol, CancellationToken.None).GetAwaiter().GetResult(); + await subject.AuthenticateAsync(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol); } else { - subject.Authenticate(connection, __descriptionCommandWireProtocol, CancellationToken.None); + subject.Authenticate(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol); } SpinWait.SpinUntil(() => connection.GetSentMessages().Count >= 1, TimeSpan.FromSeconds(5)).Should().BeTrue(); @@ -92,7 +93,7 @@ public void Authenticate_should_send_serverApi_with_command_wire_protocol( [Theory] [ParameterAttributeData] - public void 
Authenticate_with_loadBalancedConnection_should_use_command_wire_protocol( + public async Task Authenticate_with_loadBalancedConnection_should_use_command_wire_protocol( [Values(false, true)] bool async) { var subject = new MongoDBX509Authenticator("CN=client,OU=kerneluser,O=10Gen,L=New York City,ST=New York,C=US", null); @@ -106,11 +107,11 @@ public void Authenticate_with_loadBalancedConnection_should_use_command_wire_pro if (async) { - subject.AuthenticateAsync(connection, __descriptionCommandWireProtocol, CancellationToken.None).GetAwaiter().GetResult(); + await subject.AuthenticateAsync(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol); } else { - subject.Authenticate(connection, __descriptionCommandWireProtocol, CancellationToken.None); + subject.Authenticate(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol); } SpinWait.SpinUntil(() => connection.GetSentMessages().Count >= 1, TimeSpan.FromSeconds(5)).Should().BeTrue(); @@ -128,7 +129,7 @@ public void Authenticate_with_loadBalancedConnection_should_use_command_wire_pro [Theory] [ParameterAttributeData] - public void Authenticate_should_throw_an_AuthenticationException_when_authentication_fails( + public async Task Authenticate_should_throw_an_AuthenticationException_when_authentication_fails( [Values(false, true)] bool async) { @@ -139,22 +140,16 @@ public void Authenticate_should_throw_an_AuthenticationException_when_authentica connection.Description = CreateConnectionDescription(maxWireVersion: WireVersion.Server36); connection.EnqueueCommandResponseMessage(response); - Action act; - if (async) - { - act = () => subject.AuthenticateAsync(connection, __descriptionCommandWireProtocol, CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - act = () => subject.Authenticate(connection, __descriptionCommandWireProtocol, CancellationToken.None); - } + var exception = async ? 
+ await Record.ExceptionAsync(() => subject.AuthenticateAsync(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol)) : + Record.Exception(() => subject.Authenticate(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol)); - act.ShouldThrow<MongoAuthenticationException>(); + exception.Should().BeOfType<MongoAuthenticationException>(); } [Theory] [ParameterAttributeData] - public void Authenticate_should_not_throw_when_authentication_succeeds( + public async Task Authenticate_should_not_throw_when_authentication_succeeds( [Values(false, true)] bool async) { @@ -166,22 +161,16 @@ public void Authenticate_should_not_throw_when_authentication_succeeds( connection.Description = CreateConnectionDescription(maxWireVersion: WireVersion.Server36); connection.EnqueueCommandResponseMessage(response); - Action act; - if (async) - { - act = () => subject.AuthenticateAsync(connection, __descriptionCommandWireProtocol, CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - act = () => subject.Authenticate(connection, __descriptionCommandWireProtocol, CancellationToken.None); - } + var exception = async ? 
+ await Record.ExceptionAsync(() => subject.AuthenticateAsync(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol)) : + Record.Exception(() => subject.Authenticate(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol)); - act.ShouldNotThrow(); + exception.Should().BeNull(); } [Theory] [ParameterAttributeData] - public void Authenticate_should_not_throw_when_username_is_null( + public async Task Authenticate_should_not_throw_when_username_is_null( [Values(false, true)] bool async) { @@ -195,15 +184,9 @@ public void Authenticate_should_not_throw_when_username_is_null( var description = CreateConnectionDescription(maxWireVersion: WireVersion.Server36); connection.Description = description; - Exception exception; - if (async) - { - exception = Record.Exception(() => subject.AuthenticateAsync(connection, description, CancellationToken.None).GetAwaiter().GetResult()); - } - else - { - exception = Record.Exception(() => subject.Authenticate(connection, description, CancellationToken.None)); - } + var exception = async ? + await Record.ExceptionAsync(() => subject.AuthenticateAsync(OperationContext.NoTimeout, connection, description)) : + Record.Exception(() => subject.Authenticate(OperationContext.NoTimeout, connection, description)); exception.Should().BeNull(); } diff --git a/tests/MongoDB.Driver.Tests/Authentication/PlainAuthenticatorTests.cs b/tests/MongoDB.Driver.Tests/Authentication/PlainAuthenticatorTests.cs index 7b67a33d41e..86100b111c7 100644 --- a/tests/MongoDB.Driver.Tests/Authentication/PlainAuthenticatorTests.cs +++ b/tests/MongoDB.Driver.Tests/Authentication/PlainAuthenticatorTests.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,6 +16,7 @@ using System; using System.Net; using System.Threading; +using System.Threading.Tasks; using FluentAssertions; using MongoDB.Bson; using MongoDB.Driver.Authentication; @@ -50,7 +51,7 @@ public class PlainAuthenticatorTests [Theory] [ParameterAttributeData] - public void Authenticate_should_throw_an_AuthenticationException_when_authentication_fails( + public async Task Authenticate_should_throw_an_AuthenticationException_when_authentication_fails( [Values(false, true)] bool async) { @@ -61,22 +62,16 @@ public void Authenticate_should_throw_an_AuthenticationException_when_authentica connection.EnqueueCommandResponseMessage(response); connection.Description = __descriptionCommandWireProtocol; - Action act; - if (async) - { - act = () => subject.AuthenticateAsync(connection, __descriptionCommandWireProtocol, CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - act = () => subject.Authenticate(connection, __descriptionCommandWireProtocol, CancellationToken.None); - } + var exception = async ? 
+ await Record.ExceptionAsync(() => subject.AuthenticateAsync(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol)) : + Record.Exception(() => subject.Authenticate(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol)); - act.ShouldThrow<MongoAuthenticationException>(); + exception.Should().BeOfType<MongoAuthenticationException>(); } [Theory] [ParameterAttributeData] - public void Authenticate_should_not_throw_when_authentication_succeeds( + public async Task Authenticate_should_not_throw_when_authentication_succeeds( [Values(false, true)] bool async) { @@ -90,17 +85,15 @@ public void Authenticate_should_not_throw_when_authentication_succeeds( var expectedRequestId = RequestMessage.CurrentGlobalRequestId + 1; - Action act; if (async) { - act = () => subject.AuthenticateAsync(connection, __descriptionCommandWireProtocol, CancellationToken.None).GetAwaiter().GetResult(); + await subject.AuthenticateAsync(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol); } else { - act = () => subject.Authenticate(connection, __descriptionCommandWireProtocol, CancellationToken.None); + subject.Authenticate(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol); } - act.ShouldNotThrow(); SpinWait.SpinUntil(() => connection.GetSentMessages().Count >= 1, TimeSpan.FromSeconds(5)).Should().BeTrue(); var sentMessages = MessageHelper.TranslateMessagesToBsonDocuments(connection.GetSentMessages()); @@ -114,7 +107,7 @@ public void Authenticate_should_not_throw_when_authentication_succeeds( [Theory] [ParameterAttributeData] - public void Authenticate_should_send_serverApi_with_command_wire_protocol( + public async Task Authenticate_should_send_serverApi_with_command_wire_protocol( [Values(false, true)] bool useServerApi, [Values(false, true)] bool async) { @@ -131,11 +124,11 @@ public void Authenticate_should_send_serverApi_with_command_wire_protocol( if (async) { - subject.AuthenticateAsync(connection, 
__descriptionCommandWireProtocol, CancellationToken.None).GetAwaiter().GetResult(); + await subject.AuthenticateAsync(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol); } else { - subject.Authenticate(connection, __descriptionCommandWireProtocol, CancellationToken.None); + subject.Authenticate(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol); } SpinWait.SpinUntil(() => connection.GetSentMessages().Count >= 1, TimeSpan.FromSeconds(5)).Should().BeTrue(); @@ -152,7 +145,7 @@ public void Authenticate_should_send_serverApi_with_command_wire_protocol( [Theory] [ParameterAttributeData] - public void Authenticate_with_loadBalancedConnection_should_use_command_wire_protocol( + public async Task Authenticate_with_loadBalancedConnection_should_use_command_wire_protocol( [Values(false, true)] bool async) { var subject = CreatePlainSaslAuthenticator(null); @@ -166,11 +159,11 @@ public void Authenticate_with_loadBalancedConnection_should_use_command_wire_pro if (async) { - subject.AuthenticateAsync(connection, __descriptionCommandWireProtocol, CancellationToken.None).GetAwaiter().GetResult(); + await subject.AuthenticateAsync(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol); } else { - subject.Authenticate(connection, __descriptionCommandWireProtocol, CancellationToken.None); + subject.Authenticate(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol); } SpinWait.SpinUntil(() => connection.GetSentMessages().Count >= 1, TimeSpan.FromSeconds(5)).Should().BeTrue(); diff --git a/tests/MongoDB.Driver.Tests/Authentication/SaslPrepHelperTests.cs b/tests/MongoDB.Driver.Tests/Authentication/SaslPrepHelperTests.cs index f0db472ce65..c35b95e153c 100644 --- a/tests/MongoDB.Driver.Tests/Authentication/SaslPrepHelperTests.cs +++ b/tests/MongoDB.Driver.Tests/Authentication/SaslPrepHelperTests.cs @@ -29,9 +29,9 @@ public class SaslPrepHelperTests { [Fact] public void 
SaslPrepQuery_accepts_undefined_codepoint() - { + { var strWithUnassignedCodepoint = $"abc{char.ConvertFromUtf32(_unassignedCodePoint.Value)}"; - + SaslPrepHelper.SaslPrepQuery(strWithUnassignedCodepoint).Should().Be(strWithUnassignedCodepoint); } @@ -92,7 +92,7 @@ public void SaslPrepStored_returns_expected_output_when_passed_Rfc4013_examples( public void SaslPrep_throws_argument_exception_when_passed_Rfc4013_examples(string expectedError, string input) { var exception = Record.Exception(()=>SaslPrepHelper.SaslPrepStored(input)); - + exception.Should().BeOfType<ArgumentException>().Subject.Message.Should().Be(expectedError); } @@ -100,18 +100,18 @@ public void SaslPrep_throws_argument_exception_when_passed_Rfc4013_examples(stri public void SaslPrepStored_throws_argument_exception_with_RandALCat_and_LCat_characters() { var exception = Record.Exception(() => SaslPrepHelper.SaslPrepStored("\u0627\u0041\u0627")); - + exception.Should().BeOfType<ArgumentException>(); exception.Message.Should().Be("Contains both RandALCat characters and LCat characters"); } - + [Fact] public void SaslPrepStored_throws_exception_when_passed_an_undefined_codepoint() - { + { var strWithUnassignedCodepoint = $"abc{char.ConvertFromUtf32(_unassignedCodePoint.Value)}"; - + var exception = Record.Exception(()=>SaslPrepHelper.SaslPrepStored(strWithUnassignedCodepoint)); - + exception.Should().BeOfType<ArgumentException>(); exception.Message.Should().Be("Character at position 3 is unassigned"); } diff --git a/tests/MongoDB.Driver.Tests/Authentication/ScramSha1AuthenticatorTests.cs b/tests/MongoDB.Driver.Tests/Authentication/ScramSha1AuthenticatorTests.cs index 11d1abe2593..09755e37be5 100644 --- a/tests/MongoDB.Driver.Tests/Authentication/ScramSha1AuthenticatorTests.cs +++ b/tests/MongoDB.Driver.Tests/Authentication/ScramSha1AuthenticatorTests.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,6 +18,7 @@ using System.Linq; using System.Net; using System.Threading; +using System.Threading.Tasks; using FluentAssertions; using MongoDB.Bson; using MongoDB.Driver.Authentication; @@ -52,7 +53,7 @@ public class ScramSha1AuthenticatorTests [Theory] [ParameterAttributeData] - public void Authenticate_should_send_serverApi_with_command_wire_protocol( + public async Task Authenticate_should_send_serverApi_with_command_wire_protocol( [Values(false, true)] bool useServerApi, [Values(false, true)] bool async) { @@ -70,11 +71,11 @@ public void Authenticate_should_send_serverApi_with_command_wire_protocol( if (async) { - subject.AuthenticateAsync(connection, __descriptionCommandWireProtocol, CancellationToken.None).GetAwaiter().GetResult(); + await subject.AuthenticateAsync(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol); } else { - subject.Authenticate(connection, __descriptionCommandWireProtocol, CancellationToken.None); + subject.Authenticate(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol); } SpinWait.SpinUntil(() => connection.GetSentMessages().Count >= 2, TimeSpan.FromSeconds(5)).Should().BeTrue(); @@ -91,7 +92,7 @@ public void Authenticate_should_send_serverApi_with_command_wire_protocol( [Theory] [ParameterAttributeData] - public void Authenticate_with_loadBalancedConnection_should_use_command_wire_protocol( + public async Task Authenticate_with_loadBalancedConnection_should_use_command_wire_protocol( [Values(false, true)] bool async) { var randomStringGenerator = new ConstantRandomStringGenerator("fyko+d2lbbFgONRv9qkxdawL"); @@ -107,11 +108,11 @@ public void Authenticate_with_loadBalancedConnection_should_use_command_wire_pro if (async) { - subject.AuthenticateAsync(connection, __descriptionCommandWireProtocol, CancellationToken.None).GetAwaiter().GetResult(); + await 
subject.AuthenticateAsync(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol); } else { - subject.Authenticate(connection, __descriptionCommandWireProtocol, CancellationToken.None); + subject.Authenticate(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol); } SpinWait.SpinUntil(() => connection.GetSentMessages().Count >= 2, TimeSpan.FromSeconds(5)).Should().BeTrue(); @@ -128,7 +129,7 @@ public void Authenticate_with_loadBalancedConnection_should_use_command_wire_pro [Theory] [ParameterAttributeData] - public void Authenticate_should_throw_an_AuthenticationException_when_authentication_fails( + public async Task Authenticate_should_throw_an_AuthenticationException_when_authentication_fails( [Values("MongoConnectionException", "MongoNotPrimaryException")] string exceptionName, [Values(false, true)] bool async) { @@ -139,22 +140,16 @@ public void Authenticate_should_throw_an_AuthenticationException_when_authentica connection.EnqueueCommandResponseMessage(responseException); connection.Description = __descriptionCommandWireProtocol; - Exception exception; - if (async) - { - exception = Record.Exception(() => subject.AuthenticateAsync(connection, __descriptionCommandWireProtocol, CancellationToken.None).GetAwaiter().GetResult()); - } - else - { - exception = Record.Exception(() => subject.Authenticate(connection, __descriptionCommandWireProtocol, CancellationToken.None)); - } + var exception = async ? 
+ await Record.ExceptionAsync(() => subject.AuthenticateAsync(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol)) : + Record.Exception(() => subject.Authenticate(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol)); exception.Should().BeOfType<MongoAuthenticationException>(); } [Theory] [ParameterAttributeData] - public void Authenticate_should_throw_when_server_provides_invalid_r_value( + public async Task Authenticate_should_throw_when_server_provides_invalid_r_value( [Values(false, true)] bool async) { @@ -168,22 +163,16 @@ public void Authenticate_should_throw_when_server_provides_invalid_r_value( connection.EnqueueCommandResponseMessage(saslStartResponse); connection.Description = __descriptionCommandWireProtocol; - Action act; - if (async) - { - act = () => subject.AuthenticateAsync(connection, __descriptionCommandWireProtocol, CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - act = () => subject.Authenticate(connection, __descriptionCommandWireProtocol, CancellationToken.None); - } + var exception = async ? 
+ await Record.ExceptionAsync(() => subject.AuthenticateAsync(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol)) : + Record.Exception(() => subject.Authenticate(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol)); - act.ShouldThrow<MongoAuthenticationException>(); + exception.Should().BeOfType<MongoAuthenticationException>(); } [Theory] [ParameterAttributeData] - public void Authenticate_should_throw_when_server_provides_invalid_serverSignature( + public async Task Authenticate_should_throw_when_server_provides_invalid_serverSignature( [Values(false, true)] bool async) { @@ -200,22 +189,16 @@ public void Authenticate_should_throw_when_server_provides_invalid_serverSignatu connection.EnqueueCommandResponseMessage(saslContinueResponse); connection.Description = __descriptionCommandWireProtocol; - Action act; - if (async) - { - act = () => subject.AuthenticateAsync(connection, __descriptionCommandWireProtocol, CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - act = () => subject.Authenticate(connection, __descriptionCommandWireProtocol, CancellationToken.None); - } + var exception = async ? 
+ await Record.ExceptionAsync(() => subject.AuthenticateAsync(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol)) : + Record.Exception(() => subject.Authenticate(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol)); - act.ShouldThrow<MongoAuthenticationException>(); + exception.Should().BeOfType<MongoAuthenticationException>(); } [Theory] [ParameterAttributeData] - public void Authenticate_should_not_throw_when_authentication_succeeds( + public async Task Authenticate_should_not_throw_when_authentication_succeeds( [Values(false, true)] bool useSpeculativeAuthenticate, [Values(false, true)] bool useLongAuthentication, [Values(false, true)] bool async) @@ -250,7 +233,7 @@ public void Authenticate_should_not_throw_when_authentication_succeeds( { // Call CustomizeInitialHelloCommand so that the authenticator thinks its started to speculatively // authenticate - helloCommand = subject.CustomizeInitialHelloCommand(new BsonDocument { { OppressiveLanguageConstants.LegacyHelloCommandName, 1 } }, default); + helloCommand = subject.CustomizeInitialHelloCommand(OperationContext.NoTimeout, new BsonDocument { { OppressiveLanguageConstants.LegacyHelloCommandName, 1 } }); } else { @@ -263,20 +246,15 @@ public void Authenticate_should_not_throw_when_authentication_succeeds( connection.EnqueueCommandResponseMessage(saslLastStepResponse); } - Exception exception; if (async) { - exception = Record.Exception( - () => subject.AuthenticateAsync(connection, connection.Description, CancellationToken.None) - .GetAwaiter().GetResult()); + await subject.AuthenticateAsync(OperationContext.NoTimeout, connection, connection.Description); } else { - exception = Record.Exception( - () => subject.Authenticate(connection, connection.Description, CancellationToken.None)); + subject.Authenticate(OperationContext.NoTimeout, connection, connection.Description); } - exception.Should().BeNull(); var expectedSentMessageCount = 3 - (useLongAuthentication ? 
0 : 1) - (useSpeculativeAuthenticate ? 1 : 0); SpinWait.SpinUntil( () => connection.GetSentMessages().Count >= expectedSentMessageCount, @@ -369,7 +347,7 @@ public void Authenticate_should_not_throw_when_authentication_succeeds( [Theory] [ParameterAttributeData] - public void Authenticate_should_use_cache( + public async Task Authenticate_should_use_cache( [Values(false, true)] bool async) { var randomStringGenerator = new ConstantRandomStringGenerator("fyko+d2lbbFgONRv9qkxdawL"); @@ -388,13 +366,11 @@ public void Authenticate_should_use_cache( if (async) { - subject.AuthenticateAsync(connection, __descriptionCommandWireProtocol, CancellationToken.None) - .GetAwaiter() - .GetResult(); + await subject.AuthenticateAsync(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol); } else { - subject.Authenticate(connection, __descriptionCommandWireProtocol, CancellationToken.None); + subject.Authenticate(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol); } SpinWait.SpinUntil(() => connection.GetSentMessages().Count >= 2, TimeSpan.FromSeconds(5)) diff --git a/tests/MongoDB.Driver.Tests/Authentication/ScramSha256AuthenticatorTests.cs b/tests/MongoDB.Driver.Tests/Authentication/ScramSha256AuthenticatorTests.cs index cb52bfbf726..315af1cd6ce 100644 --- a/tests/MongoDB.Driver.Tests/Authentication/ScramSha256AuthenticatorTests.cs +++ b/tests/MongoDB.Driver.Tests/Authentication/ScramSha256AuthenticatorTests.cs @@ -1,4 +1,4 @@ -/* Copyright 2018–present MongoDB Inc. +/* Copyright 2010–present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -18,6 +18,7 @@ using System.Linq; using System.Net; using System.Threading; +using System.Threading.Tasks; using FluentAssertions; using MongoDB.Bson; using MongoDB.Bson.TestHelpers; @@ -102,7 +103,7 @@ private static string ToUtf8Base64(string s) [Theory] [ParameterAttributeData] - public void Authenticate_should_send_serverApi_with_command_wire_protocol( + public async Task Authenticate_should_send_serverApi_with_command_wire_protocol( [Values(false, true)] bool useServerApi, [Values(false, true)] bool async) { @@ -120,11 +121,11 @@ public void Authenticate_should_send_serverApi_with_command_wire_protocol( if (async) { - subject.AuthenticateAsync(connection, __descriptionCommandWireProtocol, CancellationToken.None).GetAwaiter().GetResult(); + await subject.AuthenticateAsync(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol); } else { - subject.Authenticate(connection, __descriptionCommandWireProtocol, CancellationToken.None); + subject.Authenticate(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol); } SpinWait.SpinUntil(() => connection.GetSentMessages().Count >= 2, TimeSpan.FromSeconds(5)).Should().BeTrue(); @@ -141,7 +142,7 @@ public void Authenticate_should_send_serverApi_with_command_wire_protocol( [Theory] [ParameterAttributeData] - public void Authenticate_with_loadBalancedConnection_should_use_command_wire_protocol( + public async Task Authenticate_with_loadBalancedConnection_should_use_command_wire_protocol( [Values(false, true)] bool async) { var randomStringGenerator = new ConstantRandomStringGenerator(_clientNonce); @@ -157,11 +158,11 @@ public void Authenticate_with_loadBalancedConnection_should_use_command_wire_pro if (async) { - subject.AuthenticateAsync(connection, __descriptionCommandWireProtocol, CancellationToken.None).GetAwaiter().GetResult(); + await subject.AuthenticateAsync(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol); } else { - subject.Authenticate(connection, 
__descriptionCommandWireProtocol, CancellationToken.None); + subject.Authenticate(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol); } SpinWait.SpinUntil(() => connection.GetSentMessages().Count >= 2, TimeSpan.FromSeconds(5)).Should().BeTrue(); @@ -178,7 +179,7 @@ public void Authenticate_with_loadBalancedConnection_should_use_command_wire_pro [Theory] [ParameterAttributeData] - public void Authenticate_should_throw_an_AuthenticationException_when_authentication_fails( + public async Task Authenticate_should_throw_an_AuthenticationException_when_authentication_fails( [Values("MongoConnectionException", "MongoNotPrimaryException")] string exceptionName, [Values(false, true)] bool async) { @@ -189,22 +190,16 @@ public void Authenticate_should_throw_an_AuthenticationException_when_authentica connection.EnqueueCommandResponseMessage(responseException); connection.Description = __descriptionCommandWireProtocol; - Exception exception; - if (async) - { - exception = Record.Exception(() => subject.AuthenticateAsync(connection, __descriptionCommandWireProtocol, CancellationToken.None).GetAwaiter().GetResult()); - } - else - { - exception = Record.Exception(() => subject.Authenticate(connection, __descriptionCommandWireProtocol, CancellationToken.None)); - } + var exception = async ? 
+ await Record.ExceptionAsync(() => subject.AuthenticateAsync(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol)) : + Record.Exception(() => subject.Authenticate(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol)); exception.Should().BeOfType<MongoAuthenticationException>(); } [Theory] [ParameterAttributeData] - public void Authenticate_should_throw_when_server_provides_invalid_r_value( + public async Task Authenticate_should_throw_when_server_provides_invalid_r_value( [Values(false, true)] bool async) { var randomStringGenerator = new ConstantRandomStringGenerator(_clientNonce); @@ -221,24 +216,16 @@ public void Authenticate_should_throw_when_server_provides_invalid_r_value( connection.EnqueueCommandResponseMessage(poisonedSaslStartResponseMessage); connection.Description = __descriptionCommandWireProtocol; - Action action; - if (async) - { - action = () => subject.AuthenticateAsync(connection, __descriptionCommandWireProtocol, CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - action = () => subject.Authenticate(connection, __descriptionCommandWireProtocol, CancellationToken.None); - } - - var exception = Record.Exception(action); + var exception = async ? 
+ await Record.ExceptionAsync(() => subject.AuthenticateAsync(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol)) : + Record.Exception(() => subject.Authenticate(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol)); exception.Should().BeOfType<MongoAuthenticationException>(); } [Theory] [ParameterAttributeData] - public void Authenticate_should_throw_when_server_provides_invalid_serverSignature( + public async Task Authenticate_should_throw_when_server_provides_invalid_serverSignature( [Values(false, true)] bool async) { var randomStringGenerator = new ConstantRandomStringGenerator(_clientNonce); @@ -262,24 +249,16 @@ public void Authenticate_should_throw_when_server_provides_invalid_serverSignatu connection.EnqueueCommandResponseMessage(poisonedSaslContinueResponseMessage); connection.Description = __descriptionCommandWireProtocol; - Action act; - if (async) - { - act = () => subject.AuthenticateAsync(connection, __descriptionCommandWireProtocol, CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - act = () => subject.Authenticate(connection, __descriptionCommandWireProtocol, CancellationToken.None); - } - - var exception = Record.Exception(act); + var exception = async ? 
+ await Record.ExceptionAsync(() => subject.AuthenticateAsync(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol)) : + Record.Exception(() => subject.Authenticate(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol)); exception.Should().BeOfType<MongoAuthenticationException>(); } [Theory] [ParameterAttributeData] - public void Authenticate_should_not_throw_when_authentication_succeeds( + public async Task Authenticate_should_not_throw_when_authentication_succeeds( [Values(false, true)] bool useSpeculativeAuthenticate, [Values(false, true)] bool useLongAuthentication, [Values(false, true)] bool async) @@ -317,7 +296,7 @@ public void Authenticate_should_not_throw_when_authentication_succeeds( { // We must call CustomizeInitialHelloCommand so that the authenticator thinks its started to speculatively // authenticate - helloCommand = subject.CustomizeInitialHelloCommand(new BsonDocument { { OppressiveLanguageConstants.LegacyHelloCommandName, 1 } }, default); + helloCommand = subject.CustomizeInitialHelloCommand(OperationContext.NoTimeout, new BsonDocument { { OppressiveLanguageConstants.LegacyHelloCommandName, 1 } }); } else { @@ -332,20 +311,15 @@ public void Authenticate_should_not_throw_when_authentication_succeeds( var expectedRequestId = RequestMessage.CurrentGlobalRequestId + 1; - Exception exception; if (async) { - exception = Record.Exception( - () => subject.AuthenticateAsync(connection, connection.Description, CancellationToken.None) - .GetAwaiter().GetResult()); + await subject.AuthenticateAsync(OperationContext.NoTimeout, connection, connection.Description); } else { - exception = Record.Exception( - () => subject.Authenticate(connection, connection.Description, CancellationToken.None)); + subject.Authenticate(OperationContext.NoTimeout, connection, connection.Description); } - exception.Should().BeNull(); var expectedSentMessageCount = 3 - (useLongAuthentication ? 0 : 1) - (useSpeculativeAuthenticate ? 
1 : 0); SpinWait.SpinUntil( () => connection.GetSentMessages().Count >= expectedSentMessageCount, @@ -443,7 +417,7 @@ public void Authenticate_should_not_throw_when_authentication_succeeds( [Theory] [ParameterAttributeData] - public void Authenticate_should_use_cache( + public async Task Authenticate_should_use_cache( [Values(false, true)] bool async) { var randomStringGenerator = new ConstantRandomStringGenerator(_clientNonce); @@ -467,13 +441,11 @@ public void Authenticate_should_use_cache( if (async) { - subject.AuthenticateAsync(connection, __descriptionCommandWireProtocol, CancellationToken.None) - .GetAwaiter() - .GetResult(); + await subject.AuthenticateAsync(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol); } else { - subject.Authenticate(connection, __descriptionCommandWireProtocol, CancellationToken.None); + subject.Authenticate(OperationContext.NoTimeout, connection, __descriptionCommandWireProtocol); } SpinWait.SpinUntil(() => connection.GetSentMessages().Count >= 2, TimeSpan.FromSeconds(5)) @@ -488,11 +460,11 @@ public void Authenticate_should_use_cache( [Theory] [ParameterAttributeData] - public void Authenticate_should_work_regardless_of_culture( + public Task Authenticate_should_work_regardless_of_culture( [Values("da-DK", "en-US")] string name, [Values(false, true)] bool async) { - SetCultureAndResetAfterTest(name, () => + return SetCultureAndResetAfterTest(name, async Task() => { var randomStringGenerator = new ConstantRandomStringGenerator("a"); @@ -517,25 +489,22 @@ public void Authenticate_should_work_regardless_of_culture( if (async) { - subject - .AuthenticateAsync(mockConnection, __descriptionCommandWireProtocol, CancellationToken.None) - .GetAwaiter() - .GetResult(); + await subject.AuthenticateAsync(OperationContext.NoTimeout, mockConnection, __descriptionCommandWireProtocol); } else { - subject.Authenticate(mockConnection, __descriptionCommandWireProtocol, CancellationToken.None); + 
subject.Authenticate(OperationContext.NoTimeout, mockConnection, __descriptionCommandWireProtocol); } }); - void SetCultureAndResetAfterTest(string cultureName, Action test) + async Task SetCultureAndResetAfterTest(string cultureName, Func<Task> test) { var originalCulture = Thread.CurrentThread.CurrentCulture; Thread.CurrentThread.CurrentCulture = new System.Globalization.CultureInfo(cultureName); try { - test(); + await test(); } finally { diff --git a/tests/MongoDB.Driver.Tests/AuthenticationTests.cs b/tests/MongoDB.Driver.Tests/AuthenticationTests.cs index 4e84d4ef060..56e644f6f1a 100644 --- a/tests/MongoDB.Driver.Tests/AuthenticationTests.cs +++ b/tests/MongoDB.Driver.Tests/AuthenticationTests.cs @@ -16,7 +16,6 @@ using System; using System.Linq; using System.Security.Cryptography.X509Certificates; -using System.Threading; using FluentAssertions; using MongoDB.Bson; using MongoDB.Driver.Core.Clusters.ServerSelectors; @@ -30,6 +29,7 @@ namespace MongoDB.Driver.Tests /// <summary> /// Authentication integration tests. 
/// </summary> + [Trait("Category", "Integration")] public class AuthenticationTests { [Theory] @@ -336,10 +336,9 @@ private void AssertAuthenticationSucceeds( if (Feature.SpeculativeAuthentication.IsSupported(CoreTestConfiguration.MaxWireVersion) && speculativeAuthenticatationShouldSucceedIfPossible) { - var cancellationToken = CancellationToken.None; var serverSelector = new ReadPreferenceServerSelector(settings.ReadPreference); - var server = client.GetClusterInternal().SelectServer(serverSelector, cancellationToken); - var channel = server.GetChannel(cancellationToken); + var server = client.GetClusterInternal().SelectServer(OperationContext.NoTimeout, serverSelector); + var channel = server.GetChannel(OperationContext.NoTimeout); var helloResult = channel.ConnectionDescription.HelloResult; helloResult.SpeculativeAuthenticate.Should().NotBeNull(); } diff --git a/tests/MongoDB.Driver.Tests/BulkWriteErrorTests.cs b/tests/MongoDB.Driver.Tests/BulkWriteErrorTests.cs index c51ee7cc5ec..1a90fd5d437 100644 --- a/tests/MongoDB.Driver.Tests/BulkWriteErrorTests.cs +++ b/tests/MongoDB.Driver.Tests/BulkWriteErrorTests.cs @@ -20,6 +20,7 @@ using System.Threading.Tasks; using FluentAssertions; using MongoDB.Bson; +using MongoDB.Driver.Core.Operations; using Xunit; namespace MongoDB.Driver.Tests @@ -34,7 +35,7 @@ public class BulkWriteErrorTests [InlineData(12582, ServerErrorCategory.DuplicateKey)] public void Should_translate_category_correctly(int code, ServerErrorCategory expectedCategory) { - var coreError = new Core.Operations.BulkWriteOperationError(0, code, "blah", new BsonDocument()); + var coreError = new BulkWriteOperationError(0, code, "blah", new BsonDocument()); var subject = BulkWriteError.FromCore(coreError); subject.Category.Should().Be(expectedCategory); diff --git a/tests/MongoDB.Driver.Tests/CausalConsistencyTests.cs b/tests/MongoDB.Driver.Tests/CausalConsistencyTests.cs index 32d3afdbcf5..c715b00f8e9 100644 --- 
a/tests/MongoDB.Driver.Tests/CausalConsistencyTests.cs +++ b/tests/MongoDB.Driver.Tests/CausalConsistencyTests.cs @@ -28,6 +28,7 @@ namespace MongoDB.Driver.Tests { + [Trait("Category", "Integration")] public class CausalConsistencyTests { [Fact] diff --git a/tests/MongoDB.Driver.Tests/ClientDocumentHelperTests.cs b/tests/MongoDB.Driver.Tests/ClientDocumentHelperTests.cs index 5f714d07ba2..7eaa434a223 100644 --- a/tests/MongoDB.Driver.Tests/ClientDocumentHelperTests.cs +++ b/tests/MongoDB.Driver.Tests/ClientDocumentHelperTests.cs @@ -31,6 +31,7 @@ namespace MongoDB.Driver.Tests { + [Trait("Category", "Integration")] public class ClientDocumentHelperTests { private static readonly string __longAString = new string('a', 512); diff --git a/tests/MongoDB.Driver.Tests/ClientSessionExtensionsTests.cs b/tests/MongoDB.Driver.Tests/ClientSessionExtensionsTests.cs new file mode 100644 index 00000000000..6758dff71a7 --- /dev/null +++ b/tests/MongoDB.Driver.Tests/ClientSessionExtensionsTests.cs @@ -0,0 +1,68 @@ +/* Copyright 2010-present MongoDB Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +using System.Collections.Generic; +using FluentAssertions; +using MongoDB.Driver.Core.Bindings; +using Moq; +using Xunit; + +namespace MongoDB.Driver.Tests +{ + public class ClientSessionExtensionsTests + { + [Theory] + [MemberData(nameof(GetEffectiveReadPreferenceTestCases))] + public void GetEffectiveReadPreferenceTests( + ReadPreference expectedReadPreference, + ReadPreference defaultReadPreference, + IClientSessionHandle session) + { + var result = session.GetEffectiveReadPreference(defaultReadPreference); + + result.Should().Be(expectedReadPreference); + } + + public static IEnumerable<object[]> GetEffectiveReadPreferenceTestCases() + { + var noTransactionSession = CreateSessionMock(null); + var inTransactionSession = CreateSessionMock(new TransactionOptions(readPreference: ReadPreference.Nearest)); + var inTransactionNoPreferenceSession = CreateSessionMock(new TransactionOptions()); + + yield return [ReadPreference.Primary, null, noTransactionSession]; + yield return [ReadPreference.SecondaryPreferred, ReadPreference.SecondaryPreferred, noTransactionSession]; + + yield return [ReadPreference.Nearest, ReadPreference.SecondaryPreferred, inTransactionSession]; + + yield return [ReadPreference.Primary, null, inTransactionNoPreferenceSession]; + yield return [ReadPreference.SecondaryPreferred, ReadPreference.SecondaryPreferred, inTransactionNoPreferenceSession]; + } + + private static IClientSessionHandle CreateSessionMock(TransactionOptions transactionOptions) + { + var sessionMock = new Mock<IClientSessionHandle>(); + if (transactionOptions != null) + { + sessionMock.SetupGet(s => s.IsInTransaction).Returns(true); + var coreSessionMock = new Mock<ICoreSessionHandle>(); + coreSessionMock.SetupGet(s => s.CurrentTransaction).Returns(new CoreTransaction(0, transactionOptions)); + sessionMock.SetupGet(s => s.WrappedCoreSession).Returns(coreSessionMock.Object); + } + + return sessionMock.Object; + } + } +} + diff --git 
a/tests/MongoDB.Driver.Tests/ClientSessionHandleTests.cs b/tests/MongoDB.Driver.Tests/ClientSessionHandleTests.cs index d05c34abb28..23021893994 100644 --- a/tests/MongoDB.Driver.Tests/ClientSessionHandleTests.cs +++ b/tests/MongoDB.Driver.Tests/ClientSessionHandleTests.cs @@ -1,4 +1,4 @@ -/* Copyright 2017-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -25,7 +25,6 @@ using MongoDB.Driver.Core.Clusters; using MongoDB.Driver.Core.Misc; using MongoDB.Driver.Core.TestHelpers; -using MongoDB.Driver.Support; using Moq; using Xunit; @@ -317,22 +316,8 @@ public void WithTransaction_callback_should_be_processed_with_expected_result( int expectedCommitTransactionAttempts, bool async) { - var mockClock = CreateClockMock(DateTime.UtcNow, isRetryAttemptsWithTimeout, true); - + var mockClock = CreateClockMock(DateTime.UtcNow, isRetryAttemptsWithTimeout); var mockCoreSession = CreateCoreSessionMock(); - mockCoreSession.Setup(c => c.StartTransaction(It.IsAny<TransactionOptions>())); - - // CommitTransaction - if (async) - { - mockCoreSession - .Setup(c => c.CommitTransactionAsync(It.IsAny<CancellationToken>())) - .Returns(Task.FromResult(0)); - } - else - { - mockCoreSession.Setup(c => c.CommitTransaction(It.IsAny<CancellationToken>())); - } // Initialize callbacks var mockCallbackProcessing = new Mock<ICallbackProcessing>(); @@ -438,9 +423,6 @@ public void WithTransaction_callback_should_propagate_result(object value) public void WithTransaction_callback_with_a_custom_error_should_not_be_retried() { var mockCoreSession = CreateCoreSessionMock(); - mockCoreSession.Setup(c => c.StartTransaction(It.IsAny<TransactionOptions>())); - mockCoreSession.Setup(c => c.AbortTransaction(It.IsAny<CancellationToken>())); // abort ignores exceptions - mockCoreSession.Setup(c => c.CommitTransaction(It.IsAny<CancellationToken>())); var subject = 
CreateSubject(coreSession: mockCoreSession.Object); @@ -453,13 +435,7 @@ public void WithTransaction_callback_with_a_custom_error_should_not_be_retried() [Fact] public void WithTransaction_callback_with_a_TransientTransactionError_and_exceeded_retry_timeout_should_not_be_retried() { - var now = DateTime.UtcNow; - var mockClock = new Mock<IClock>(); - mockClock - .SetupSequence(c => c.UtcNow) - .Returns(now) - .Returns(now.AddSeconds(CalculateTime(true))); // the retry timeout has been exceeded - + var mockClock = CreateClockMock(DateTime.UtcNow, TimeSpan.FromSeconds(CalculateTime(true))); var subject = CreateSubject(clock: mockClock.Object); var exResult = Assert.Throws<MongoException>(() => subject.WithTransaction<bool>((handle, cancellationToken) => @@ -473,13 +449,7 @@ public void WithTransaction_callback_with_a_TransientTransactionError_and_exceed [ParameterAttributeData] public void WithTransaction_callback_with_a_UnknownTransactionCommitResult_should_not_be_retried([Values(true, false)] bool hasTimedOut) { - var now = DateTime.UtcNow; - var mockClock = new Mock<IClock>(); - mockClock - .SetupSequence(c => c.UtcNow) - .Returns(now) - .Returns(now.AddSeconds(CalculateTime(hasTimedOut))); - + var mockClock = CreateClockMock(DateTime.UtcNow, TimeSpan.FromSeconds(CalculateTime(hasTimedOut))); var subject = CreateSubject(clock: mockClock.Object); var exResult = Assert.Throws<MongoException>(() => subject.WithTransaction<bool>((handle, cancellationToken) => @@ -491,43 +461,41 @@ public void WithTransaction_callback_with_a_UnknownTransactionCommitResult_shoul [Theory] // sync - [InlineData(null, new[] { WithTransactionErrorState.NoError }, false /*Should exception be thrown*/, 1, true, false)] - [InlineData(null, new[] { WithTransactionErrorState.TransientTransactionError }, false /*Should exception be thrown*/, 1, false, false)] + [InlineData(null, new[] { WithTransactionErrorState.NoError }, false /*Should exception be thrown*/, 1, false)] + [InlineData(null, new[] 
{ WithTransactionErrorState.TransientTransactionError, WithTransactionErrorState.NoError }, false /*Should exception be thrown*/, 2, false)] - [InlineData(null, new[] { WithTransactionErrorState.ErrorWithoutLabel }, true /*Should exception be thrown*/, 1, false, false)] + [InlineData(null, new[] { WithTransactionErrorState.ErrorWithoutLabel }, true /*Should exception be thrown*/, 1, false)] - [InlineData(new[] { false, false }, new[] { WithTransactionErrorState.TransientTransactionError, WithTransactionErrorState.TransientTransactionError }, false /*Should exception be thrown*/, 1, false, false)] - [InlineData(new[] { true, true }, new[] { WithTransactionErrorState.TransientTransactionError, WithTransactionErrorState.TransientTransactionError }, true /*Should exception be thrown*/, 1, null, false)] + [InlineData(new[] { false, false }, new[] { WithTransactionErrorState.TransientTransactionError, WithTransactionErrorState.TransientTransactionError, WithTransactionErrorState.NoError }, false /*Should exception be thrown*/, 3, false)] + [InlineData(new[] { true }, new[] { WithTransactionErrorState.TransientTransactionError }, true /*Should exception be thrown*/, 1, false)] - [InlineData(new[] { false, false }, new[] { WithTransactionErrorState.UnknownTransactionCommitResult, WithTransactionErrorState.UnknownTransactionCommitResult, WithTransactionErrorState.NoError }, false /*Should exception be thrown*/, 3, true, false)] - [InlineData(new[] { false, true }, new[] { WithTransactionErrorState.UnknownTransactionCommitResult, WithTransactionErrorState.UnknownTransactionCommitResult, WithTransactionErrorState.NoError }, true /*Should exception be thrown*/, 2, null, false)] + [InlineData(new[] { false, false }, new[] { WithTransactionErrorState.UnknownTransactionCommitResult, WithTransactionErrorState.UnknownTransactionCommitResult, WithTransactionErrorState.NoError }, false /*Should exception be thrown*/, 1, false)] + [InlineData(new[] { false, true }, new[] { 
WithTransactionErrorState.UnknownTransactionCommitResult, WithTransactionErrorState.UnknownTransactionCommitResult }, true /*Should exception be thrown*/, 1, false)] - [InlineData(new[] { false, false }, new[] { WithTransactionErrorState.UnknownTransactionCommitResult, WithTransactionErrorState.UnknownTransactionCommitResult, WithTransactionErrorState.NoError }, false /*Should exception be thrown*/, 3, true, false)] + [InlineData(new[] { false, false }, new[] { WithTransactionErrorState.UnknownTransactionCommitResult, WithTransactionErrorState.UnknownTransactionCommitResult, WithTransactionErrorState.NoError }, false /*Should exception be thrown*/, 1, false)] // async - [InlineData(null, new[] { WithTransactionErrorState.NoError }, false /*Should exception be thrown*/, 1, true, true)] - [InlineData(null, new[] { WithTransactionErrorState.TransientTransactionError }, false /*Should exception be thrown*/, 1, false, true)] + [InlineData(null, new[] { WithTransactionErrorState.NoError }, false /*Should exception be thrown*/, 1, true)] + [InlineData(null, new[] { WithTransactionErrorState.TransientTransactionError, WithTransactionErrorState.NoError }, false /*Should exception be thrown*/, 2, true)] - [InlineData(null, new[] { WithTransactionErrorState.ErrorWithoutLabel }, true /*Should exception be thrown*/, 1, false, true)] + [InlineData(null, new[] { WithTransactionErrorState.ErrorWithoutLabel }, true /*Should exception be thrown*/, 1, true)] - [InlineData(new[] { false, false }, new[] { WithTransactionErrorState.TransientTransactionError, WithTransactionErrorState.TransientTransactionError }, false /*Should exception be thrown*/, 1, false, true)] - [InlineData(new[] { true, true }, new[] { WithTransactionErrorState.TransientTransactionError, WithTransactionErrorState.TransientTransactionError }, true /*Should exception be thrown*/, 1, null, true)] + [InlineData(new[] { false, false }, new[] { WithTransactionErrorState.TransientTransactionError, 
WithTransactionErrorState.TransientTransactionError, WithTransactionErrorState.NoError }, false /*Should exception be thrown*/, 3, true)] + [InlineData(new[] { true }, new[] { WithTransactionErrorState.TransientTransactionError }, true /*Should exception be thrown*/, 1, true)] - [InlineData(new[] { false, false }, new[] { WithTransactionErrorState.UnknownTransactionCommitResult, WithTransactionErrorState.UnknownTransactionCommitResult, WithTransactionErrorState.NoError }, false /*Should exception be thrown*/, 3, true, true)] - [InlineData(new[] { false, true }, new[] { WithTransactionErrorState.UnknownTransactionCommitResult, WithTransactionErrorState.UnknownTransactionCommitResult, WithTransactionErrorState.NoError }, true /*Should exception be thrown*/, 2, null, true)] + [InlineData(new[] { false, false }, new[] { WithTransactionErrorState.UnknownTransactionCommitResult, WithTransactionErrorState.UnknownTransactionCommitResult, WithTransactionErrorState.NoError }, false /*Should exception be thrown*/, 1, true)] + [InlineData(new[] { false, true }, new[] { WithTransactionErrorState.UnknownTransactionCommitResult, WithTransactionErrorState.UnknownTransactionCommitResult }, true /*Should exception be thrown*/, 1, true)] - [InlineData(new[] { false, false }, new[] { WithTransactionErrorState.UnknownTransactionCommitResult, WithTransactionErrorState.UnknownTransactionCommitResult, WithTransactionErrorState.NoError }, false /*Should exception be thrown*/, 3, true, true)] + [InlineData(new[] { false, false }, new[] { WithTransactionErrorState.UnknownTransactionCommitResult, WithTransactionErrorState.UnknownTransactionCommitResult, WithTransactionErrorState.NoError }, false /*Should exception be thrown*/, 1, true)] public void WithTransaction_commit_after_callback_processing_should_be_processed_with_expected_result( bool[] isRetryAttemptsWithTimeout, // the array length should be the same with a number of failed attempts from `commitTransactionErrorStates` 
WithTransactionErrorState[] commitTransactionErrorStates, bool shouldExceptionBeThrown, - int expectedCommitTransactionAttempts, - bool? expectedFullTransactionBeRetriedState, + int transactionCallbackAttempts, bool async) { var now = DateTime.UtcNow; - var mockClock = CreateClockMock(now, isRetryAttemptsWithTimeout, false); - + var mockClock = CreateClockMock(now, isRetryAttemptsWithTimeout); var mockCoreSession = CreateCoreSessionMock(); // Initialize commit result @@ -566,34 +534,39 @@ public void WithTransaction_commit_after_callback_processing_should_be_processed var subject = CreateSubject(coreSession: mockCoreSession.Object, clock: mockClock.Object); - // Commit processing if (async) { + var callbackMock = new Mock<Func<IClientSessionHandle, CancellationToken, Task<bool>>>(); + var exception = Record.ExceptionAsync(() => subject.WithTransactionAsync(callbackMock.Object)).GetAwaiter().GetResult(); + if (shouldExceptionBeThrown) { - Assert.ThrowsAnyAsync<MongoException>(() => TransactionExecutorReflector.CommitWithRetriesAsync(subject, now, mockClock.Object, CancellationToken.None)).GetAwaiter().GetResult(); + exception.Should().BeOfType<MongoException>(); } else { - var result = TransactionExecutorReflector.CommitWithRetriesAsync(subject, now, mockClock.Object, CancellationToken.None).Result; - expectedFullTransactionBeRetriedState.Should().Be(result); + exception.Should().BeNull(); } - mockCoreSession.Verify(handle => handle.CommitTransactionAsync(It.IsAny<CancellationToken>()), Times.Exactly(expectedCommitTransactionAttempts)); + callbackMock.Verify(c => c(It.IsAny<IClientSessionHandle>(), It.IsAny<CancellationToken>()), Times.Exactly(transactionCallbackAttempts)); + mockCoreSession.Verify(handle => handle.CommitTransactionAsync(It.IsAny<CancellationToken>()), Times.Exactly(commitTransactionErrorStates.Length)); } else { + var callbackMock = new Mock<Func<IClientSessionHandle, CancellationToken, bool>>(); + var exception = Record.Exception(() => 
subject.WithTransaction(callbackMock.Object)); + if (shouldExceptionBeThrown) { - Assert.ThrowsAny<MongoException>(() => TransactionExecutorReflector.CommitWithRetries(subject, now, mockClock.Object, CancellationToken.None)); + exception.Should().BeOfType<MongoException>(); } else { - var result = TransactionExecutorReflector.CommitWithRetries(subject, now, mockClock.Object, CancellationToken.None); - expectedFullTransactionBeRetriedState.Should().Be(result); + exception.Should().BeNull(); } - mockCoreSession.Verify(handle => handle.CommitTransaction(It.IsAny<CancellationToken>()), Times.Exactly(expectedCommitTransactionAttempts)); + callbackMock.Verify(c => c(It.IsAny<IClientSessionHandle>(), It.IsAny<CancellationToken>()), Times.Exactly(transactionCallbackAttempts)); + mockCoreSession.Verify(handle => handle.CommitTransaction(It.IsAny<CancellationToken>()), Times.Exactly(commitTransactionErrorStates.Length)); } } @@ -601,8 +574,6 @@ public void WithTransaction_commit_after_callback_processing_should_be_processed public void WithTransaction_should_set_valid_session_to_callback() { var mockCoreSession = CreateCoreSessionMock(); - mockCoreSession.Setup(c => c.StartTransaction(It.IsAny<TransactionOptions>())); - mockCoreSession.Setup(c => c.CommitTransaction(It.IsAny<CancellationToken>())); var subject = CreateSubject(coreSession: mockCoreSession.Object); var result = subject.WithTransaction<object>((session, cancellationToken) => session); @@ -618,9 +589,6 @@ public void WithTransaction_should_set_valid_session_to_callback() public void WithTransaction_with_error_in_callback_should_call_AbortTransaction_according_to_transaction_state(CoreTransactionState transactionState, bool shouldAbortTransactionBeCalled) { var mockCoreSession = CreateCoreSessionMock(); - mockCoreSession.Setup(c => c.StartTransaction(It.IsAny<TransactionOptions>())); - mockCoreSession.Setup(c => c.AbortTransaction(It.IsAny<CancellationToken>())); // abort ignores exceptions - 
mockCoreSession.Setup(c => c.CommitTransaction(It.IsAny<CancellationToken>())); var subject = CreateSubject(coreSession: mockCoreSession.Object); subject.WrappedCoreSession.CurrentTransaction.SetState(transactionState); @@ -639,7 +607,6 @@ public void WithTransaction_with_error_in_StartTransaction_should_return_control mockCoreSession .Setup(c => c.StartTransaction(It.IsAny<TransactionOptions>())) .Throws<Exception>(); - mockCoreSession.Setup(c => c.CommitTransaction(It.IsAny<CancellationToken>())); var subject = CreateSubject(coreSession: mockCoreSession.Object); Assert.Throws<Exception>(() => subject.WithTransaction<object>((handle, cancellationToken) => 1)); @@ -652,8 +619,6 @@ public void WithTransaction_with_error_in_StartTransaction_should_return_control public void WithTransaction_without_errors_should_call_transaction_infrastructure_once() { var mockCoreSession = CreateCoreSessionMock(); - mockCoreSession.Setup(c => c.StartTransaction(It.IsAny<TransactionOptions>())); - mockCoreSession.Setup(c => c.CommitTransaction(It.IsAny<CancellationToken>())); var subject = CreateSubject(coreSession: mockCoreSession.Object); SetupTransactionState(subject, true); @@ -726,7 +691,7 @@ private MongoException PrepareException(WithTransactionErrorState state) throw new ArgumentException("Not supported ErrorState", state.ToString()); } - private Mock<IClock> CreateClockMock(DateTime now, bool[] isRetryAttemptsWithTimeout, bool shouldNowBeAdded) + private Mock<IClock> CreateClockMock(DateTime now, bool[] isRetryAttemptsWithTimeout) { if (isRetryAttemptsWithTimeout == null) { @@ -734,20 +699,40 @@ private Mock<IClock> CreateClockMock(DateTime now, bool[] isRetryAttemptsWithTim } var mockClock = new Mock<IClock>(); - + SetupGetTimestamp(mockClock); var nowSetup = mockClock.SetupSequence(c => c.UtcNow); - if (shouldNowBeAdded) - { - nowSetup.Returns(now); - } + nowSetup.Returns(now); foreach (var isTimeoutAttempt in isRetryAttemptsWithTimeout) { var passedTime = 
CalculateTime(isTimeoutAttempt); nowSetup.Returns(now.AddSeconds(passedTime)); } + return mockClock; } + private Mock<IClock> CreateClockMock(DateTime now, params TimeSpan[] intervals) + { + var mockClock = new Mock<IClock>(); + SetupGetTimestamp(mockClock); + var nowSetup = mockClock.SetupSequence(c => c.UtcNow); + nowSetup.Returns(now); + var currentTime = now; + foreach (var interval in intervals) + { + currentTime += interval; + nowSetup.Returns(currentTime); + } + + return mockClock; + } + + private void SetupGetTimestamp(Mock<IClock> mockClock) + { + mockClock.SetupGet(m => m.Frequency).Returns(10_000_000); + mockClock.Setup(w => w.GetTimestamp()).Returns(() => mockClock.Object.UtcNow.Ticks); + } + private int CalculateTime(bool timeout) { return (int)TransactionExecutorReflector.__transactionTimeout().TotalSeconds + (timeout ? 10 : -10); @@ -770,11 +755,5 @@ internal static class ClientSessionHandleReflector internal static class TransactionExecutorReflector { public static TimeSpan __transactionTimeout() => (TimeSpan)Reflector.GetStaticFieldValue(typeof(TransactionExecutor), nameof(__transactionTimeout)); - - public static bool CommitWithRetries(IClientSessionHandle session, DateTime startTime, IClock clock, CancellationToken cancellationToken) - => (bool)Reflector.InvokeStatic(typeof(TransactionExecutor), nameof(CommitWithRetries), session, startTime, clock, cancellationToken); - - public static Task<bool> CommitWithRetriesAsync(IClientSessionHandle session, DateTime startTime, IClock clock, CancellationToken cancellationToken) - => (Task<bool>)Reflector.InvokeStatic(typeof(TransactionExecutor), nameof(CommitWithRetriesAsync), session, startTime, clock, cancellationToken); } } diff --git a/tests/MongoDB.Driver.Tests/ClusterKeyTests.cs b/tests/MongoDB.Driver.Tests/ClusterKeyTests.cs index 481da3442c7..b5cba0293ce 100644 --- a/tests/MongoDB.Driver.Tests/ClusterKeyTests.cs +++ b/tests/MongoDB.Driver.Tests/ClusterKeyTests.cs @@ -21,6 +21,7 @@ using 
MongoDB.Bson; using MongoDB.Driver.Core.Compression; using MongoDB.Driver.Core.Configuration; +using MongoDB.Driver.Core.Connections; using MongoDB.Driver.Core.Servers; using MongoDB.TestHelpers.XunitExtensions; using Xunit; @@ -74,6 +75,7 @@ public void Equals_should_return_true_if_all_fields_are_equal() [InlineData("ServerMonitoringMode", true)] [InlineData("ServerSelectionTimeout", true)] [InlineData("SocketTimeout", true)] + [InlineData("Socks5ProxySettings", true)] [InlineData("SrvMaxHosts", true)] [InlineData("SslSettings", true)] [InlineData("UseTls", true)] @@ -180,6 +182,7 @@ private ClusterKey CreateSubject(string notEqualFieldName = null) var serverMonitoringMode = ServerMonitoringMode.Stream; var serverSelectionTimeout = TimeSpan.FromSeconds(6); var socketTimeout = TimeSpan.FromSeconds(4); + var socks5ProxySettings = Socks5ProxySettings.Create("localhost", 1080, "user", "password"); var srvMaxHosts = 0; var srvServiceName = "mongodb"; var sslSettings = new SslSettings @@ -228,6 +231,7 @@ private ClusterKey CreateSubject(string notEqualFieldName = null) case "ServerMonitoringMode": serverMonitoringMode = ServerMonitoringMode.Poll; break; case "ServerSelectionTimeout": serverSelectionTimeout = TimeSpan.FromSeconds(98); break; case "SocketTimeout": socketTimeout = TimeSpan.FromSeconds(99); break; + case "Socks5ProxySettings": socks5ProxySettings = Socks5ProxySettings.Create("different", 1080, "user", "password"); break; case "SrvMaxHosts": srvMaxHosts = 3; break; case "SrvServiceName": srvServiceName = "customname"; break; case "SslSettings": sslSettings.CheckCertificateRevocation = !sslSettings.CheckCertificateRevocation; break; @@ -268,6 +272,7 @@ private ClusterKey CreateSubject(string notEqualFieldName = null) serverMonitoringMode, serverSelectionTimeout, socketTimeout, + socks5ProxySettings, srvMaxHosts, srvServiceName, sslSettings, @@ -312,6 +317,7 @@ internal ClusterKey CreateSubjectWith( var serverMonitoringMode = ServerMonitoringMode.Stream; var 
serverSelectionTimeout = TimeSpan.FromSeconds(6); var socketTimeout = TimeSpan.FromSeconds(4); + var socks5ProxySettings = Socks5ProxySettings.Create("localhost", 1080, "user", "password"); var srvMaxHosts = 3; var srvServiceName = "customname"; var sslSettings = new SslSettings @@ -353,6 +359,7 @@ internal ClusterKey CreateSubjectWith( serverMonitoringMode, serverSelectionTimeout, socketTimeout, + socks5ProxySettings, srvMaxHosts, srvServiceName, sslSettings, diff --git a/tests/MongoDB.Driver.Tests/ClusterRegistryTests.cs b/tests/MongoDB.Driver.Tests/ClusterRegistryTests.cs index d879ccd2a38..2a63791de2f 100644 --- a/tests/MongoDB.Driver.Tests/ClusterRegistryTests.cs +++ b/tests/MongoDB.Driver.Tests/ClusterRegistryTests.cs @@ -142,6 +142,7 @@ public void GetOrCreateCluster_should_return_a_cluster_with_the_correct_settings serverMonitoringMode: ServerMonitoringMode.Stream, serverSelectionTimeout: TimeSpan.FromSeconds(11), socketTimeout: TimeSpan.FromSeconds(12), + socks5ProxySettings: null, srvMaxHosts: 0, srvServiceName: "mongodb", sslSettings: sslSettings, diff --git a/tests/MongoDB.Driver.Tests/ClusterTests.cs b/tests/MongoDB.Driver.Tests/ClusterTests.cs index 97732fc09ee..a7c31d484c2 100644 --- a/tests/MongoDB.Driver.Tests/ClusterTests.cs +++ b/tests/MongoDB.Driver.Tests/ClusterTests.cs @@ -37,6 +37,7 @@ namespace MongoDB.Driver.Tests { + [Trait("Category", "Integration")] public class ClusterTests : LoggableTestClass { private static readonly HashSet<string> __commandsToNotCapture = new HashSet<string> @@ -86,11 +87,10 @@ public void SelectServer_loadbalancing_prose_test([Values(false, true)] bool asy var eventCapturer = CreateEventCapturer(); using (var client = CreateMongoClient(eventCapturer, applicationName)) { - var slowServer = client.GetClusterInternal().SelectServer(WritableServerSelector.Instance, default); - var fastServer = client.GetClusterInternal().SelectServer(new DelegateServerSelector((_, servers) => servers.Where(s => s.ServerId != 
slowServer.ServerId)), default); + var slowServer = client.GetClusterInternal().SelectServer(OperationContext.NoTimeout, WritableServerSelector.Instance); + var fastServer = client.GetClusterInternal().SelectServer(OperationContext.NoTimeout, new DelegateServerSelector((_, servers) => servers.Where(s => s.ServerId != slowServer.ServerId))); using var failPoint = FailPoint.Configure(slowServer, NoCoreSession.NewHandle(), failCommand, async); - var database = client.GetDatabase(_databaseName); CreateCollection(); var collection = database.GetCollection<BsonDocument>(_collectionName); @@ -99,8 +99,8 @@ public void SelectServer_loadbalancing_prose_test([Values(false, true)] bool asy var channels = new ConcurrentBag<IChannelHandle>(); ThreadingUtilities.ExecuteOnNewThreads(threadsCount, i => { - channels.Add(slowServer.GetChannel(default)); - channels.Add(fastServer.GetChannel(default)); + channels.Add(slowServer.GetChannel(OperationContext.NoTimeout)); + channels.Add(fastServer.GetChannel(OperationContext.NoTimeout)); }); foreach (var channel in channels) diff --git a/tests/MongoDB.Driver.Tests/ConnectionsSurvivePrimaryStepDownTests.cs b/tests/MongoDB.Driver.Tests/ConnectionsSurvivePrimaryStepDownTests.cs index 5c30e8c1895..f37cd6c4b11 100644 --- a/tests/MongoDB.Driver.Tests/ConnectionsSurvivePrimaryStepDownTests.cs +++ b/tests/MongoDB.Driver.Tests/ConnectionsSurvivePrimaryStepDownTests.cs @@ -31,6 +31,7 @@ namespace MongoDB.Driver.Tests { + [Trait("Category", "Integration")] public class ConnectionsSurvivePrimaryStepDownTests { private readonly string _collectionName = "step-down"; diff --git a/tests/MongoDB.Driver.Tests/Core/Bindings/ChannelChannelSourceTests.cs b/tests/MongoDB.Driver.Tests/Core/Bindings/ChannelChannelSourceTests.cs index 76a12582048..650f074169d 100644 --- a/tests/MongoDB.Driver.Tests/Core/Bindings/ChannelChannelSourceTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Bindings/ChannelChannelSourceTests.cs @@ -15,7 +15,7 @@ using System; using 
System.Reflection; -using System.Threading; +using System.Threading.Tasks; using FluentAssertions; using MongoDB.TestHelpers.XunitExtensions; using MongoDB.Driver.Core.Servers; @@ -140,25 +140,17 @@ public void Dispose_can_be_called_more_than_once() [Theory] [ParameterAttributeData] - public void GetChannel_should_return_expected_result( + public async Task GetChannel_should_return_expected_result( [Values(false, true)] bool async) { var mockChannel = new Mock<IChannelHandle>(); var subject = CreateSubject(channel: mockChannel.Object); - using var cancellationTokenSource = new CancellationTokenSource(); - var cancellationToken = cancellationTokenSource.Token; var expectedResult = new Mock<IChannelHandle>().Object; mockChannel.Setup(m => m.Fork()).Returns(expectedResult); - IChannelHandle result; - if (async) - { - result = subject.GetChannelAsync(cancellationToken).GetAwaiter().GetResult(); - } - else - { - result = subject.GetChannel(cancellationToken); - } + var result = async ? + await subject.GetChannelAsync(OperationContext.NoTimeout) : + subject.GetChannel(OperationContext.NoTimeout); result.Should().BeSameAs(expectedResult); mockChannel.Verify(m => m.Fork(), Times.Once); @@ -166,24 +158,14 @@ public void GetChannel_should_return_expected_result( [Theory] [ParameterAttributeData] - public void GetChannel_should_throw_when_disposed( + public async Task GetChannel_should_throw_when_disposed( [Values(false, true)] bool async) { var subject = CreateDisposedSubject(); - using var cancellationTokenSource = new CancellationTokenSource(); - var cancellationToken = cancellationTokenSource.Token; - - var exception = Record.Exception(() => - { - if (async) - { - subject.GetChannelAsync(cancellationToken).GetAwaiter().GetResult(); - } - else - { - subject.GetChannel(cancellationToken); - } - }); + + var exception = async ? 
+ await Record.ExceptionAsync(() => subject.GetChannelAsync(OperationContext.NoTimeout)) : + Record.Exception(() => subject.GetChannel(OperationContext.NoTimeout)); var e = exception.Should().BeOfType<ObjectDisposedException>().Subject; e.ObjectName.Should().Be(subject.GetType().FullName); diff --git a/tests/MongoDB.Driver.Tests/Core/Bindings/ChannelReadBindingTests.cs b/tests/MongoDB.Driver.Tests/Core/Bindings/ChannelReadBindingTests.cs index 121011542cb..1d6f317e611 100644 --- a/tests/MongoDB.Driver.Tests/Core/Bindings/ChannelReadBindingTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Bindings/ChannelReadBindingTests.cs @@ -14,11 +14,7 @@ */ using System; -using System.Collections.Generic; -using System.Linq; using System.Reflection; -using System.Text; -using System.Threading; using System.Threading.Tasks; using FluentAssertions; using MongoDB.TestHelpers.XunitExtensions; @@ -151,29 +147,21 @@ public void Dispose_can_be_called_more_than_once() [Theory] [ParameterAttributeData] - public void GetReadChannelSource_should_return_expected_result( + public async Task GetReadChannelSource_should_return_expected_result( [Values(false, true)] bool async) { var mockChannel = new Mock<IChannelHandle>(); var mockSession = new Mock<ICoreSessionHandle>(); var subject = CreateSubject(channel: mockChannel.Object, session: mockSession.Object); - using var cancellationTokenSource = new CancellationTokenSource(); - var cancellationToken = cancellationTokenSource.Token; var forkedChannel = new Mock<IChannelHandle>().Object; var forkedSession = new Mock<ICoreSessionHandle>().Object; mockChannel.Setup(m => m.Fork()).Returns(forkedChannel); mockSession.Setup(m => m.Fork()).Returns(forkedSession); - IChannelSourceHandle result; - if (async) - { - result = subject.GetReadChannelSourceAsync(cancellationToken).GetAwaiter().GetResult(); - } - else - { - result = subject.GetReadChannelSource(cancellationToken); - } + var result = async ? 
+ await subject.GetReadChannelSourceAsync(OperationContext.NoTimeout) : + subject.GetReadChannelSource(OperationContext.NoTimeout); var newHandle = result.Should().BeOfType<ChannelSourceHandle>().Subject; var referenceCounted = newHandle._reference(); @@ -184,24 +172,13 @@ public void GetReadChannelSource_should_return_expected_result( [Theory] [ParameterAttributeData] - public void GetReadChannelSource_should_throw_when_disposed( + public async Task GetReadChannelSource_should_throw_when_disposed( [Values(false, true)] bool async) { var subject = CreateDisposedSubject(); - using var cancellationTokenSource = new CancellationTokenSource(); - var cancellationToken = cancellationTokenSource.Token; - - var exception = Record.Exception(() => - { - if (async) - { - subject.GetReadChannelSourceAsync(cancellationToken).GetAwaiter().GetResult(); - } - else - { - subject.GetReadChannelSource(cancellationToken); - } - }); + var exception = async ? + await Record.ExceptionAsync(() => subject.GetReadChannelSourceAsync(OperationContext.NoTimeout)) : + Record.Exception(() => subject.GetReadChannelSource(OperationContext.NoTimeout)); var e = exception.Should().BeOfType<ObjectDisposedException>().Subject; e.ObjectName.Should().Be(subject.GetType().FullName); diff --git a/tests/MongoDB.Driver.Tests/Core/Bindings/ChannelReadWriteBindingTests.cs b/tests/MongoDB.Driver.Tests/Core/Bindings/ChannelReadWriteBindingTests.cs index 15c900f87d3..acc8fdfa4ad 100644 --- a/tests/MongoDB.Driver.Tests/Core/Bindings/ChannelReadWriteBindingTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Bindings/ChannelReadWriteBindingTests.cs @@ -14,11 +14,7 @@ */ using System; -using System.Collections.Generic; -using System.Linq; using System.Reflection; -using System.Text; -using System.Threading; using System.Threading.Tasks; using FluentAssertions; using MongoDB.TestHelpers.XunitExtensions; @@ -132,29 +128,21 @@ public void Dispose_can_be_called_more_than_once() [Theory] [ParameterAttributeData] - public void 
GetReadChannelSource_should_return_expected_result( + public async Task GetReadChannelSource_should_return_expected_result( [Values(false, true)] bool async) { var mockChannel = new Mock<IChannelHandle>(); var mockSession = new Mock<ICoreSessionHandle>(); var subject = CreateSubject(channel: mockChannel.Object, session: mockSession.Object); - using var cancellationTokenSource = new CancellationTokenSource(); - var cancellationToken = cancellationTokenSource.Token; var forkedChannel = new Mock<IChannelHandle>().Object; var forkedSession = new Mock<ICoreSessionHandle>().Object; mockChannel.Setup(m => m.Fork()).Returns(forkedChannel); mockSession.Setup(m => m.Fork()).Returns(forkedSession); - IChannelSourceHandle result; - if (async) - { - result = subject.GetReadChannelSourceAsync(cancellationToken).GetAwaiter().GetResult(); - } - else - { - result = subject.GetReadChannelSource(cancellationToken); - } + var result = async ? + await subject.GetReadChannelSourceAsync(OperationContext.NoTimeout) : + subject.GetReadChannelSource(OperationContext.NoTimeout); var newHandle = result.Should().BeOfType<ChannelSourceHandle>().Subject; var referenceCounted = newHandle._reference(); @@ -165,29 +153,21 @@ public void GetReadChannelSource_should_return_expected_result( [Theory] [ParameterAttributeData] - public void GetWriteChannelSource_should_return_expected_result( + public async Task GetWriteChannelSource_should_return_expected_result( [Values(false, true)] bool async) { var mockChannel = new Mock<IChannelHandle>(); var mockSession = new Mock<ICoreSessionHandle>(); var subject = CreateSubject(channel: mockChannel.Object, session: mockSession.Object); - using var cancellationTokenSource = new CancellationTokenSource(); - var cancellationToken = cancellationTokenSource.Token; var forkedChannel = new Mock<IChannelHandle>().Object; var forkedSession = new Mock<ICoreSessionHandle>().Object; mockChannel.Setup(m => m.Fork()).Returns(forkedChannel); mockSession.Setup(m => 
m.Fork()).Returns(forkedSession); - IChannelSourceHandle result; - if (async) - { - result = subject.GetWriteChannelSourceAsync(cancellationToken).GetAwaiter().GetResult(); - } - else - { - result = subject.GetWriteChannelSource(cancellationToken); - } + var result = async ? + await subject.GetWriteChannelSourceAsync(OperationContext.NoTimeout) : + subject.GetWriteChannelSource(OperationContext.NoTimeout); var newHandle = result.Should().BeOfType<ChannelSourceHandle>().Subject; var referenceCounted = newHandle._reference(); @@ -198,24 +178,13 @@ public void GetWriteChannelSource_should_return_expected_result( [Theory] [ParameterAttributeData] - public void GetReadChannelSource_should_throw_when_disposed( + public async Task GetReadChannelSource_should_throw_when_disposed( [Values(false, true)] bool async) { var subject = CreateDisposedSubject(); - using var cancellationTokenSource = new CancellationTokenSource(); - var cancellationToken = cancellationTokenSource.Token; - - var exception = Record.Exception(() => - { - if (async) - { - subject.GetReadChannelSourceAsync(cancellationToken).GetAwaiter().GetResult(); - } - else - { - subject.GetReadChannelSource(cancellationToken); - } - }); + var exception = async ? 
+ await Record.ExceptionAsync(() => subject.GetReadChannelSourceAsync(OperationContext.NoTimeout)) : + Record.Exception(() => subject.GetReadChannelSource(OperationContext.NoTimeout)); var e = exception.Should().BeOfType<ObjectDisposedException>().Subject; e.ObjectName.Should().Be(subject.GetType().FullName); @@ -223,24 +192,13 @@ public void GetReadChannelSource_should_throw_when_disposed( [Theory] [ParameterAttributeData] - public void GetWriteChannelSource_should_throw_when_disposed( + public async Task GetWriteChannelSource_should_throw_when_disposed( [Values(false, true)] bool async) { var subject = CreateDisposedSubject(); - using var cancellationTokenSource = new CancellationTokenSource(); - var cancellationToken = cancellationTokenSource.Token; - - var exception = Record.Exception(() => - { - if (async) - { - subject.GetWriteChannelSourceAsync(cancellationToken).GetAwaiter().GetResult(); - } - else - { - subject.GetWriteChannelSource(cancellationToken); - } - }); + var exception = async ? 
+ await Record.ExceptionAsync(() => subject.GetWriteChannelSourceAsync(OperationContext.NoTimeout)) : + Record.Exception(() => subject.GetWriteChannelSource(OperationContext.NoTimeout)); var e = exception.Should().BeOfType<ObjectDisposedException>().Subject; e.ObjectName.Should().Be(subject.GetType().FullName); diff --git a/tests/MongoDB.Driver.Tests/Core/Bindings/ChannelSourceHandleTests.cs b/tests/MongoDB.Driver.Tests/Core/Bindings/ChannelSourceHandleTests.cs index 212ca5f46d9..37031583a76 100644 --- a/tests/MongoDB.Driver.Tests/Core/Bindings/ChannelSourceHandleTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Bindings/ChannelSourceHandleTests.cs @@ -15,6 +15,7 @@ using System; using System.Threading; +using System.Threading.Tasks; using FluentAssertions; using MongoDB.Bson.TestHelpers; using MongoDB.TestHelpers.XunitExtensions; @@ -53,29 +54,23 @@ public void Session_should_delegate_to_reference() [Theory] [ParameterAttributeData] - public void GetChannel_should_throw_if_disposed( + public async Task GetChannel_should_throw_if_disposed( [Values(false, true)] bool async) { var subject = new ChannelSourceHandle(_mockChannelSource.Object); subject.Dispose(); - Action act; - if (async) - { - act = () => subject.GetChannelAsync(CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - act = () => subject.GetChannel(CancellationToken.None); - } + var exception = async ? 
+ await Record.ExceptionAsync(() => subject.GetChannelAsync(OperationContext.NoTimeout)) : + Record.Exception(() => subject.GetChannel(OperationContext.NoTimeout)); - act.ShouldThrow<ObjectDisposedException>(); + exception.Should().BeOfType<ObjectDisposedException>(); } [Theory] [ParameterAttributeData] - public void GetChannel_should_delegate_to_reference( + public async Task GetChannel_should_delegate_to_reference( [Values(false, true)] bool async) { @@ -83,15 +78,15 @@ public void GetChannel_should_delegate_to_reference( if (async) { - subject.GetChannelAsync(CancellationToken.None).GetAwaiter().GetResult(); + await subject.GetChannelAsync(OperationContext.NoTimeout); - _mockChannelSource.Verify(s => s.GetChannelAsync(CancellationToken.None), Times.Once); + _mockChannelSource.Verify(s => s.GetChannelAsync(It.IsAny<OperationContext>()), Times.Once); } else { - subject.GetChannel(CancellationToken.None); + subject.GetChannel(OperationContext.NoTimeout); - _mockChannelSource.Verify(s => s.GetChannel(CancellationToken.None), Times.Once); + _mockChannelSource.Verify(s => s.GetChannel(It.IsAny<OperationContext>()), Times.Once); } } diff --git a/tests/MongoDB.Driver.Tests/Core/Bindings/ChannelSourceReadWriteBindingTests.cs b/tests/MongoDB.Driver.Tests/Core/Bindings/ChannelSourceReadWriteBindingTests.cs index d00f0151e47..fbac6fc6c7b 100644 --- a/tests/MongoDB.Driver.Tests/Core/Bindings/ChannelSourceReadWriteBindingTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Bindings/ChannelSourceReadWriteBindingTests.cs @@ -14,11 +14,9 @@ */ using System; -using System.Threading; +using System.Threading.Tasks; using FluentAssertions; using MongoDB.TestHelpers.XunitExtensions; -using MongoDB.Driver.Core.Bindings; -using MongoDB.Driver.Core.Clusters; using Moq; using Xunit; @@ -78,84 +76,60 @@ public void Session_should_return_expected_result() [Theory] [ParameterAttributeData] - public void GetReadChannelSourceAsync_should_throw_if_disposed( + public async Task 
GetReadChannelSourceAsync_should_throw_if_disposed( [Values(false, true)] bool async) { var subject = new ChannelSourceReadWriteBinding(_mockChannelSource.Object, ReadPreference.Primary, NoCoreSession.NewHandle()); subject.Dispose(); - Action act; - if (async) - { - act = () => subject.GetReadChannelSourceAsync(CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - act = () => subject.GetReadChannelSource(CancellationToken.None); - } - - act.ShouldThrow<ObjectDisposedException>(); + var exception = async ? + await Record.ExceptionAsync(() => subject.GetReadChannelSourceAsync(OperationContext.NoTimeout)) : + Record.Exception(() => subject.GetReadChannelSource(OperationContext.NoTimeout)); + + exception.Should().BeOfType<ObjectDisposedException>(); } [Theory] [ParameterAttributeData] - public void GetReadChannelSource_should_fork_the_channelSource( + public async Task GetReadChannelSource_should_fork_the_channelSource( [Values(false, true)] bool async) { var subject = new ChannelSourceReadWriteBinding(_mockChannelSource.Object, ReadPreference.Primary, NoCoreSession.NewHandle()); - - if (async) - { - subject.GetReadChannelSourceAsync(CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - subject.GetReadChannelSource(CancellationToken.None); - } + var result = async ? 
+ await subject.GetReadChannelSourceAsync(OperationContext.NoTimeout) : + subject.GetReadChannelSource(OperationContext.NoTimeout); _mockChannelSource.Verify(f => f.Fork(), Times.Once); } [Theory] [ParameterAttributeData] - public void GetWriteChannelSource_should_throw_if_disposed( + public async Task GetWriteChannelSource_should_throw_if_disposed( [Values(false, true)] bool async) { var subject = new ChannelSourceReadWriteBinding(_mockChannelSource.Object, ReadPreference.Primary, NoCoreSession.NewHandle()); subject.Dispose(); - Action act; - if (async) - { - act = () => subject.GetWriteChannelSourceAsync(CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - act = () => subject.GetWriteChannelSource(CancellationToken.None); - } - - act.ShouldThrow<ObjectDisposedException>(); + var exception = async ? + await Record.ExceptionAsync(() => subject.GetWriteChannelSourceAsync(OperationContext.NoTimeout)) : + Record.Exception(() => subject.GetWriteChannelSource(OperationContext.NoTimeout)); + + exception.Should().BeOfType<ObjectDisposedException>(); } [Theory] [ParameterAttributeData] - public void GetWriteChannelSource_should_fork_the_channelSource( + public async Task GetWriteChannelSource_should_fork_the_channelSource( [Values(false, true)] bool async) { var subject = new ChannelSourceReadWriteBinding(_mockChannelSource.Object, ReadPreference.Primary, NoCoreSession.NewHandle()); - - if (async) - { - subject.GetWriteChannelSourceAsync(CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - subject.GetWriteChannelSource(CancellationToken.None); - } + var result = async ? 
+ await subject.GetWriteChannelSourceAsync(OperationContext.NoTimeout) : + subject.GetWriteChannelSource(OperationContext.NoTimeout); _mockChannelSource.Verify(f => f.Fork(), Times.Once); } diff --git a/tests/MongoDB.Driver.Tests/Core/Bindings/CoreServerSessionPoolTests.cs b/tests/MongoDB.Driver.Tests/Core/Bindings/CoreServerSessionPoolTests.cs index be7a3b9d3ca..03be5c55ff0 100644 --- a/tests/MongoDB.Driver.Tests/Core/Bindings/CoreServerSessionPoolTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Bindings/CoreServerSessionPoolTests.cs @@ -18,16 +18,10 @@ using System.Linq; using System.Net; using System.Reflection; -using System.Threading; -using System.Threading.Tasks; using FluentAssertions; -using MongoDB.Driver.Core.Bindings; using MongoDB.Driver.Core.Clusters; -using MongoDB.Driver.Core.Clusters.ServerSelectors; -using MongoDB.Driver.Core.Configuration; using MongoDB.Driver.Core.Misc; using MongoDB.Driver.Core.Servers; -using MongoDB.Driver.Encryption; using Moq; using Xunit; @@ -205,35 +199,23 @@ public void ReleaseSession_should_have_expected_result(int[] pooledSessionWasRec } } - [Fact] - public void IsAboutToExpire_should_never_expire_in_load_balancing_mode() - { - var subject = CreateSubject(); - var mockedCluster = new TestCluster(ClusterType.LoadBalanced); - var mockedServerSessionPool = new CoreServerSessionPool(mockedCluster); - var mockSession = new Mock<ICoreServerSession>(); - var lastUsedAt = DateTime.UtcNow.AddSeconds(1741); - mockSession.SetupGet(m => m.LastUsedAt).Returns(lastUsedAt); - - var result = mockedServerSessionPool.IsAboutToExpire(mockSession.Object); - - result.Should().BeFalse(); - } - [Theory] - [InlineData(null, true)] - [InlineData(1741, true)] - [InlineData(1739, false)] - public void IsAboutToExpire_should_return_expected_result(int? 
lastUsedSecondsAgo, bool expectedResult) + [InlineData(ClusterType.Sharded, null, true)] + [InlineData(ClusterType.Sharded, 1741, true)] + [InlineData(ClusterType.Sharded, 1739, false)] + [InlineData(ClusterType.LoadBalanced, null, false)] + [InlineData(ClusterType.LoadBalanced, 1741, false)] + [InlineData(ClusterType.LoadBalanced, 1739, false)] + public void IsAboutToExpire_should_return_expected_result(ClusterType clusterType, int? lastUsedSecondsAgo, bool isAboutToExpire) { - var subject = CreateSubject(); + var subject = CreateSubject(clusterType); var mockSession = new Mock<ICoreServerSession>(); var lastUsedAt = lastUsedSecondsAgo == null ? (DateTime?)null : DateTime.UtcNow.AddSeconds(-lastUsedSecondsAgo.Value); mockSession.SetupGet(m => m.LastUsedAt).Returns(lastUsedAt); var result = subject.IsAboutToExpire(mockSession.Object); - result.Should().Be(expectedResult); + result.Should().Be(isAboutToExpire); } // private methods @@ -256,7 +238,7 @@ private Mock<ICoreServerSession> CreateMockSession(bool recentlyUsed) return recentlyUsed ? 
CreateMockRecentlyUsedSession() : CreateMockExpiredSession(); } - private CoreServerSessionPool CreateSubject() + private CoreServerSessionPool CreateSubject(ClusterType clusterType = ClusterType.Sharded) { var clusterId = new ClusterId(); var endPoint = new DnsEndPoint("localhost", 27017); @@ -270,36 +252,13 @@ private CoreServerSessionPool CreateSubject() version: new SemanticVersion(3, 6, 0), wireVersionRange: new Range<int>(6, 14)); - var clusterDescription = new ClusterDescription(clusterId, false, null, ClusterType.Sharded, [serverDescription]); + var clusterDescription = new ClusterDescription(clusterId, false, null, clusterType, [serverDescription]); var mockCluster = new Mock<IClusterInternal>(); mockCluster.SetupGet(m => m.Description).Returns(clusterDescription); return new CoreServerSessionPool(mockCluster.Object); } - - private class TestCluster : IClusterInternal - { - public TestCluster(ClusterType clusterType) - { - Description = new ClusterDescription(new ClusterId(), false, null, clusterType, Enumerable.Empty<ServerDescription>()); - } - - public ClusterId ClusterId => throw new NotImplementedException(); - - public ClusterDescription Description { get; } - - public ClusterSettings Settings => throw new NotImplementedException(); - - public event EventHandler<ClusterDescriptionChangedEventArgs> DescriptionChanged; - - public ICoreServerSession AcquireServerSession() => throw new NotImplementedException(); - public void Dispose() => throw new NotImplementedException(); - public void Initialize() => DescriptionChanged?.Invoke(this, new ClusterDescriptionChangedEventArgs(Description, Description)); - public IServer SelectServer(IServerSelector selector, CancellationToken cancellationToken) => throw new NotImplementedException(); - public Task<IServer> SelectServerAsync(IServerSelector selector, CancellationToken cancellationToken) => throw new NotImplementedException(); - public ICoreSessionHandle StartSession(CoreSessionOptions options = null) => 
throw new NotImplementedException(); - } } internal static class CoreServerSessionPoolReflector diff --git a/tests/MongoDB.Driver.Tests/Core/Bindings/CoreSessionTests.cs b/tests/MongoDB.Driver.Tests/Core/Bindings/CoreSessionTests.cs index 6b3496813e1..4f07e47b740 100644 --- a/tests/MongoDB.Driver.Tests/Core/Bindings/CoreSessionTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Bindings/CoreSessionTests.cs @@ -224,6 +224,7 @@ public void Dispose_should_have_expected_result( } [Fact] + [Trait("Category", "Integration")] public void StartTransaction_should_throw_when_write_concern_is_unacknowledged() { RequireServer.Check().ClusterType(ClusterType.ReplicaSet).Supports(Feature.Transactions); diff --git a/tests/MongoDB.Driver.Tests/Core/Bindings/ReadBindingHandleTests.cs b/tests/MongoDB.Driver.Tests/Core/Bindings/ReadBindingHandleTests.cs index d26b05ff749..070b01d7e48 100644 --- a/tests/MongoDB.Driver.Tests/Core/Bindings/ReadBindingHandleTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Bindings/ReadBindingHandleTests.cs @@ -14,7 +14,7 @@ */ using System; -using System.Threading; +using System.Threading.Tasks; using FluentAssertions; using MongoDB.TestHelpers.XunitExtensions; using Moq; @@ -51,29 +51,23 @@ public void Session_should_delegate_to_reference() [Theory] [ParameterAttributeData] - public void GetReadChannelSource_should_throw_if_disposed( + public async Task GetReadChannelSource_should_throw_if_disposed( [Values(false, true)] bool async) { var subject = new ReadBindingHandle(_mockReadBinding.Object); subject.Dispose(); - Action act; - if (async) - { - act = () => subject.GetReadChannelSourceAsync(CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - act = () => subject.GetReadChannelSource(CancellationToken.None); - } + var exception = async ? 
+ await Record.ExceptionAsync(() => subject.GetReadChannelSourceAsync(OperationContext.NoTimeout)) : + Record.Exception(() => subject.GetReadChannelSource(OperationContext.NoTimeout)); - act.ShouldThrow<ObjectDisposedException>(); + exception.Should().BeOfType<ObjectDisposedException>(); } [Theory] [ParameterAttributeData] - public void GetReadChannelSource_should_delegate_to_reference( + public async Task GetReadChannelSource_should_delegate_to_reference( [Values(false, true)] bool async) { @@ -81,15 +75,15 @@ public void GetReadChannelSource_should_delegate_to_reference( if (async) { - subject.GetReadChannelSourceAsync(CancellationToken.None).GetAwaiter().GetResult(); + await subject.GetReadChannelSourceAsync(OperationContext.NoTimeout); - _mockReadBinding.Verify(b => b.GetReadChannelSourceAsync(CancellationToken.None), Times.Once); + _mockReadBinding.Verify(b => b.GetReadChannelSourceAsync(OperationContext.NoTimeout), Times.Once); } else { - subject.GetReadChannelSource(CancellationToken.None); + subject.GetReadChannelSource(OperationContext.NoTimeout); - _mockReadBinding.Verify(b => b.GetReadChannelSource(CancellationToken.None), Times.Once); + _mockReadBinding.Verify(b => b.GetReadChannelSource(OperationContext.NoTimeout), Times.Once); } } diff --git a/tests/MongoDB.Driver.Tests/Core/Bindings/ReadPreferenceBindingTests.cs b/tests/MongoDB.Driver.Tests/Core/Bindings/ReadPreferenceBindingTests.cs index 744241191af..c3486f158e5 100644 --- a/tests/MongoDB.Driver.Tests/Core/Bindings/ReadPreferenceBindingTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Bindings/ReadPreferenceBindingTests.cs @@ -16,7 +16,6 @@ using System; using System.Net; using System.Reflection; -using System.Threading; using System.Threading.Tasks; using FluentAssertions; using MongoDB.Driver.Core.Clusters; @@ -75,29 +74,23 @@ public void Session_should_return_expected_result() [Theory] [ParameterAttributeData] - public void GetReadChannelSource_should_throw_if_disposed( + public async Task 
GetReadChannelSource_should_throw_if_disposed( [Values(false, true)] bool async) { var subject = new ReadPreferenceBinding(_mockCluster.Object, ReadPreference.Primary, NoCoreSession.NewHandle()); subject.Dispose(); - Action act; - if (async) - { - act = () => subject.GetReadChannelSourceAsync(CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - act = () => subject.GetReadChannelSource(CancellationToken.None); - } + var exception = async ? + await Record.ExceptionAsync(() => subject.GetReadChannelSourceAsync(OperationContext.NoTimeout)) : + Record.Exception(() => subject.GetReadChannelSource(OperationContext.NoTimeout)); - act.ShouldThrow<ObjectDisposedException>(); + exception.Should().BeOfType<ObjectDisposedException>(); } [Theory] [ParameterAttributeData] - public void GetReadChannelSource_should_use_a_read_preference_server_selector_to_select_the_server_from_the_cluster( + public async Task GetReadChannelSource_should_use_a_read_preference_server_selector_to_select_the_server_from_the_cluster( [Values(false, true)] bool async) { @@ -118,35 +111,32 @@ public void GetReadChannelSource_should_use_a_read_preference_server_selector_to if (async) { - _mockCluster.Setup(c => c.SelectServerAsync(It.IsAny<ReadPreferenceServerSelector>(), CancellationToken.None)).Returns(Task.FromResult(selectedServer)); + _mockCluster.Setup(c => c.SelectServerAsync(It.IsAny<OperationContext>(), It.IsAny<ReadPreferenceServerSelector>())).Returns(Task.FromResult(selectedServer)); - subject.GetReadChannelSourceAsync(CancellationToken.None).GetAwaiter().GetResult(); + await subject.GetReadChannelSourceAsync(OperationContext.NoTimeout); - _mockCluster.Verify(c => c.SelectServerAsync(It.IsAny<ReadPreferenceServerSelector>(), CancellationToken.None), Times.Once); + _mockCluster.Verify(c => c.SelectServerAsync(It.IsAny<OperationContext>(), It.IsAny<ReadPreferenceServerSelector>()), Times.Once); } else { - _mockCluster.Setup(c => 
c.SelectServer(It.IsAny<ReadPreferenceServerSelector>(), CancellationToken.None)).Returns(selectedServer); + _mockCluster.Setup(c => c.SelectServer(It.IsAny<OperationContext>(), It.IsAny<ReadPreferenceServerSelector>())).Returns(selectedServer); - subject.GetReadChannelSource(CancellationToken.None); + subject.GetReadChannelSource(OperationContext.NoTimeout); - _mockCluster.Verify(c => c.SelectServer(It.IsAny<ReadPreferenceServerSelector>(), CancellationToken.None), Times.Once); + _mockCluster.Verify(c => c.SelectServer(It.IsAny<OperationContext>(), It.IsAny<ReadPreferenceServerSelector>()), Times.Once); } } [Theory] [ParameterAttributeData] - public void GetReadChannelSource_should_fork_the_session( + public async Task GetReadChannelSource_should_fork_the_session( [Values(false, true)] bool async) { var mockSession = new Mock<ICoreSessionHandle>(); var subject = new ReadPreferenceBinding(_mockCluster.Object, ReadPreference.Primary, mockSession.Object); - using var cancellationTokenSource = new CancellationTokenSource(); - var cancellationToken = cancellationTokenSource.Token; - var selectedServer = new Mock<IServer>().Object; - _mockCluster.Setup(m => m.SelectServer(It.IsAny<IServerSelector>(), cancellationToken)).Returns(selectedServer); - _mockCluster.Setup(m => m.SelectServerAsync(It.IsAny<IServerSelector>(), cancellationToken)).Returns(Task.FromResult(selectedServer)); + _mockCluster.Setup(m => m.SelectServer(It.IsAny<OperationContext>(), It.IsAny<IServerSelector>())).Returns(selectedServer); + _mockCluster.Setup(m => m.SelectServerAsync(It.IsAny<OperationContext>(), It.IsAny<IServerSelector>())).Returns(Task.FromResult(selectedServer)); var forkedSession = new Mock<ICoreSessionHandle>().Object; mockSession.Setup(m => m.Fork()).Returns(forkedSession); @@ -162,15 +152,9 @@ public void GetReadChannelSource_should_fork_the_session( var finalClusterDescription = initialClusterDescription.WithType(ClusterType.Standalone); _mockCluster.SetupSequence(c => 
c.Description).Returns(initialClusterDescription).Returns(finalClusterDescription); - IChannelSourceHandle result; - if (async) - { - result = subject.GetReadChannelSourceAsync(cancellationToken).GetAwaiter().GetResult(); - } - else - { - result = subject.GetReadChannelSource(cancellationToken); - } + var result = async ? + await subject.GetReadChannelSourceAsync(OperationContext.NoTimeout) : + subject.GetReadChannelSource(OperationContext.NoTimeout); var handle = result.Should().BeOfType<ChannelSourceHandle>().Subject; var referenceCounted = handle._reference().Should().BeOfType<ReferenceCounted<IChannelSource>>().Subject; diff --git a/tests/MongoDB.Driver.Tests/Core/Bindings/ReadWriteBindingHandleTests.cs b/tests/MongoDB.Driver.Tests/Core/Bindings/ReadWriteBindingHandleTests.cs index 33f36397c1b..7549be8c4c8 100644 --- a/tests/MongoDB.Driver.Tests/Core/Bindings/ReadWriteBindingHandleTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Bindings/ReadWriteBindingHandleTests.cs @@ -14,10 +14,9 @@ */ using System; -using System.Threading; +using System.Threading.Tasks; using FluentAssertions; using MongoDB.TestHelpers.XunitExtensions; -using MongoDB.Driver.Core.Bindings; using Moq; using Xunit; @@ -52,29 +51,23 @@ public void Session_should_delegate_to_reference() [Theory] [ParameterAttributeData] - public void GetReadChannelSource_should_throw_if_disposed( + public async Task GetReadChannelSource_should_throw_if_disposed( [Values(false, true)] bool async) { var subject = new ReadWriteBindingHandle(_mockReadWriteBinding.Object); subject.Dispose(); - Action act; - if (async) - { - act = () => subject.GetReadChannelSourceAsync(CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - act = () => subject.GetReadChannelSource(CancellationToken.None); - } + var exception = async ? 
+ await Record.ExceptionAsync(() => subject.GetReadChannelSourceAsync(OperationContext.NoTimeout)) : + Record.Exception(() => subject.GetReadChannelSource(OperationContext.NoTimeout)); - act.ShouldThrow<ObjectDisposedException>(); + exception.Should().BeOfType<ObjectDisposedException>(); } [Theory] [ParameterAttributeData] - public void GetReadChannelSource_should_delegate_to_reference( + public async Task GetReadChannelSource_should_delegate_to_reference( [Values(false, true)] bool async) { @@ -82,43 +75,37 @@ public void GetReadChannelSource_should_delegate_to_reference( if (async) { - subject.GetReadChannelSourceAsync(CancellationToken.None).GetAwaiter().GetResult(); + await subject.GetReadChannelSourceAsync(OperationContext.NoTimeout); - _mockReadWriteBinding.Verify(b => b.GetReadChannelSourceAsync(CancellationToken.None), Times.Once); + _mockReadWriteBinding.Verify(b => b.GetReadChannelSourceAsync(It.IsAny<OperationContext>()), Times.Once); } else { - subject.GetReadChannelSource(CancellationToken.None); + subject.GetReadChannelSource(OperationContext.NoTimeout); - _mockReadWriteBinding.Verify(b => b.GetReadChannelSource(CancellationToken.None), Times.Once); + _mockReadWriteBinding.Verify(b => b.GetReadChannelSource(It.IsAny<OperationContext>()), Times.Once); } } [Theory] [ParameterAttributeData] - public void GetWriteChannelSource_should_throw_if_disposed( + public async Task GetWriteChannelSource_should_throw_if_disposed( [Values(false, true)] bool async) { var subject = new ReadWriteBindingHandle(_mockReadWriteBinding.Object); subject.Dispose(); - Action act; - if (async) - { - act = () => subject.GetWriteChannelSourceAsync(CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - act = () => subject.GetWriteChannelSource(CancellationToken.None); - } + var exception = async ? 
+ await Record.ExceptionAsync(() => subject.GetWriteChannelSourceAsync(OperationContext.NoTimeout)) : + Record.Exception(() => subject.GetWriteChannelSource(OperationContext.NoTimeout)); - act.ShouldThrow<ObjectDisposedException>(); + exception.Should().BeOfType<ObjectDisposedException>(); } [Theory] [ParameterAttributeData] - public void GetWriteChannelSource_should_delegate_to_reference( + public async Task GetWriteChannelSource_should_delegate_to_reference( [Values(false, true)] bool async) { @@ -126,15 +113,15 @@ public void GetWriteChannelSource_should_delegate_to_reference( if (async) { - subject.GetWriteChannelSourceAsync(CancellationToken.None).GetAwaiter().GetResult(); + await subject.GetWriteChannelSourceAsync(OperationContext.NoTimeout); - _mockReadWriteBinding.Verify(b => b.GetWriteChannelSourceAsync(CancellationToken.None), Times.Once); + _mockReadWriteBinding.Verify(b => b.GetWriteChannelSourceAsync(It.IsAny<OperationContext>()), Times.Once); } else { - subject.GetWriteChannelSource(CancellationToken.None); + subject.GetWriteChannelSource(OperationContext.NoTimeout); - _mockReadWriteBinding.Verify(b => b.GetWriteChannelSource(CancellationToken.None), Times.Once); + _mockReadWriteBinding.Verify(b => b.GetWriteChannelSource(It.IsAny<OperationContext>()), Times.Once); } } diff --git a/tests/MongoDB.Driver.Tests/Core/Bindings/ServerChannelSourceTests.cs b/tests/MongoDB.Driver.Tests/Core/Bindings/ServerChannelSourceTests.cs index 9b20a631e7b..bd6737efdb1 100644 --- a/tests/MongoDB.Driver.Tests/Core/Bindings/ServerChannelSourceTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Bindings/ServerChannelSourceTests.cs @@ -14,13 +14,8 @@ */ using System; -using System.Collections.Generic; -using System.Linq; -using System.Text; -using System.Threading; using System.Threading.Tasks; using FluentAssertions; -using MongoDB.Driver.Core.Bindings; using MongoDB.Driver.Core.Clusters; using MongoDB.Driver.Core.Servers; using MongoDB.Driver.Core.Helpers; @@ -32,40 +27,36 @@ 
namespace MongoDB.Driver.Core.Bindings { public class ServerChannelSourceTests { - private Mock<IServer> _mockServer; - - public ServerChannelSourceTests() - { - _mockServer = new Mock<IServer>(); - } - [Fact] public void Constructor_should_throw_when_server_is_null() { var session = new Mock<ICoreSessionHandle>().Object; - Action act = () => new ServerChannelSource(null, session); + var exception = Record.Exception(() => new ServerChannelSource(null, session)); - act.ShouldThrow<ArgumentNullException>(); + exception.Should().BeOfType<ArgumentNullException>() + .Subject.ParamName.Should().Be("server"); } [Fact] public void Constructor_should_throw_when_session_is_null() { - Action act = () => new ServerChannelSource(_mockServer.Object, null); + var server = Mock.Of<IServer>(); - act.ShouldThrow<ArgumentNullException>(); + var exception = Record.Exception(() => new ServerChannelSource(server, null)); + + exception.Should().BeOfType<ArgumentNullException>() + .Subject.ParamName.Should().Be("session"); } [Fact] public void ServerDescription_should_return_description_of_server() { var session = new Mock<ICoreSessionHandle>().Object; - var subject = new ServerChannelSource(_mockServer.Object, session); - var desc = ServerDescriptionHelper.Disconnected(new ClusterId()); + var serverMock = new Mock<IServer>(); + serverMock.SetupGet(s => s.Description).Returns(desc); - _mockServer.SetupGet(s => s.Description).Returns(desc); - + var subject = new ServerChannelSource(serverMock.Object, session); var result = subject.ServerDescription; result.Should().BeSameAs(desc); @@ -75,7 +66,7 @@ public void ServerDescription_should_return_description_of_server() public void Session_should_return_expected_result() { var session = new Mock<ICoreSessionHandle>().Object; - var subject = new ServerChannelSource(_mockServer.Object, session); + var subject = new ServerChannelSource(Mock.Of<IServer>(), session); var result = subject.Session; @@ -84,47 +75,42 @@ public void 
Session_should_return_expected_result() [Theory] [ParameterAttributeData] - public void GetChannel_should_throw_if_disposed( + public async Task GetChannel_should_throw_if_disposed( [Values(false, true)] bool async) { var session = new Mock<ICoreSessionHandle>().Object; - var subject = new ServerChannelSource(_mockServer.Object, session); + var subject = new ServerChannelSource(Mock.Of<IServer>(), session); subject.Dispose(); - Action act; - if (async) - { - act = () => subject.GetChannelAsync(CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - act = () => subject.GetChannel(CancellationToken.None); - } + var exception = async ? + await Record.ExceptionAsync(() => subject.GetChannelAsync(OperationContext.NoTimeout)) : + Record.Exception(() => subject.GetChannel(OperationContext.NoTimeout)); - act.ShouldThrow<ObjectDisposedException>(); + exception.Should().BeOfType<ObjectDisposedException>(); } [Theory] [ParameterAttributeData] - public void GetChannel_should_get_connection_from_server( + public async Task GetChannel_should_get_connection_from_server( [Values(false, true)] bool async) { + var serverMock = new Mock<IServer>(); var session = new Mock<ICoreSessionHandle>().Object; - var subject = new ServerChannelSource(_mockServer.Object, session); + var subject = new ServerChannelSource(serverMock.Object, session); if (async) { - subject.GetChannelAsync(CancellationToken.None).GetAwaiter().GetResult(); + await subject.GetChannelAsync(OperationContext.NoTimeout); - _mockServer.Verify(s => s.GetChannelAsync(CancellationToken.None), Times.Once); + serverMock.Verify(s => s.GetChannelAsync(It.IsAny<OperationContext>()), Times.Once); } else { - subject.GetChannel(CancellationToken.None); + subject.GetChannel(OperationContext.NoTimeout); - _mockServer.Verify(s => s.GetChannel(CancellationToken.None), Times.Once); + serverMock.Verify(s => s.GetChannel(It.IsAny<OperationContext>()), Times.Once); } } @@ -132,7 +118,7 @@ public void 
GetChannel_should_get_connection_from_server( public void Dispose_should_dispose_session() { var mockSession = new Mock<ICoreSessionHandle>(); - var subject = new ServerChannelSource(_mockServer.Object, mockSession.Object); + var subject = new ServerChannelSource(Mock.Of<IServer>(), mockSession.Object); subject.Dispose(); diff --git a/tests/MongoDB.Driver.Tests/Core/Bindings/SingleServerReadBindingTests.cs b/tests/MongoDB.Driver.Tests/Core/Bindings/SingleServerReadBindingTests.cs index 0c8e53a5321..ab0fae6858c 100644 --- a/tests/MongoDB.Driver.Tests/Core/Bindings/SingleServerReadBindingTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Bindings/SingleServerReadBindingTests.cs @@ -17,6 +17,7 @@ using System.Net; using System.Reflection; using System.Threading; +using System.Threading.Tasks; using FluentAssertions; using MongoDB.TestHelpers.XunitExtensions; using MongoDB.Driver.Core.Servers; @@ -126,26 +127,17 @@ public void Dispose_can_be_called_more_than_once() [Theory] [ParameterAttributeData] - public void GetReadChannelSource_should_return_expected_result( + public async Task GetReadChannelSource_should_return_expected_result( [Values(false, true)] bool async) { var mockSession = new Mock<ICoreSessionHandle>(); var subject = CreateSubject(session: mockSession.Object); - using var cancellationTokenSource = new CancellationTokenSource(); - var cancellationToken = cancellationTokenSource.Token; - var forkedSession = new Mock<ICoreSessionHandle>().Object; mockSession.Setup(m => m.Fork()).Returns(forkedSession); - IChannelSourceHandle result; - if (async) - { - result = subject.GetReadChannelSourceAsync(cancellationToken).GetAwaiter().GetResult(); - } - else - { - result = subject.GetReadChannelSource(cancellationToken); - } + var result = async ? 
+ await subject.GetReadChannelSourceAsync(OperationContext.NoTimeout) : + subject.GetReadChannelSource(OperationContext.NoTimeout); var newHandle = result.Should().BeOfType<ChannelSourceHandle>().Subject; var referenceCounted = newHandle._reference(); @@ -155,24 +147,14 @@ public void GetReadChannelSource_should_return_expected_result( [Theory] [ParameterAttributeData] - public void GetReadChannelSource_should_throw_when_disposed( + public async Task GetReadChannelSource_should_throw_when_disposed( [Values(false, true)] bool async) { var subject = CreateDisposedSubject(); - using var cancellationTokenSource = new CancellationTokenSource(); - var cancellationToken = cancellationTokenSource.Token; - - var exception = Record.Exception(() => - { - if (async) - { - subject.GetReadChannelSourceAsync(cancellationToken).GetAwaiter().GetResult(); - } - else - { - subject.GetReadChannelSource(cancellationToken); - } - }); + + var exception = async ? + await Record.ExceptionAsync(() => subject.GetReadChannelSourceAsync(OperationContext.NoTimeout)) : + Record.Exception(() => subject.GetReadChannelSource(OperationContext.NoTimeout)); var e = exception.Should().BeOfType<ObjectDisposedException>().Subject; e.ObjectName.Should().Be(subject.GetType().FullName); diff --git a/tests/MongoDB.Driver.Tests/Core/Bindings/SingleServerReadWriteBindingTests.cs b/tests/MongoDB.Driver.Tests/Core/Bindings/SingleServerReadWriteBindingTests.cs index 1087d544760..2efd25ca017 100644 --- a/tests/MongoDB.Driver.Tests/Core/Bindings/SingleServerReadWriteBindingTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Bindings/SingleServerReadWriteBindingTests.cs @@ -14,11 +14,7 @@ */ using System; -using System.Collections.Generic; -using System.Linq; using System.Reflection; -using System.Text; -using System.Threading; using System.Threading.Tasks; using FluentAssertions; using MongoDB.TestHelpers.XunitExtensions; @@ -112,26 +108,17 @@ public void Dispose_can_be_called_more_than_once() [Theory] 
[ParameterAttributeData] - public void GetReadChannelSource_should_return_expected_result( + public async Task GetReadChannelSource_should_return_expected_result( [Values(false, true)] bool async) { var mockSession = new Mock<ICoreSessionHandle>(); var subject = CreateSubject(session: mockSession.Object); - using var cancellationTokenSource = new CancellationTokenSource(); - var cancellationToken = cancellationTokenSource.Token; - var forkedSession = new Mock<ICoreSessionHandle>().Object; mockSession.Setup(m => m.Fork()).Returns(forkedSession); - IChannelSourceHandle result; - if (async) - { - result = subject.GetReadChannelSourceAsync(cancellationToken).GetAwaiter().GetResult(); - } - else - { - result = subject.GetReadChannelSource(cancellationToken); - } + var result = async ? + await subject.GetReadChannelSourceAsync(OperationContext.NoTimeout) : + subject.GetReadChannelSource(OperationContext.NoTimeout); var newHandle = result.Should().BeOfType<ChannelSourceHandle>().Subject; var referenceCounted = newHandle._reference(); @@ -141,24 +128,13 @@ public void GetReadChannelSource_should_return_expected_result( [Theory] [ParameterAttributeData] - public void GetReadChannelSource_should_throw_when_disposed( + public async Task GetReadChannelSource_should_throw_when_disposed( [Values(false, true)] bool async) { var subject = CreateDisposedSubject(); - using var cancellationTokenSource = new CancellationTokenSource(); - var cancellationToken = cancellationTokenSource.Token; - - var exception = Record.Exception(() => - { - if (async) - { - subject.GetReadChannelSourceAsync(cancellationToken).GetAwaiter().GetResult(); - } - else - { - subject.GetReadChannelSource(cancellationToken); - } - }); + var exception = async ? 
+ await Record.ExceptionAsync(() => subject.GetReadChannelSourceAsync(OperationContext.NoTimeout)) : + Record.Exception(() => subject.GetReadChannelSource(OperationContext.NoTimeout)); var e = exception.Should().BeOfType<ObjectDisposedException>().Subject; e.ObjectName.Should().Be(subject.GetType().FullName); @@ -166,26 +142,17 @@ public void GetReadChannelSource_should_throw_when_disposed( [Theory] [ParameterAttributeData] - public void GetWriteChannelSource_should_return_expected_result( + public async Task GetWriteChannelSource_should_return_expected_result( [Values(false, true)] bool async) { var mockSession = new Mock<ICoreSessionHandle>(); var subject = CreateSubject(session: mockSession.Object); - using var cancellationTokenSource = new CancellationTokenSource(); - var cancellationToken = cancellationTokenSource.Token; - var forkedSession = new Mock<ICoreSessionHandle>().Object; mockSession.Setup(m => m.Fork()).Returns(forkedSession); - IChannelSourceHandle result; - if (async) - { - result = subject.GetWriteChannelSourceAsync(cancellationToken).GetAwaiter().GetResult(); - } - else - { - result = subject.GetWriteChannelSource(cancellationToken); - } + var result = async ? 
+ await subject.GetWriteChannelSourceAsync(OperationContext.NoTimeout) : + subject.GetWriteChannelSource(OperationContext.NoTimeout); var newHandle = result.Should().BeOfType<ChannelSourceHandle>().Subject; var referenceCounted = newHandle._reference(); @@ -195,24 +162,13 @@ public void GetWriteChannelSource_should_return_expected_result( [Theory] [ParameterAttributeData] - public void GetWriteChannelSource_should_throw_when_disposed( + public async Task GetWriteChannelSource_should_throw_when_disposed( [Values(false, true)] bool async) { var subject = CreateDisposedSubject(); - using var cancellationTokenSource = new CancellationTokenSource(); - var cancellationToken = cancellationTokenSource.Token; - - var exception = Record.Exception(() => - { - if (async) - { - subject.GetWriteChannelSourceAsync(cancellationToken).GetAwaiter().GetResult(); - } - else - { - subject.GetWriteChannelSource(cancellationToken); - } - }); + var exception = async ? + await Record.ExceptionAsync(() => subject.GetWriteChannelSourceAsync(OperationContext.NoTimeout)) : + Record.Exception(() => subject.GetWriteChannelSource(OperationContext.NoTimeout)); var e = exception.Should().BeOfType<ObjectDisposedException>().Subject; e.ObjectName.Should().Be(subject.GetType().FullName); diff --git a/tests/MongoDB.Driver.Tests/Core/Bindings/WritableServerBindingTests.cs b/tests/MongoDB.Driver.Tests/Core/Bindings/WritableServerBindingTests.cs index a5796c5e5a0..f16dc755894 100644 --- a/tests/MongoDB.Driver.Tests/Core/Bindings/WritableServerBindingTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Bindings/WritableServerBindingTests.cs @@ -16,7 +16,6 @@ using System; using System.Net; using System.Reflection; -using System.Threading; using System.Threading.Tasks; using FluentAssertions; using MongoDB.Driver.Core.Clusters; @@ -77,29 +76,23 @@ public void Session_should_return_expected_result() [Theory] [ParameterAttributeData] - public void GetReadChannelSource_should_throw_if_disposed( + public async Task 
GetReadChannelSource_should_throw_if_disposed( [Values(false, true)] bool async) { var subject = new WritableServerBinding(_mockCluster.Object, NoCoreSession.NewHandle()); subject.Dispose(); - Action act; - if (async) - { - act = () => subject.GetReadChannelSourceAsync(CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - act = () => subject.GetReadChannelSource(CancellationToken.None); - } + var exception = async ? + await Record.ExceptionAsync(() => subject.GetReadChannelSourceAsync(OperationContext.NoTimeout)) : + Record.Exception(() => subject.GetReadChannelSource(OperationContext.NoTimeout)); - act.ShouldThrow<ObjectDisposedException>(); + exception.Should().BeOfType<ObjectDisposedException>(); } [Theory] [ParameterAttributeData] - public void GetReadChannelSource_should_use_a_writable_server_selector_to_select_the_server_from_the_cluster( + public async Task GetReadChannelSource_should_use_a_writable_server_selector_to_select_the_server_from_the_cluster( [Values(false, true)] bool async) { @@ -122,47 +115,41 @@ public void GetReadChannelSource_should_use_a_writable_server_selector_to_select if (async) { - _mockCluster.Setup(c => c.SelectServerAsync(It.IsAny<WritableServerSelector>(), CancellationToken.None)).Returns(Task.FromResult(selectedServer)); + _mockCluster.Setup(c => c.SelectServerAsync(OperationContext.NoTimeout, It.IsAny<WritableServerSelector>())).Returns(Task.FromResult(selectedServer)); - subject.GetReadChannelSourceAsync(CancellationToken.None).GetAwaiter().GetResult(); + await subject.GetReadChannelSourceAsync(OperationContext.NoTimeout); - _mockCluster.Verify(c => c.SelectServerAsync(It.IsAny<WritableServerSelector>(), CancellationToken.None), Times.Once); + _mockCluster.Verify(c => c.SelectServerAsync(OperationContext.NoTimeout, It.IsAny<WritableServerSelector>()), Times.Once); } else { - _mockCluster.Setup(c => c.SelectServer(It.IsAny<WritableServerSelector>(), CancellationToken.None)).Returns(selectedServer); + 
_mockCluster.Setup(c => c.SelectServer(OperationContext.NoTimeout, It.IsAny<WritableServerSelector>())).Returns(selectedServer); - subject.GetReadChannelSource(CancellationToken.None); + subject.GetReadChannelSource(OperationContext.NoTimeout); - _mockCluster.Verify(c => c.SelectServer(It.IsAny<WritableServerSelector>(), CancellationToken.None), Times.Once); + _mockCluster.Verify(c => c.SelectServer(OperationContext.NoTimeout, It.IsAny<WritableServerSelector>()), Times.Once); } } [Theory] [ParameterAttributeData] - public void GetWriteChannelSource_should_throw_if_disposed( + public async Task GetWriteChannelSource_should_throw_if_disposed( [Values(false, true)] bool async) { var subject = new WritableServerBinding(_mockCluster.Object, NoCoreSession.NewHandle()); subject.Dispose(); - Action act; - if (async) - { - act = () => subject.GetWriteChannelSourceAsync(CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - act = () => subject.GetWriteChannelSource(CancellationToken.None); - } + var exception = async ? 
+ await Record.ExceptionAsync(() => subject.GetWriteChannelSourceAsync(OperationContext.NoTimeout)) : + Record.Exception(() => subject.GetWriteChannelSource(OperationContext.NoTimeout)); - act.ShouldThrow<ObjectDisposedException>(); + exception.Should().BeOfType<ObjectDisposedException>(); } [Theory] [ParameterAttributeData] - public void GetWriteChannelSourceAsync_should_use_a_writable_server_selector_to_select_the_server_from_the_cluster( + public async Task GetWriteChannelSourceAsync_should_use_a_writable_server_selector_to_select_the_server_from_the_cluster( [Values(false, true)] bool async) { @@ -184,19 +171,19 @@ public void GetWriteChannelSourceAsync_should_use_a_writable_server_selector_to_ if (async) { - _mockCluster.Setup(c => c.SelectServerAsync(It.IsAny<WritableServerSelector>(), CancellationToken.None)).Returns(Task.FromResult(selectedServer)); + _mockCluster.Setup(c => c.SelectServerAsync(OperationContext.NoTimeout, It.IsAny<WritableServerSelector>())).Returns(Task.FromResult(selectedServer)); - subject.GetWriteChannelSourceAsync(CancellationToken.None).GetAwaiter().GetResult(); + await subject.GetWriteChannelSourceAsync(OperationContext.NoTimeout); - _mockCluster.Verify(c => c.SelectServerAsync(It.IsAny<WritableServerSelector>(), CancellationToken.None), Times.Once); + _mockCluster.Verify(c => c.SelectServerAsync(OperationContext.NoTimeout, It.IsAny<WritableServerSelector>()), Times.Once); } else { - _mockCluster.Setup(c => c.SelectServer(It.IsAny<WritableServerSelector>(), CancellationToken.None)).Returns(selectedServer); + _mockCluster.Setup(c => c.SelectServer(OperationContext.NoTimeout, It.IsAny<WritableServerSelector>())).Returns(selectedServer); - subject.GetWriteChannelSource(CancellationToken.None); + subject.GetWriteChannelSource(OperationContext.NoTimeout); - _mockCluster.Verify(c => c.SelectServer(It.IsAny<WritableServerSelector>(), CancellationToken.None), Times.Once); + _mockCluster.Verify(c => c.SelectServer(OperationContext.NoTimeout, 
It.IsAny<WritableServerSelector>()), Times.Once); } } @@ -225,25 +212,25 @@ public async Task GetWriteChannelSource_should_use_a_composite_server_selector_t if (async) { - _mockCluster.Setup(c => c.SelectServerAsync(It.Is<CompositeServerSelector>(cp => cp.ToString().Contains("PriorityServerSelector")), CancellationToken.None)).Returns(Task.FromResult(selectedServer)); + _mockCluster.Setup(c => c.SelectServerAsync(OperationContext.NoTimeout, It.Is<CompositeServerSelector>(cp => cp.ToString().Contains("PriorityServerSelector")))).Returns(Task.FromResult(selectedServer)); - await subject.GetWriteChannelSourceAsync(deprioritizedServers, CancellationToken.None); + await subject.GetWriteChannelSourceAsync(OperationContext.NoTimeout, deprioritizedServers); - _mockCluster.Verify(c => c.SelectServerAsync(It.Is<CompositeServerSelector>(cp => cp.ToString().Contains("PriorityServerSelector")), CancellationToken.None), Times.Once); + _mockCluster.Verify(c => c.SelectServerAsync(OperationContext.NoTimeout, It.Is<CompositeServerSelector>(cp => cp.ToString().Contains("PriorityServerSelector"))), Times.Once); } else { - _mockCluster.Setup(c => c.SelectServer(It.Is<CompositeServerSelector>(cp => cp.ToString().Contains("PriorityServerSelector")), CancellationToken.None)).Returns(selectedServer); + _mockCluster.Setup(c => c.SelectServer(OperationContext.NoTimeout, It.Is<CompositeServerSelector>(cp => cp.ToString().Contains("PriorityServerSelector")))).Returns(selectedServer); - subject.GetWriteChannelSource(deprioritizedServers, CancellationToken.None); + subject.GetWriteChannelSource(OperationContext.NoTimeout, deprioritizedServers); - _mockCluster.Verify(c => c.SelectServer(It.Is<CompositeServerSelector>(c => c.ToString().Contains("PriorityServerSelector")), CancellationToken.None), Times.Once); + _mockCluster.Verify(c => c.SelectServer(OperationContext.NoTimeout, It.Is<CompositeServerSelector>(c => c.ToString().Contains("PriorityServerSelector"))), Times.Once); } } [Theory] 
[ParameterAttributeData] - public void GetWriteChannelSource_with_mayUseSecondary_should_pass_mayUseSecondary_to_server_selector( + public async Task GetWriteChannelSource_with_mayUseSecondary_should_pass_mayUseSecondary_to_server_selector( [Values(false, true)] bool async) { @@ -269,19 +256,19 @@ public void GetWriteChannelSource_with_mayUseSecondary_should_pass_mayUseSeconda if (async) { - _mockCluster.Setup(c => c.SelectServerAsync(It.IsAny<WritableServerSelector>(), CancellationToken.None)).Returns(Task.FromResult(selectedServer)); + _mockCluster.Setup(c => c.SelectServerAsync(OperationContext.NoTimeout, It.IsAny<WritableServerSelector>())).Returns(Task.FromResult(selectedServer)); - subject.GetWriteChannelSourceAsync(mayUseSecondary, CancellationToken.None).GetAwaiter().GetResult(); + await subject.GetWriteChannelSourceAsync(OperationContext.NoTimeout, mayUseSecondary); - _mockCluster.Verify(c => c.SelectServerAsync(It.Is<WritableServerSelector>(s => s.MayUseSecondary == mayUseSecondary), CancellationToken.None), Times.Once); + _mockCluster.Verify(c => c.SelectServerAsync(OperationContext.NoTimeout, It.Is<WritableServerSelector>(s => s.MayUseSecondary == mayUseSecondary)), Times.Once); } else { - _mockCluster.Setup(c => c.SelectServer(It.IsAny<WritableServerSelector>(), CancellationToken.None)).Returns(selectedServer); + _mockCluster.Setup(c => c.SelectServer(OperationContext.NoTimeout, It.IsAny<WritableServerSelector>())).Returns(selectedServer); - subject.GetWriteChannelSource(mayUseSecondary, CancellationToken.None); + subject.GetWriteChannelSource(OperationContext.NoTimeout, mayUseSecondary); - _mockCluster.Verify(c => c.SelectServer(It.Is<WritableServerSelector>(s => s.MayUseSecondary == mayUseSecondary), CancellationToken.None), Times.Once); + _mockCluster.Verify(c => c.SelectServer(OperationContext.NoTimeout, It.Is<WritableServerSelector>(s => s.MayUseSecondary == mayUseSecondary)), Times.Once); } } diff --git 
a/tests/MongoDB.Driver.Tests/Core/Clusters/ClusterTests.cs b/tests/MongoDB.Driver.Tests/Core/Clusters/ClusterTests.cs index 0d2b8bd2d20..5879776f3ac 100644 --- a/tests/MongoDB.Driver.Tests/Core/Clusters/ClusterTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Clusters/ClusterTests.cs @@ -64,7 +64,7 @@ public void SupportedWireVersionRange_should_return_expected_result() { var result = Cluster.SupportedWireVersionRange; - result.Should().Be(new Range<int>(7, 27)); + result.Should().Be(new Range<int>(8, 28)); } [Fact] @@ -120,29 +120,23 @@ public void AcquireServerSession_should_call_serverSessionPool_AcquireSession() [Theory] [ParameterAttributeData] - public void SelectServer_should_throw_if_not_initialized( + public async Task SelectServer_should_throw_if_not_initialized( [Values(false, true)] bool async) { var selector = new Mock<IServerSelector>().Object; var subject = CreateSubject(); - Action act; - if (async) - { - act = () => subject.SelectServerAsync(selector, CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - act = () => subject.SelectServer(selector, CancellationToken.None); - } + var exception = async ? + await Record.ExceptionAsync(() => subject.SelectServerAsync(OperationContext.NoTimeout, selector)) : + Record.Exception(() => subject.SelectServer(OperationContext.NoTimeout, selector)); - act.ShouldThrow<InvalidOperationException>(); + exception.Should().BeOfType<InvalidOperationException>(); } [Theory] [ParameterAttributeData] - public void SelectServer_should_throw_if_disposed( + public async Task SelectServer_should_throw_if_disposed( [Values(false, true)] bool async) { @@ -150,44 +144,32 @@ public void SelectServer_should_throw_if_disposed( var subject = CreateSubject(); subject.Dispose(); - Action act; - if (async) - { - act = () => subject.SelectServerAsync(selector, CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - act = () => subject.SelectServer(selector, CancellationToken.None); - } + var exception = async ? 
+ await Record.ExceptionAsync(() => subject.SelectServerAsync(OperationContext.NoTimeout, selector)) : + Record.Exception(() => subject.SelectServer(OperationContext.NoTimeout, selector)); - act.ShouldThrow<ObjectDisposedException>(); + exception.Should().BeOfType<ObjectDisposedException>(); } [Theory] [ParameterAttributeData] - public void SelectServer_should_throw_if_serverSelector_is_null( + public async Task SelectServer_should_throw_if_serverSelector_is_null( [Values(false, true)] bool async) { var subject = CreateSubject(); subject.Initialize(); - Action act; - if (async) - { - act = () => subject.SelectServerAsync(null, CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - act = () => subject.SelectServer(null, CancellationToken.None); - } + var exception = async ? + await Record.ExceptionAsync(() => subject.SelectServerAsync(OperationContext.NoTimeout, null)) : + Record.Exception(() => subject.SelectServer(OperationContext.NoTimeout, null)); - act.ShouldThrow<ArgumentNullException>(); + exception.Should().BeOfType<ArgumentNullException>(); } [Theory] [ParameterAttributeData] - public void SelectServer_should_return_a_server_if_one_matches( + public async Task SelectServer_should_return_a_server_if_one_matches( [Values(false, true)] bool async) { @@ -200,15 +182,9 @@ public void SelectServer_should_return_a_server_if_one_matches( var selector = new DelegateServerSelector((c, s) => s); - IServer result; - if (async) - { - result = subject.SelectServerAsync(selector, CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - result = subject.SelectServer(selector, CancellationToken.None); - } + var result = async ? 
+ await subject.SelectServerAsync(OperationContext.NoTimeout, selector) : + subject.SelectServer(OperationContext.NoTimeout, selector); result.Should().NotBeNull(); @@ -219,7 +195,7 @@ public void SelectServer_should_return_a_server_if_one_matches( [Theory] [ParameterAttributeData] - public void SelectServer_should_return_second_server_if_first_cannot_be_found( + public async Task SelectServer_should_return_second_server_if_first_cannot_be_found( [Values(false, true)] bool async) { @@ -236,15 +212,10 @@ public void SelectServer_should_return_second_server_if_first_cannot_be_found( var selector = new DelegateServerSelector((c, s) => s); - IServer result; - if (async) - { - result = subject.SelectServerAsync(selector, CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - result = subject.SelectServer(selector, CancellationToken.None); - } + var result = async ? + await subject.SelectServerAsync(OperationContext.NoTimeout, selector) : + subject.SelectServer(OperationContext.NoTimeout, selector); + result.Should().NotBeNull(); result.EndPoint.Should().Be(endPoint2); @@ -256,11 +227,11 @@ public void SelectServer_should_return_second_server_if_first_cannot_be_found( [Theory] [ParameterAttributeData] - public void SelectServer_should_throw_if_no_servers_match( + public async Task SelectServer_should_throw_if_no_servers_match( [Values(false, true)] bool async) { - var subject = CreateSubject(); + var subject = CreateSubject(serverSelectionTimeout: TimeSpan.FromMilliseconds(10)); subject.Initialize(); var connected = ServerDescriptionHelper.Connected(subject.Description.ClusterId); @@ -269,17 +240,11 @@ public void SelectServer_should_throw_if_no_servers_match( var selector = new DelegateServerSelector((c, s) => Enumerable.Empty<ServerDescription>()); - Action act; - if (async) - { - act = () => subject.SelectServerAsync(selector, CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - act = () => subject.SelectServer(selector, 
CancellationToken.None); - } + var exception = async ? + await Record.ExceptionAsync(() => subject.SelectServerAsync(OperationContext.NoTimeout, selector)) : + Record.Exception(() => subject.SelectServer(OperationContext.NoTimeout, selector)); - act.ShouldThrow<TimeoutException>(); + exception.Should().BeOfType<TimeoutException>(); _capturedEvents.Next().Should().BeOfType<ClusterSelectingServerEvent>(); _capturedEvents.Next().Should().BeOfType<ClusterEnteredSelectionQueueEvent>(); @@ -289,7 +254,7 @@ public void SelectServer_should_throw_if_no_servers_match( [Theory] [ParameterAttributeData] - public void SelectServer_should_throw_if_the_matched_server_cannot_be_found_and_no_others_matched( + public async Task SelectServer_should_throw_if_the_matched_server_cannot_be_found_and_no_others_matched( [Values(false, true)] bool async) { @@ -303,17 +268,11 @@ public void SelectServer_should_throw_if_the_matched_server_cannot_be_found_and_ var selector = new DelegateServerSelector((c, s) => s); - Action act; - if (async) - { - act = () => subject.SelectServerAsync(selector, CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - act = () => subject.SelectServer(selector, CancellationToken.None); - } + var exception = async ? 
+ await Record.ExceptionAsync(() => subject.SelectServerAsync(OperationContext.NoTimeout, selector)) : + Record.Exception(() => subject.SelectServer(OperationContext.NoTimeout, selector)); - act.ShouldThrow<TimeoutException>(); + exception.Should().BeOfType<TimeoutException>(); _capturedEvents.Next().Should().BeOfType<ClusterSelectingServerEvent>(); _capturedEvents.Next().Should().BeOfType<ClusterEnteredSelectionQueueEvent>(); @@ -324,9 +283,9 @@ public void SelectServer_should_throw_if_the_matched_server_cannot_be_found_and_ [Theory] [InlineData(0, 0, false)] [InlineData(0, 0, true)] - [InlineData(28, 29, false)] - [InlineData(28, 29, true)] - public void SelectServer_should_throw_if_any_servers_are_incompatible(int min, int max, bool async) + [InlineData(29, 30, false)] + [InlineData(29, 30, true)] + public async Task SelectServer_should_throw_if_any_servers_are_incompatible(int min, int max, bool async) { var subject = CreateSubject(); subject.Initialize(); @@ -337,17 +296,11 @@ public void SelectServer_should_throw_if_any_servers_are_incompatible(int min, i var selector = new DelegateServerSelector((c, s) => s); - Action act; - if (async) - { - act = () => subject.SelectServerAsync(selector, CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - act = () => subject.SelectServer(selector, CancellationToken.None); - } + var exception = async ? 
+ await Record.ExceptionAsync(() => subject.SelectServerAsync(OperationContext.NoTimeout, selector)) : + Record.Exception(() => subject.SelectServer(OperationContext.NoTimeout, selector)); - act.ShouldThrow<MongoIncompatibleDriverException>(); + exception.Should().BeOfType<MongoIncompatibleDriverException>(); _capturedEvents.Next().Should().BeOfType<ClusterSelectingServerEvent>(); _capturedEvents.Next().Should().BeOfType<ClusterSelectingServerFailedEvent>(); @@ -356,7 +309,7 @@ public void SelectServer_should_throw_if_any_servers_are_incompatible(int min, i [Theory] [ParameterAttributeData] - public void SelectServer_should_keep_trying_to_match_by_waiting_on_cluster_description_changes( + public async Task SelectServer_should_keep_trying_to_match_by_waiting_on_cluster_description_changes( [Values(false, true)] bool async) { @@ -369,7 +322,7 @@ public void SelectServer_should_keep_trying_to_match_by_waiting_on_cluster_descr subject.SetServerDescriptions(connecting); _capturedEvents.Clear(); - Task.Run(() => + _ = Task.Run(() => { _capturedEvents.WaitForEventOrThrowIfTimeout<ClusterEnteredSelectionQueueEvent>(TimeSpan.FromSeconds(1)); @@ -384,15 +337,9 @@ public void SelectServer_should_keep_trying_to_match_by_waiting_on_cluster_descr var selector = new DelegateServerSelector((c, s) => s); - IServer result; - if (async) - { - result = subject.SelectServerAsync(selector, CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - result = subject.SelectServer(selector, CancellationToken.None); - } + var result = async ? 
+ await subject.SelectServerAsync(OperationContext.NoTimeout, selector) : + subject.SelectServer(OperationContext.NoTimeout, selector); result.Should().NotBeNull(); @@ -429,15 +376,9 @@ public async Task SelectServer_should_ignore_deprioritized_servers_if_cluster_is { _capturedEvents.Clear(); - IServer result; - if (async) - { - result = await subject.SelectServerAsync(selector, CancellationToken.None); - } - else - { - result = subject.SelectServer(selector, CancellationToken.None); - } + var result = async ? + await subject.SelectServerAsync(OperationContext.NoTimeout, selector) : + subject.SelectServer(OperationContext.NoTimeout, selector); result.Should().NotBeNull(); @@ -469,15 +410,9 @@ public async Task SelectServer_should_return_deprioritized_servers_if_no_other_s var selector = new PriorityServerSelector(deprioritizedServers); _capturedEvents.Clear(); - IServer result; - if (async) - { - result = await subject.SelectServerAsync(selector, CancellationToken.None); - } - else - { - result = subject.SelectServer(selector, CancellationToken.None); - } + var result = async ? 
+ await subject.SelectServerAsync(OperationContext.NoTimeout, selector) : + subject.SelectServer(OperationContext.NoTimeout, selector); result.Should().NotBeNull(); @@ -525,7 +460,7 @@ public void DescriptionChanged_should_be_raised_when_the_description_changes() [Theory] [ParameterAttributeData] - public void SelectServer_should_apply_both_pre_and_post_server_selectors( + public async Task SelectServer_should_apply_both_pre_and_post_server_selectors( [Values(false, true)] bool async) { @@ -556,15 +491,9 @@ public void SelectServer_should_apply_both_pre_and_post_server_selectors( ServerDescriptionHelper.Connected(subject.Description.ClusterId, new DnsEndPoint("localhost", 27020))); _capturedEvents.Clear(); - IServer result; - if (async) - { - result = subject.SelectServerAsync(middleSelector, CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - result = subject.SelectServer(middleSelector, CancellationToken.None); - } + var result = async ? + await subject.SelectServerAsync(OperationContext.NoTimeout, middleSelector) : + subject.SelectServer(OperationContext.NoTimeout, middleSelector); ((DnsEndPoint)result.EndPoint).Port.Should().Be(27020); _capturedEvents.Next().Should().BeOfType<ClusterSelectingServerEvent>(); @@ -574,7 +503,7 @@ public void SelectServer_should_apply_both_pre_and_post_server_selectors( [Theory] [ParameterAttributeData] - public void SelectServer_should_call_custom_selector( + public async Task SelectServer_should_call_custom_selector( [Values(true, false)] bool withEligibleServers, [Values(true, false)] bool async) { @@ -596,10 +525,10 @@ public void SelectServer_should_call_custom_selector( if (withEligibleServers) { - var selectedServer = SelectServerAttempt( - subject, - new DelegateServerSelector((c, s) => s), // do not filter servers - async); + var selector = new DelegateServerSelector((c, s) => s); + var selectedServer = async ? 
+ await subject.SelectServerAsync(OperationContext.NoTimeout, selector): + subject.SelectServer(OperationContext.NoTimeout, selector); var selectedServerPort = ((DnsEndPoint)selectedServer.EndPoint).Port; selectedServerPort.Should().Be(27020); @@ -608,12 +537,10 @@ public void SelectServer_should_call_custom_selector( } else { - var exception = Record.Exception( - () => - SelectServerAttempt( - subject, - new DelegateServerSelector((c, s) => new ServerDescription[0]), // no eligible servers - async)); + var selector = new DelegateServerSelector((c, s) => new ServerDescription[0]); + var exception = async ? + await Record.ExceptionAsync(() => subject.SelectServerAsync(OperationContext.NoTimeout, selector)) : + Record.Exception(() => subject.SelectServer(OperationContext.NoTimeout, selector)); exception.Should().BeOfType<TimeoutException>(); _capturedEvents.Next().Should().BeOfType<ClusterSelectingServerEvent>(); @@ -636,21 +563,6 @@ private StubCluster CreateSubject(TimeSpan? serverSelectionTimeout = null, Clust return new StubCluster(_settings, _mockServerFactory.Object, _capturedEvents, LoggerFactory, clusterType); } - private IServer SelectServerAttempt(Cluster cluster, IServerSelector operationSelector, bool async) - { - if (async) - { - return cluster - .SelectServerAsync(operationSelector, CancellationToken.None) - .GetAwaiter() - .GetResult(); - } - else - { - return cluster.SelectServer(operationSelector, CancellationToken.None); - } - } - // nested types private class StubCluster : Cluster { @@ -674,6 +586,8 @@ public override void Initialize() UpdateClusterDescription(Description.WithType(_clusterType ?? 
Settings.GetInitialClusterType())); } + public override IEnumerable<IClusterableServer> Servers => _servers.Values; + public void RemoveServer(EndPoint endPoint) { _servers.Remove(endPoint); diff --git a/tests/MongoDB.Driver.Tests/Core/Clusters/DnsMonitorTests.cs b/tests/MongoDB.Driver.Tests/Core/Clusters/DnsMonitorTests.cs index 963131dc2b3..8b270d0b09a 100644 --- a/tests/MongoDB.Driver.Tests/Core/Clusters/DnsMonitorTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Clusters/DnsMonitorTests.cs @@ -517,7 +517,7 @@ internal static class DnsMonitorReflector public static IDnsMonitoringCluster _cluster(this DnsMonitor obj) => (IDnsMonitoringCluster)Reflector.GetFieldValue(obj, nameof(_cluster)); public static IDnsResolver _dnsResolver(this DnsMonitor obj) => (IDnsResolver)Reflector.GetFieldValue(obj, nameof(_dnsResolver)); public static string _lookupDomainName(this DnsMonitor obj) => (string)Reflector.GetFieldValue(obj, nameof(_lookupDomainName)); - public static bool _processDnsResultHasEverBeenCalled(this DnsMonitor obj) => (bool)Reflector.GetFieldValue(obj, nameof(_processDnsResultHasEverBeenCalled)); + public static bool _processDnsResultHasEverBeenCalled(this DnsMonitor obj) => (bool)Reflector.GetFieldValue(obj, nameof(_processDnsResultHasEverBeenCalled)); public static string _service(this DnsMonitor obj) => (string)Reflector.GetFieldValue(obj, nameof(_service)); public static Exception _unhandledException(this DnsMonitor obj) => (Exception)Reflector.GetFieldValue(obj, nameof(_unhandledException)); diff --git a/tests/MongoDB.Driver.Tests/Core/Clusters/LoadBalancedClusterTests.cs b/tests/MongoDB.Driver.Tests/Core/Clusters/LoadBalancedClusterTests.cs index c5f65fe9fa5..2c14c12f751 100644 --- a/tests/MongoDB.Driver.Tests/Core/Clusters/LoadBalancedClusterTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Clusters/LoadBalancedClusterTests.cs @@ -17,6 +17,7 @@ using System.Linq; using System.Net; using System.Threading; +using System.Threading.Tasks; using FluentAssertions; 
using MongoDB.Bson.TestHelpers; using MongoDB.Driver.Core.Clusters; @@ -63,8 +64,8 @@ public void Constructor_should_initialize_instance() } [Theory] - [ParameterAttributeData()] - public void Constructor_should_handle_directConnection_correctly([Values(null, false, true)]bool directConnection) + [ParameterAttributeData] + public void Constructor_should_handle_directConnection_correctly([Values(false, true)]bool directConnection) { _settings = _settings.With(directConnection: directConnection); @@ -81,7 +82,7 @@ public void Constructor_should_handle_directConnection_correctly([Values(null, f } [Theory] - [ParameterAttributeData()] + [ParameterAttributeData] public void Constructor_should_handle_loadBalanced_correctly([Values(false, true)] bool loadBalanced) { _settings = _settings.With(loadBalanced: loadBalanced); @@ -309,7 +310,7 @@ public void ProcessDnsResults_should_throw_when_srv_records_number_is_unexpected [Theory] [ParameterAttributeData] - public void SelectServer_should_return_expected_server( + public async Task SelectServer_should_return_expected_server( [Values(ConnectionStringScheme.MongoDB, ConnectionStringScheme.MongoDBPlusSrv)] ConnectionStringScheme connectionStringScheme, [Values(false, true)] bool async) { @@ -326,15 +327,9 @@ public void SelectServer_should_return_expected_server( PublishDescription(_endPoint); - IServer result; - if (async) - { - result = subject.SelectServerAsync(Mock.Of<IServerSelector>(), CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - result = subject.SelectServer(Mock.Of<IServerSelector>(), CancellationToken.None); - } + var result = async ? 
+ await subject.SelectServerAsync(OperationContext.NoTimeout, Mock.Of<IServerSelector>()) : + subject.SelectServer(OperationContext.NoTimeout, Mock.Of<IServerSelector>()); result.EndPoint.Should().Be(_endPoint); } @@ -342,7 +337,7 @@ public void SelectServer_should_return_expected_server( [Theory] [ParameterAttributeData] - public void SelectServer_should_throw_server_selection_timeout_if_server_has_not_been_created_in_time( + public async Task SelectServer_should_throw_server_selection_timeout_if_server_has_not_been_created_in_time( [Values(ConnectionStringScheme.MongoDB, ConnectionStringScheme.MongoDBPlusSrv)] ConnectionStringScheme connectionStringScheme, [Values(false, true)] bool async) { @@ -356,19 +351,13 @@ public void SelectServer_should_throw_server_selection_timeout_if_server_has_not var dnsException = new Exception("Dns exception"); if (connectionStringScheme == ConnectionStringScheme.MongoDBPlusSrv) { - // it has affect only on srv mode + // it has an effect only on srv mode PublishDnsException(subject, dnsException); } - Exception exception; - if (async) - { - exception = Record.Exception(() => subject.SelectServerAsync(Mock.Of<IServerSelector>(), CancellationToken.None).GetAwaiter().GetResult()); - } - else - { - exception = Record.Exception(() => subject.SelectServer(Mock.Of<IServerSelector>(), CancellationToken.None)); - } + var exception = async ? + await Record.ExceptionAsync(() => subject.SelectServerAsync(OperationContext.NoTimeout, Mock.Of<IServerSelector>())) : + Record.Exception(() => subject.SelectServer(OperationContext.NoTimeout, Mock.Of<IServerSelector>())); var ex = exception.Should().BeOfType<TimeoutException>().Subject; ex.Message.Should().StartWith($"A timeout occurred after {serverSelectionTimeout.TotalMilliseconds}ms selecting a server. 
Client view of cluster state is "); @@ -385,7 +374,7 @@ public void SelectServer_should_throw_server_selection_timeout_if_server_has_not [Theory] [ParameterAttributeData] - public void SelectServer_should_be_cancelled_by_cancellationToken( + public async Task SelectServer_should_be_cancelled_by_cancellationToken( [Values(ConnectionStringScheme.MongoDB, ConnectionStringScheme.MongoDBPlusSrv)] ConnectionStringScheme connectionStringScheme, [Values(false, true)] bool async) { @@ -398,14 +387,10 @@ public void SelectServer_should_be_cancelled_by_cancellationToken( Exception exception; using (var cancellationTokenSource = new CancellationTokenSource(TimeSpan.FromMilliseconds(100))) { - if (async) - { - exception = Record.Exception(() => subject.SelectServerAsync(Mock.Of<IServerSelector>(), cancellationTokenSource.Token).GetAwaiter().GetResult()); - } - else - { - exception = Record.Exception(() => subject.SelectServer(Mock.Of<IServerSelector>(), cancellationTokenSource.Token)); - } + var operationContext = new OperationContext(Timeout.InfiniteTimeSpan, cancellationTokenSource.Token); + exception = async ? 
+ await Record.ExceptionAsync(() => subject.SelectServerAsync(operationContext, Mock.Of<IServerSelector>())) : + Record.Exception(() => subject.SelectServer(operationContext, Mock.Of<IServerSelector>())); } exception.Should().BeOfType<OperationCanceledException>(); diff --git a/tests/MongoDB.Driver.Tests/Core/Configuration/ClusterBuilderTests.cs b/tests/MongoDB.Driver.Tests/Core/Configuration/ClusterBuilderTests.cs index 202114bcde5..83730a8f9d1 100644 --- a/tests/MongoDB.Driver.Tests/Core/Configuration/ClusterBuilderTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Configuration/ClusterBuilderTests.cs @@ -55,11 +55,13 @@ public void CreateServerMonitorFactory_should_return_expected_result(int connect var serverMonitorStreamFactory = (TcpStreamFactory)serverMonitorConnectionFactory._streamFactory(); var serverMonitorTcpStreamSettings = serverMonitorStreamFactory._settings(); serverMonitorTcpStreamSettings.ConnectTimeout.Should().Be(expectedServerMonitorConnectTimeout); - serverMonitorTcpStreamSettings.ReadTimeout.Should().Be(expectedServerMonitorSocketTimeout); - serverMonitorTcpStreamSettings.WriteTimeout.Should().Be(expectedServerMonitorSocketTimeout); + serverMonitorTcpStreamSettings.ReadTimeout.Should().Be(null); + serverMonitorTcpStreamSettings.WriteTimeout.Should().Be(null); var serverSettings = result._serverMonitorSettings(); serverSettings.ServerMonitoringMode.Should().Be(ServerMonitoringMode.Stream); + serverSettings.ConnectTimeout.Should().Be(expectedServerMonitorConnectTimeout); + serverSettings.HeartbeatTimeout.Should().Be(expectedServerMonitorSocketTimeout); } [Fact] diff --git a/tests/MongoDB.Driver.Tests/Core/Configuration/ConnectionStringTests.cs b/tests/MongoDB.Driver.Tests/Core/Configuration/ConnectionStringTests.cs index ded3872bc19..a0793d0a299 100644 --- a/tests/MongoDB.Driver.Tests/Core/Configuration/ConnectionStringTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Configuration/ConnectionStringTests.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present 
MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -374,6 +374,10 @@ public void When_nothing_is_specified(string connectionString) subject.MaxPoolSize.Should().Be(null); subject.MinPoolSize.Should().Be(null); subject.Password.Should().BeNull(); + subject.ProxyHost.Should().BeNull(); + subject.ProxyPort.Should().Be(null); + subject.ProxyPassword.Should().BeNull(); + subject.ProxyUsername.Should().BeNull(); subject.ReadConcernLevel.Should().BeNull(); subject.ReadPreference.Should().BeNull(); subject.ReadPreferenceTags.Should().BeNull(); @@ -385,6 +389,7 @@ public void When_nothing_is_specified(string connectionString) subject.Ssl.Should().Be(null); subject.SslVerifyCertificate.Should().Be(null); #pragma warning restore 618 + subject.Timeout.Should().Be(null); subject.Tls.Should().Be(null); subject.TlsInsecure.Should().Be(null); subject.Username.Should().BeNull(); @@ -420,6 +425,10 @@ public void When_everything_is_specified() "maxLifeTime=5ms;" + "maxPoolSize=20;" + "minPoolSize=15;" + + "proxyHost=host.com;" + + "proxyPort=2020;" + + "proxyUsername=user;" + + "proxyPassword=passw;" + "readConcernLevel=majority;" + "readPreference=primary;" + "readPreferenceTags=dc:1;" + @@ -432,6 +441,7 @@ public void When_everything_is_specified() "socketTimeout=40ms;" + "ssl=false;" + "sslVerifyCertificate=true;" + + "timeout=42ms;" + "waitQueueMultiple=10;" + "waitQueueSize=30;" + "waitQueueTimeout=60ms;" + @@ -462,6 +472,10 @@ public void When_everything_is_specified() subject.MaxPoolSize.Should().Be(20); subject.MinPoolSize.Should().Be(15); subject.Password.Should().Be("pass"); + subject.ProxyHost.Should().Be("host.com"); + subject.ProxyPort.Should().Be(2020); + subject.ProxyUsername.Should().Be("user"); + subject.ProxyPassword.Should().Be("passw"); subject.ReadConcernLevel.Should().Be(ReadConcernLevel.Majority); 
subject.ReadPreference.Should().Be(ReadPreferenceMode.Primary); subject.ReadPreferenceTags.Single().Should().Be(new TagSet(new[] { new Tag("dc", "1") })); @@ -476,6 +490,7 @@ public void When_everything_is_specified() subject.Ssl.Should().BeFalse(); subject.SslVerifyCertificate.Should().Be(true); #pragma warning restore 618 + subject.Timeout.Should().Be(TimeSpan.FromMilliseconds(42)); subject.Tls.Should().BeFalse(); subject.TlsInsecure.Should().Be(false); subject.Username.Should().Be("user"); @@ -1048,6 +1063,22 @@ public void When_sslVerifyCertificate_is_specified(string connectionString, bool #pragma warning restore 618 } + [Theory] + [InlineData("mongodb://localhost?timeoutMS=0", -1)] + [InlineData("mongodb://localhost?timeout=0", -1)] + [InlineData("mongodb://localhost?timeout=15ms", 15)] + [InlineData("mongodb://localhost?timeoutMS=15", 15)] + [InlineData("mongodb://localhost?timeout=15", 1000 * 15)] + [InlineData("mongodb://localhost?timeout=15s", 1000 * 15)] + [InlineData("mongodb://localhost?timeout=15m", 1000 * 60 * 15)] + [InlineData("mongodb://localhost?timeout=15h", 1000 * 60 * 60 * 15)] + public void When_timeout_is_specified(string connectionString, int milliseconds) + { + var subject = new ConnectionString(connectionString); + + subject.Timeout.Should().Be(TimeSpan.FromMilliseconds(milliseconds)); + } + [Theory] [InlineData("mongodb://localhost?tls=true", true)] [InlineData("mongodb://localhost?tls=false", false)] @@ -1200,6 +1231,60 @@ public void When_srvServiceName_is_specified_without_a_srv_scheme() exception.Message.Should().Contain("srvServiceName"); } + [Theory] + [InlineData("mongodb://localhost?proxyHost=222.222.222.12", "222.222.222.12", null, null, null)] + [InlineData("mongodb://localhost?proxyHost=222.222.222.12&proxyPort=8080", "222.222.222.12", 8080, null, null)] + [InlineData("mongodb://localhost?proxyHost=example.com", "example.com", null, null, null)] + [InlineData("mongodb://localhost?proxyHost=example.com&proxyPort=8080", 
"example.com", 8080, null, null)] + [InlineData("mongodb://localhost?proxyHost=example.com&proxyUsername=user&proxyPassword=passw", "example.com", null, "user", "passw")] + [InlineData("mongodb://localhost?proxyHost=example.com&proxyPort=8080&proxyUsername=user&proxyPassword=passw", "example.com", 8080, "user", "passw")] + public void When_proxy_parameters_are_specified(string connectionString, string host, int? port, string username, string password) + { + var subject = new ConnectionString(connectionString); + + subject.ProxyHost.Should().Be(host); + subject.ProxyPort.Should().Be(port); + subject.ProxyUsername.Should().Be(username); + subject.ProxyPassword.Should().Be(password); + } + + [Theory] + [InlineData("mongodb://localhost?proxyHost=localhost&proxyHost=localhost", "proxyHost")] + [InlineData("mongodb://localhost?proxyHost=localhost&proxyPort=2222&proxyPort=2222", "proxyPort")] + [InlineData("mongodb://localhost?proxyHost=localhost&proxyUsername=2222&proxyUsername=2222", "proxyUsername")] + [InlineData("mongodb://localhost?proxyHost=localhost&proxyPassword=2222&proxyPassword=2222", "proxyPassword")] + public void When_proxyParameter_is_specified_more_than_once(string connectionString, string parameterName) + { + var exception = Record.Exception(() => new ConnectionString(connectionString)); + + exception.Should().BeOfType<MongoConfigurationException>(); + exception.Message.Should().Contain(parameterName); + exception.Message.Should().Contain("Multiple"); + } + + [Theory] + [InlineData("mongodb://localhost?proxyPort=2020", "proxyPort")] + [InlineData("mongodb://localhost?proxyUsername=user", "proxyUsername")] + [InlineData("mongodb://localhost?proxyPassword=pasw", "proxyPassword")] + public void When_proxyParameter_is_specified_without_proxyHost(string connectionString, string parameterName) + { + var exception = Record.Exception(() => new ConnectionString(connectionString)); + + exception.Should().BeOfType<MongoConfigurationException>(); + 
exception.Message.Should().Contain(parameterName); + } + + [Theory] + [InlineData("mongodb://localhost?proxyHost=host.com&proxyUsername=user")] + [InlineData("mongodb://localhost?proxyHost=host.com&proxyPassword=pasw")] + public void When_proxyPassword_and_proxyUsername_are_not_specified_together(string connectionString) + { + var exception = Record.Exception(() => new ConnectionString(connectionString)); + + exception.Should().BeOfType<MongoConfigurationException>(); + exception.Message.Should().Contain("proxyUsername and proxyPassword"); + } + [Theory] [ParameterAttributeData] public void Valid_srvMaxHosts_with_mongodbsrv_scheme_should_be_valid([Values(0, 42)]int srvMaxHosts) diff --git a/tests/MongoDB.Driver.Tests/Core/Configuration/CryptClientSettingsTests.cs b/tests/MongoDB.Driver.Tests/Core/Configuration/CryptClientSettingsTests.cs index ecf4c978841..c7e816fad08 100644 --- a/tests/MongoDB.Driver.Tests/Core/Configuration/CryptClientSettingsTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Configuration/CryptClientSettingsTests.cs @@ -67,7 +67,7 @@ public void Equals_should_return_true_when_equals() }; var kmsProviders1 = new Dictionary<string, IReadOnlyDictionary<string, object>>() { - { "schemaMapKey", new Dictionary<string, object>() { { "kmsKey", "kmsValue" } } } + { "schemaMapKey", new Dictionary<string, object>() { { "kmsKey", "kmsValue" } } } }; var kmsProviders2 = new Dictionary<string, IReadOnlyDictionary<string, object>>() { diff --git a/tests/MongoDB.Driver.Tests/Core/ConnectionPools/ExclusiveConnectionPoolTests.cs b/tests/MongoDB.Driver.Tests/Core/ConnectionPools/ExclusiveConnectionPoolTests.cs index 56570e094f9..7da9b2f2627 100644 --- a/tests/MongoDB.Driver.Tests/Core/ConnectionPools/ExclusiveConnectionPoolTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/ConnectionPools/ExclusiveConnectionPoolTests.cs @@ -175,7 +175,7 @@ public void AcquireConnection_should_iterate_over_all_dormant_connections() // acquire all connections and return them var 
allConnections = Enumerable.Range(0, connectionsCount) - .Select(i => subject.AcquireConnection(default)) + .Select(i => subject.AcquireConnection(OperationContext.NoTimeout)) .ToArray(); var connectionNotToExpire = allConnections[allConnections.Length / 2].ConnectionId; @@ -216,22 +216,17 @@ public void Constructor_should_throw_when_exceptionHandler_is_null() [Theory] [ParameterAttributeData] - public void AcquireConnection_should_throw_an_InvalidOperationException_if_not_initialized( + public async Task AcquireConnection_should_throw_an_InvalidOperationException_if_not_initialized( [Values(false, true)] bool async) { _capturedEvents.Clear(); - Action act; - if (async) - { - act = () => _subject.AcquireConnectionAsync(CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - act = () => _subject.AcquireConnection(CancellationToken.None); - } - act.ShouldThrow<InvalidOperationException>(); + var exception = async ? + await Record.ExceptionAsync(() => _subject.AcquireConnectionAsync(OperationContext.NoTimeout)) : + Record.Exception(() => _subject.AcquireConnection(OperationContext.NoTimeout)); + + exception.Should().BeOfType<InvalidOperationException>(); _capturedEvents.Next().Should().BeOfType<ConnectionPoolCheckingOutConnectionEvent>(); var connectionPoolCheckingOutConnectionFailedEvent = _capturedEvents.Next(); var e = connectionPoolCheckingOutConnectionFailedEvent.Should().BeOfType<ConnectionPoolCheckingOutConnectionFailedEvent>().Subject; @@ -241,24 +236,19 @@ public void AcquireConnection_should_throw_an_InvalidOperationException_if_not_i [Theory] [ParameterAttributeData] - public void AcquireConnection_should_throw_an_ObjectDisposedException_after_disposing( + public async Task AcquireConnection_should_throw_an_ObjectDisposedException_after_disposing( [Values(false, true)] bool async) { _capturedEvents.Clear(); _subject.Dispose(); - Action act; - if (async) - { - act = () => 
_subject.AcquireConnectionAsync(CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - act = () => _subject.AcquireConnection(CancellationToken.None); - } + var exception = async ? + await Record.ExceptionAsync(() => _subject.AcquireConnectionAsync(OperationContext.NoTimeout)) : + Record.Exception(() => _subject.AcquireConnection(OperationContext.NoTimeout)); + + exception.Should().BeOfType<ObjectDisposedException>(); - act.ShouldThrow<ObjectDisposedException>(); _capturedEvents.Next().Should().BeOfType<ConnectionPoolClosingEvent>(); _capturedEvents.Next().Should().BeOfType<ConnectionPoolClosedEvent>(); _capturedEvents.Next().Should().BeOfType<ConnectionPoolCheckingOutConnectionEvent>(); @@ -270,22 +260,16 @@ public void AcquireConnection_should_throw_an_ObjectDisposedException_after_disp [Theory] [ParameterAttributeData] - public void AcquireConnection_should_return_a_connection( + public async Task AcquireConnection_should_return_a_connection( [Values(false, true)] bool async) { InitializeAndWait(); _capturedEvents.Clear(); - IConnectionHandle connection; - if (async) - { - connection = _subject.AcquireConnectionAsync(CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - connection = _subject.AcquireConnection(CancellationToken.None); - } + var connection = async ? 
+ await _subject.AcquireConnectionAsync(OperationContext.NoTimeout) : + _subject.AcquireConnection(OperationContext.NoTimeout); connection.Should().NotBeNull(); _subject.AvailableCount.Should().Be(_settings.MaxConnections - 1); @@ -328,10 +312,10 @@ public async Task AcquireConnection_should_invoke_error_handling_before_releasin .Setup(c => c.Settings) .Returns(new ConnectionSettings()); connectionMock - .Setup(c => c.Open(It.IsAny<CancellationToken>())) + .Setup(c => c.Open(It.IsAny<OperationContext>())) .Throws(exception); connectionMock - .Setup(c => c.OpenAsync(It.IsAny<CancellationToken>())) + .Setup(c => c.OpenAsync(It.IsAny<OperationContext>())) .Throws(exception); return connectionMock.Object; @@ -345,27 +329,18 @@ public async Task AcquireConnection_should_invoke_error_handling_before_releasin subject.Initialize(); subject.SetReady(); - try - { - if (async) - { - _ = await subject.AcquireConnectionAsync(default); - } - else - { - _ = subject.AcquireConnection(default); - } - } - catch (MongoConnectionException) - { - subject.AvailableCount.Should().Be(maxConnections); - mockConnectionExceptionHandler.Verify(handler => handler.HandleExceptionOnOpen(exception), Times.Once); - } + var resultException = async ? 
+ await Record.ExceptionAsync(() => subject.AcquireConnectionAsync(OperationContext.NoTimeout)) : + Record.Exception(() => subject.AcquireConnection(OperationContext.NoTimeout)); + + resultException.Should().BeOfType<MongoConnectionException>(); + subject.AvailableCount.Should().Be(maxConnections); + mockConnectionExceptionHandler.Verify(handler => handler.HandleExceptionOnOpen(exception), Times.Once); } [Theory] [ParameterAttributeData] - internal void AcquireConnection_should_track_checked_out_reasons( + internal async Task AcquireConnection_should_track_checked_out_reasons( [Values(CheckOutReason.Cursor, CheckOutReason.Transaction)] CheckOutReason reason, [Values(1, 3, 5)] int attempts, [Values(false, true)] bool async) @@ -390,15 +365,9 @@ internal void AcquireConnection_should_track_checked_out_reasons( List<IConnectionHandle> connections = new(); for (int attempt = 1; attempt <= attempts; attempt++) { - IConnectionHandle connection; - if (async) - { - connection = subject.AcquireConnectionAsync(CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - connection = subject.AcquireConnection(CancellationToken.None); - } + var connection = async ? 
+ await subject.AcquireConnectionAsync(OperationContext.NoTimeout) : + subject.AcquireConnection(OperationContext.NoTimeout); ((ICheckOutReasonTracker)connection).SetCheckOutReasonIfNotAlreadySet(reason); connections.Add(connection); @@ -448,7 +417,7 @@ IEnumerable<CheckOutReason> GetEnumItemsExcept(CheckOutReason reason) [Theory] [ParameterAttributeData] - public void AcquireConnection_should_increase_count_up_to_the_max_number_of_connections( + public async Task AcquireConnection_should_increase_count_up_to_the_max_number_of_connections( [Values(false, true)] bool async) { @@ -459,15 +428,9 @@ public void AcquireConnection_should_increase_count_up_to_the_max_number_of_conn for (int i = 0; i < _settings.MaxConnections; i++) { - IConnection connection; - if (async) - { - connection = _subject.AcquireConnectionAsync(CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - connection = _subject.AcquireConnection(CancellationToken.None); - } + var connection = async ? + await _subject.AcquireConnectionAsync(OperationContext.NoTimeout) : + _subject.AcquireConnection(OperationContext.NoTimeout); connections.Add(connection); } @@ -494,21 +457,15 @@ public void AcquireConnection_should_increase_count_up_to_the_max_number_of_conn [Theory] [ParameterAttributeData] - public void AcquiredConnection_should_return_connections_to_the_pool_when_disposed( + public async Task AcquiredConnection_should_return_connections_to_the_pool_when_disposed( [Values(false, true)] bool async) { InitializeAndWait(); - IConnectionHandle connection; - if (async) - { - connection = _subject.AcquireConnectionAsync(CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - connection = _subject.AcquireConnection(CancellationToken.None); - } + var connection = async ? 
+ await _subject.AcquireConnectionAsync(OperationContext.NoTimeout) : + _subject.AcquireConnection(OperationContext.NoTimeout); _capturedEvents.Clear(); @@ -523,7 +480,7 @@ public void AcquiredConnection_should_return_connections_to_the_pool_when_dispos [Theory] [ParameterAttributeData] - public void AcquiredConnection_should_not_return_connections_to_the_pool_when_disposed_and_expired( + public async Task AcquiredConnection_should_not_return_connections_to_the_pool_when_disposed_and_expired( [Values(false, true)] bool async) { @@ -538,15 +495,9 @@ public void AcquiredConnection_should_not_return_connections_to_the_pool_when_di InitializeAndWait(); - IConnectionHandle connection; - if (async) - { - connection = _subject.AcquireConnectionAsync(CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - connection = _subject.AcquireConnection(CancellationToken.None); - } + var connection = async ? + await _subject.AcquireConnectionAsync(OperationContext.NoTimeout) : + _subject.AcquireConnection(OperationContext.NoTimeout); _capturedEvents.Clear(); @@ -566,7 +517,7 @@ public void AcquiredConnection_should_not_return_connections_to_the_pool_when_di [Theory] [ParameterAttributeData] - public void AcquireConnection_should_throw_a_TimeoutException_when_all_connections_are_checked_out( + public async Task AcquireConnection_should_throw_a_TimeoutException_when_all_connections_are_checked_out( [Values(false, true)] bool async) { @@ -574,30 +525,18 @@ public void AcquireConnection_should_throw_a_TimeoutException_when_all_connectio var connections = new List<IConnection>(); for (int i = 0; i < _settings.MaxConnections; i++) { - IConnection connection; - if (async) - { - connection = _subject.AcquireConnectionAsync(CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - connection = _subject.AcquireConnection(CancellationToken.None); - } + var connection = async ? 
+ await _subject.AcquireConnectionAsync(OperationContext.NoTimeout) : + _subject.AcquireConnection(OperationContext.NoTimeout); connections.Add(connection); } _capturedEvents.Clear(); - Action act; - if (async) - { - act = () => _subject.AcquireConnectionAsync(CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - act = () => _subject.AcquireConnection(CancellationToken.None); - } + var exception = async ? + await Record.ExceptionAsync(() => _subject.AcquireConnectionAsync(OperationContext.NoTimeout)) : + Record.Exception(() => _subject.AcquireConnection(OperationContext.NoTimeout)); - act.ShouldThrow<TimeoutException>(); + exception.Should().BeOfType<TimeoutException>(); _capturedEvents.Next().Should().BeOfType<ConnectionPoolCheckingOutConnectionEvent>(); var connectionPoolCheckingOutConnectionFailedEvent = _capturedEvents.Next(); @@ -643,7 +582,7 @@ public void AcquireConnection_should_timeout_when_non_sufficient_reused_connecti .Setup(c => c.Settings) .Returns(new ConnectionSettings()); connectionMock - .Setup(c => c.Open(It.IsAny<CancellationToken>())) + .Setup(c => c.Open(It.IsAny<OperationContext>())) .Callback(() => { if (establishingCount.CurrentCount > 0) @@ -654,7 +593,7 @@ public void AcquireConnection_should_timeout_when_non_sufficient_reused_connecti blockEstablishmentEvent.Wait(); }); connectionMock - .Setup(c => c.OpenAsync(It.IsAny<CancellationToken>())) + .Setup(c => c.OpenAsync(It.IsAny<OperationContext>())) .Returns(() => { if (establishingCount.CurrentCount > 0) @@ -681,7 +620,6 @@ public void AcquireConnection_should_timeout_when_non_sufficient_reused_connecti // block further establishments blockEstablishmentEvent.Reset(); - var allConnections = new List<IConnection>(); var actualTimeouts = 0; var expectedTimeouts = maxAcquiringCount - maxConnecting; @@ -735,7 +673,7 @@ public void AcquireConnection_should_timeout_when_non_sufficient_reused_connecti [Theory] [ParameterAttributeData] - public void 
AcquiredConnection_should_not_throw_exceptions_when_disposed_after_the_pool_was_disposed( + public async Task AcquiredConnection_should_not_throw_exceptions_when_disposed_after_the_pool_was_disposed( [Values(false, true)] bool async) { @@ -744,13 +682,13 @@ public void AcquiredConnection_should_not_throw_exceptions_when_disposed_after_t IConnectionHandle connection2; if (async) { - connection1 = _subject.AcquireConnectionAsync(CancellationToken.None).GetAwaiter().GetResult(); - connection2 = _subject.AcquireConnectionAsync(CancellationToken.None).GetAwaiter().GetResult(); + connection1 = await _subject.AcquireConnectionAsync(OperationContext.NoTimeout); + connection2 = await _subject.AcquireConnectionAsync(OperationContext.NoTimeout); } else { - connection1 = _subject.AcquireConnection(CancellationToken.None); - connection2 = _subject.AcquireConnection(CancellationToken.None); + connection1 = _subject.AcquireConnection(OperationContext.NoTimeout); + connection2 = _subject.AcquireConnection(OperationContext.NoTimeout); } _capturedEvents.Clear(); @@ -818,14 +756,14 @@ public void Acquire_and_release_connection_stress_test( .Setup(c => c.Settings) .Returns(new ConnectionSettings()); connectionMock - .Setup(c => c.Open(It.IsAny<CancellationToken>())) + .Setup(c => c.Open(It.IsAny<OperationContext>())) .Callback(() => { var sleepMS = random.Next(minEstablishingTime, maxEstablishingTime); Thread.Sleep(sleepMS); }); connectionMock - .Setup(c => c.OpenAsync(It.IsAny<CancellationToken>())) + .Setup(c => c.OpenAsync(It.IsAny<OperationContext>())) .Returns(async () => { var sleepMS = random.Next(minEstablishingTime, maxEstablishingTime); @@ -916,22 +854,16 @@ public void Clear_should_throw_an_ObjectDisposedException_after_disposing() [Theory] [ParameterAttributeData] - public void Clear_should_cause_existing_connections_to_be_expired( + public async Task Clear_should_cause_existing_connections_to_be_expired( [Values(false, true)] bool async) { _subject.Initialize(); 
_subject.SetReady(); - IConnectionHandle connection; - if (async) - { - connection = _subject.AcquireConnectionAsync(CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - connection = _subject.AcquireConnection(CancellationToken.None); - } + var connection = async ? + await _subject.AcquireConnectionAsync(OperationContext.NoTimeout) : + _subject.AcquireConnection(OperationContext.NoTimeout); connection.IsExpired.Should().BeFalse(); _subject.Clear(closeInUseConnections: false); @@ -940,7 +872,7 @@ public void Clear_should_cause_existing_connections_to_be_expired( [Theory] [ParameterAttributeData] - public void Clear_with_serviceId_should_cause_only_expected_connections_to_be_expired( + public async Task Clear_with_serviceId_should_cause_only_expected_connections_to_be_expired( [Values(false, true)] bool async) { var serviceId = ObjectId.GenerateNewId(); @@ -970,7 +902,9 @@ public void Clear_with_serviceId_should_cause_only_expected_connections_to_be_ex subject.Initialize(); subject.SetReady(); - var connection = AcquireConnection(subject, async); + var connection = async ? 
+ await subject.AcquireConnectionAsync(OperationContext.NoTimeout) : + subject.AcquireConnection(OperationContext.NoTimeout); connection.IsExpired.Should().BeFalse(); var randomServiceId = ObjectId.GenerateNewId(); @@ -1036,7 +970,7 @@ public void In_use_marker_should_work_as_expected( var mockConnection = new Mock<IConnection>(); mockConnection.SetupGet(c => c.ConnectionId).Returns(new ConnectionId(serverId, ci)); mockConnection - .Setup(c => c.Open(It.IsAny<CancellationToken>())) + .Setup(c => c.Open(It.IsAny<OperationContext>())) .Callback(() => { if (minPoolSize == 0 || ci == 2) // ignore connection 1 created in minPoolSize logic @@ -1050,7 +984,7 @@ public void In_use_marker_should_work_as_expected( }); mockConnection - .Setup(c => c.OpenAsync(It.IsAny<CancellationToken>())) + .Setup(c => c.OpenAsync(It.IsAny<OperationContext>())) .Returns(async () => { if (minPoolSize == 0 || ci == 2) // ignore connection 1 created in minPoolSize logic @@ -1142,7 +1076,7 @@ public void Maintenance_should_call_connection_dispose_when_connection_authentic var authenticationException = new MongoAuthenticationException(connectionId, "test message"); var authenticationFailedConnection = new Mock<IConnection>(); authenticationFailedConnection - .Setup(c => c.Open(It.IsAny<CancellationToken>())) // an authentication exception is thrown from _connectionInitializer.InitializeConnection + .Setup(c => c.Open(It.IsAny<OperationContext>())) // an authentication exception is thrown from _connectionInitializer.InitializeConnection // that in turn is called from OpenAsync .Throws(authenticationException); authenticationFailedConnection.SetupGet(c => c.ConnectionId).Returns(connectionId); @@ -1232,7 +1166,7 @@ public void MaxConnecting_queue_should_be_cleared_on_pool_clear( .Returns(new ConnectionSettings()); connectionMock - .Setup(c => c.Open(It.IsAny<CancellationToken>())) + .Setup(c => c.Open(It.IsAny<OperationContext>())) .Callback(() => { allEstablishing.Signal(); @@ -1240,7 +1174,7 @@ 
public void MaxConnecting_queue_should_be_cleared_on_pool_clear( }); connectionMock - .Setup(c => c.OpenAsync(It.IsAny<CancellationToken>())) + .Setup(c => c.OpenAsync(It.IsAny<OperationContext>())) .Returns(() => { allEstablishing.Signal(); @@ -1490,7 +1424,7 @@ public void WaitQueue_should_throw_when_full( .Returns(new ConnectionSettings()); connectionMock - .Setup(c => c.Open(It.IsAny<CancellationToken>())) + .Setup(c => c.Open(It.IsAny<OperationContext>())) .Callback(() => { allAcquiringCountdownEvent.Signal(); @@ -1498,7 +1432,7 @@ public void WaitQueue_should_throw_when_full( }); connectionMock - .Setup(c => c.OpenAsync(It.IsAny<CancellationToken>())) + .Setup(c => c.OpenAsync(It.IsAny<OperationContext>())) .Returns(() => { allAcquiringCountdownEvent.Signal(); @@ -1582,7 +1516,7 @@ public void WaitQueue_should_be_cleared_on_pool_clear( .Returns(new ConnectionSettings()); connectionMock - .Setup(c => c.Open(It.IsAny<CancellationToken>())) + .Setup(c => c.Open(It.IsAny<OperationContext>())) .Callback(() => { allEstablishing.Signal(); @@ -1590,7 +1524,7 @@ public void WaitQueue_should_be_cleared_on_pool_clear( }); connectionMock - .Setup(c => c.OpenAsync(It.IsAny<CancellationToken>())) + .Setup(c => c.OpenAsync(It.IsAny<OperationContext>())) .Returns(() => { allEstablishing.Signal(); @@ -1686,13 +1620,13 @@ private IConnection AcquireConnection(ExclusiveConnectionPool subject, bool asyn if (async) { return subject - .AcquireConnectionAsync(CancellationToken.None) + .AcquireConnectionAsync(OperationContext.NoTimeout) .GetAwaiter() .GetResult(); } else { - return subject.AcquireConnection(CancellationToken.None); + return subject.AcquireConnection(OperationContext.NoTimeout); } } diff --git a/tests/MongoDB.Driver.Tests/Core/ConnectionPools/MaintenanceHelperTests.cs b/tests/MongoDB.Driver.Tests/Core/ConnectionPools/MaintenanceHelperTests.cs index 4e8a23a13fb..2b4970bd723 100644 --- a/tests/MongoDB.Driver.Tests/Core/ConnectionPools/MaintenanceHelperTests.cs +++ 
b/tests/MongoDB.Driver.Tests/Core/ConnectionPools/MaintenanceHelperTests.cs @@ -168,7 +168,7 @@ public void Stop_should_trigger_immediate_maintenance_call( IConnection acquiredConnection = null; if (checkOutConnection) { - acquiredConnection = pool.AcquireConnection(CancellationToken.None); + acquiredConnection = pool.AcquireConnection(OperationContext.NoTimeout); acquiredConnection.ConnectionId.LongLocalValue.Should().Be(1); } @@ -178,7 +178,7 @@ public void Stop_should_trigger_immediate_maintenance_call( var requestInPlayTimeout = TimeSpan.FromMilliseconds(100); if (!closeInUseConnection && checkOutConnection) { - // connection in progress should be not touched + // connection in progress should be not touched Thread.Sleep(requestInPlayTimeout); } else diff --git a/tests/MongoDB.Driver.Tests/Core/Connections/BinaryConnectionFactoryTests.cs b/tests/MongoDB.Driver.Tests/Core/Connections/BinaryConnectionFactoryTests.cs index 2483adf1922..9277d7bc976 100644 --- a/tests/MongoDB.Driver.Tests/Core/Connections/BinaryConnectionFactoryTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Connections/BinaryConnectionFactoryTests.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -15,6 +15,7 @@ using System; using System.Net; +using System.Threading; using FluentAssertions; using MongoDB.Bson.TestHelpers; using MongoDB.Driver.Core.Clusters; @@ -29,19 +30,22 @@ namespace MongoDB.Driver.Core.Connections public class BinaryConnectionFactoryTests { [Fact] - public void Constructor_should_throw_an_ArgumentNullException_when_connectionSettings_is_null() + public void Constructor_should_throw_an_ArgumentNullException_when_settings_is_null() { var streamFactory = new Mock<IStreamFactory>().Object; var eventSubscriber = new Mock<IEventSubscriber>().Object; - Action act = () => new BinaryConnectionFactory( + var exception = Record.Exception(() => new BinaryConnectionFactory( settings: null, streamFactory, eventSubscriber, serverApi: null, - loggerFactory: null); + loggerFactory: null, + socketReadTimeout: Timeout.InfiniteTimeSpan, + socketWriteTimeout: Timeout.InfiniteTimeSpan)); - act.ShouldThrow<ArgumentNullException>(); + exception.Should().BeOfType<ArgumentNullException>().Subject + .ParamName.Should().Be("settings"); } [Fact] @@ -49,14 +53,17 @@ public void Constructor_should_throw_an_ArgumentNullException_when_streamFactory { var eventSubscriber = new Mock<IEventSubscriber>().Object; - Action act = () => new BinaryConnectionFactory( - new ConnectionSettings(), + var exception = Record.Exception(() => new BinaryConnectionFactory( + settings: new ConnectionSettings(), streamFactory: null, eventSubscriber, serverApi: null, - loggerFactory: null); + loggerFactory: null, + socketReadTimeout: Timeout.InfiniteTimeSpan, + socketWriteTimeout: Timeout.InfiniteTimeSpan)); - act.ShouldThrow<ArgumentNullException>(); + exception.Should().BeOfType<ArgumentNullException>().Subject + .ParamName.Should().Be("streamFactory"); } [Fact] @@ -69,10 +76,14 @@ public void CreateConnection_should_throw_an_ArgumentNullException_when_serverId streamFactory, eventSubscriber, serverApi: null, - loggerFactory: null); + loggerFactory: null, + socketReadTimeout: 
Timeout.InfiniteTimeSpan, + socketWriteTimeout: Timeout.InfiniteTimeSpan); + + var exception = Record.Exception(() => subject.CreateConnection(null, new DnsEndPoint("localhost", 27017))); - Action act = () => subject.CreateConnection(null, new DnsEndPoint("localhost", 27017)); - act.ShouldThrow<ArgumentNullException>(); + exception.Should().BeOfType<ArgumentNullException>().Subject + .ParamName.Should().Be("serverId"); } [Fact] @@ -85,12 +96,15 @@ public void CreateConnection_should_throw_an_ArgumentNullException_when_endPoint streamFactory, eventSubscriber, serverApi: null, - loggerFactory: null); + loggerFactory: null, + socketReadTimeout: Timeout.InfiniteTimeSpan, + socketWriteTimeout: Timeout.InfiniteTimeSpan); var serverId = new ServerId(new ClusterId(), new DnsEndPoint("localhost", 27017)); + var exception = Record.Exception(() => subject.CreateConnection(serverId, null)); - Action act = () => subject.CreateConnection(serverId, null); - act.ShouldThrow<ArgumentNullException>(); + exception.Should().BeOfType<ArgumentNullException>().Subject + .ParamName.Should().Be("endPoint"); } [Fact] @@ -104,7 +118,9 @@ public void CreateConnection_should_return_a_BinaryConnection() streamFactory, eventSubscriber, serverApi, - loggerFactory: null); + loggerFactory: null, + socketReadTimeout: Timeout.InfiniteTimeSpan, + socketWriteTimeout: Timeout.InfiniteTimeSpan); var serverId = new ServerId(new ClusterId(), new DnsEndPoint("localhost", 27017)); diff --git a/tests/MongoDB.Driver.Tests/Core/Connections/BinaryConnectionTests.cs b/tests/MongoDB.Driver.Tests/Core/Connections/BinaryConnectionTests.cs index 9a904c3db08..f9e7cbdb952 100644 --- a/tests/MongoDB.Driver.Tests/Core/Connections/BinaryConnectionTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Connections/BinaryConnectionTests.cs @@ -14,7 +14,6 @@ */ using System; -using System.Collections.Generic; using System.IO; using System.Net; using System.Net.Sockets; @@ -59,6 +58,13 @@ public 
BinaryConnectionTests(Xunit.Abstractions.ITestOutputHelper output) : base { _capturedEvents = new EventCapturer(); _mockStreamFactory = new Mock<IStreamFactory>(); + var stream = new MemoryStream(); + _mockStreamFactory + .Setup(s => s.CreateStream(It.IsAny<EndPoint>(), It.IsAny<CancellationToken>())) + .Returns(stream); + _mockStreamFactory + .Setup(s => s.CreateStreamAsync(It.IsAny<EndPoint>(), It.IsAny<CancellationToken>())) + .ReturnsAsync(stream); _endPoint = new DnsEndPoint("localhost", 27017); _serverId = new ServerId(new ClusterId(), _endPoint); @@ -70,16 +76,16 @@ public BinaryConnectionTests(Xunit.Abstractions.ITestOutputHelper output) : base _mockConnectionInitializer = new Mock<IConnectionInitializer>(); _mockConnectionInitializer - .Setup(i => i.SendHello(It.IsAny<IConnection>(), CancellationToken.None)) + .Setup(i => i.SendHello(It.IsAny<OperationContext>(), It.IsAny<IConnection>())) .Returns(_connectionInitializerContext); _mockConnectionInitializer - .Setup(i => i.Authenticate(It.IsAny<IConnection>(), It.IsAny<ConnectionInitializerContext>(), CancellationToken.None)) + .Setup(i => i.Authenticate(It.IsAny<OperationContext>(), It.IsAny<IConnection>(), It.IsAny<ConnectionInitializerContext>())) .Returns(_connectionInitializerContextAfterAuthentication); _mockConnectionInitializer - .Setup(i => i.SendHelloAsync(It.IsAny<IConnection>(), CancellationToken.None)) + .Setup(i => i.SendHelloAsync(It.IsAny<OperationContext>(), It.IsAny<IConnection>())) .ReturnsAsync(_connectionInitializerContext); _mockConnectionInitializer - .Setup(i => i.AuthenticateAsync(It.IsAny<IConnection>(), It.IsAny<ConnectionInitializerContext>(), CancellationToken.None)) + .Setup(i => i.AuthenticateAsync(It.IsAny<OperationContext>(), It.IsAny<IConnection>(), It.IsAny<ConnectionInitializerContext>())) .ReturnsAsync(_connectionInitializerContextAfterAuthentication); _subject = new BinaryConnection( @@ -89,7 +95,9 @@ public BinaryConnectionTests(Xunit.Abstractions.ITestOutputHelper 
output) : base streamFactory: _mockStreamFactory.Object, connectionInitializer: _mockConnectionInitializer.Object, eventSubscriber: _capturedEvents, - LoggerFactory); + loggerFactory: LoggerFactory, + socketReadTimeout: TimeSpan.FromMilliseconds(1000), + socketWriteTimeout: TimeSpan.FromMilliseconds(1000)); } [Fact] @@ -104,7 +112,7 @@ public void Dispose_should_raise_the_correct_events() [Theory] [ParameterAttributeData] - public void Open_should_always_create_description_if_handshake_was_successful([Values(false, true)] bool async) + public async Task Open_should_always_create_description_if_handshake_was_successful([Values(false, true)] bool async) { var serviceId = ObjectId.GenerateNewId(); var connectionDescription = new ConnectionDescription( @@ -113,27 +121,21 @@ public void Open_should_always_create_description_if_handshake_was_successful([V var socketException = new SocketException(); _mockConnectionInitializer - .Setup(i => i.SendHello(It.IsAny<IConnection>(), CancellationToken.None)) + .Setup(i => i.SendHello(It.IsAny<OperationContext>(), It.IsAny<IConnection>())) .Returns(new ConnectionInitializerContext(connectionDescription, null)); _mockConnectionInitializer - .Setup(i => i.SendHelloAsync(It.IsAny<IConnection>(), CancellationToken.None)) + .Setup(i => i.SendHelloAsync(It.IsAny<OperationContext>(), It.IsAny<IConnection>())) .ReturnsAsync(new ConnectionInitializerContext(connectionDescription, null)); _mockConnectionInitializer - .Setup(i => i.Authenticate(It.IsAny<IConnection>(), It.IsAny<ConnectionInitializerContext>(), CancellationToken.None)) + .Setup(i => i.Authenticate(It.IsAny<OperationContext>(), It.IsAny<IConnection>(), It.IsAny<ConnectionInitializerContext>())) .Throws(socketException); _mockConnectionInitializer - .Setup(i => i.AuthenticateAsync(It.IsAny<IConnection>(), It.IsAny<ConnectionInitializerContext>(), CancellationToken.None)) + .Setup(i => i.AuthenticateAsync(It.IsAny<OperationContext>(), It.IsAny<IConnection>(), 
It.IsAny<ConnectionInitializerContext>())) .ThrowsAsync(socketException); - Exception exception; - if (async) - { - exception = Record.Exception(() => _subject.OpenAsync(CancellationToken.None).GetAwaiter().GetResult()); - } - else - { - exception = Record.Exception(() => _subject.Open(CancellationToken.None)); - } + var exception = async ? + await Record.ExceptionAsync(() => _subject.OpenAsync(OperationContext.NoTimeout)) : + Record.Exception(() => _subject.Open(OperationContext.NoTimeout)); _subject.Description.Should().Be(connectionDescription); var ex = exception.Should().BeOfType<MongoConnectionException>().Subject; @@ -153,10 +155,10 @@ public async Task Open_should_create_authenticators_only_once( var mockStreamFactory = new Mock<IStreamFactory>(); using var stream = new IgnoreWritesMemoryStream(memoryStream.ToArray()); mockStreamFactory - .Setup(s => s.CreateStream(It.IsAny<EndPoint>(), CancellationToken.None)) + .Setup(s => s.CreateStream(It.IsAny<EndPoint>(), It.IsAny<CancellationToken>())) .Returns(stream); mockStreamFactory - .Setup(s => s.CreateStreamAsync(It.IsAny<EndPoint>(), CancellationToken.None)) + .Setup(s => s.CreateStreamAsync(It.IsAny<EndPoint>(), It.IsAny<CancellationToken>())) .ReturnsAsync(stream); var connectionInitializer = new ConnectionInitializer( @@ -167,7 +169,7 @@ public async Task Open_should_create_authenticators_only_once( var authenticatorMock = new Mock<IAuthenticator>(); authenticatorMock - .Setup(a => a.CustomizeInitialHelloCommand(It.IsAny<BsonDocument>(), It.IsAny<CancellationToken>())) + .Setup(a => a.CustomizeInitialHelloCommand(It.IsAny<OperationContext>(), It.IsAny<BsonDocument>())) .Returns(new BsonDocument(OppressiveLanguageConstants.LegacyHelloCommandName, 1)); var authenticatorFactoryMock = new Mock<IAuthenticatorFactory>(); @@ -182,15 +184,17 @@ public async Task Open_should_create_authenticators_only_once( streamFactory: mockStreamFactory.Object, connectionInitializer: connectionInitializer, eventSubscriber: 
_capturedEvents, - LoggerFactory); + loggerFactory: LoggerFactory, + socketReadTimeout: Timeout.InfiniteTimeSpan, + socketWriteTimeout: Timeout.InfiniteTimeSpan); if (async) { - await subject.OpenAsync(CancellationToken.None); + await subject.OpenAsync(OperationContext.NoTimeout); } else { - subject.Open(CancellationToken.None); + subject.Open(OperationContext.NoTimeout); } authenticatorFactoryMock.Verify(f => f.Create(), Times.Once()); @@ -207,52 +211,37 @@ ResponseMessage CreateResponseMessage() [Theory] [ParameterAttributeData] - public void Open_should_throw_an_ObjectDisposedException_if_the_connection_is_disposed( + public async Task Open_should_throw_an_ObjectDisposedException_if_the_connection_is_disposed( [Values(false, true)] bool async) { _subject.Dispose(); - Action act; - if (async) - { - act = () => _subject.OpenAsync(CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - act = () => _subject.Open(CancellationToken.None); - } + var exception = async ? + await Record.ExceptionAsync(() => _subject.OpenAsync(OperationContext.NoTimeout)) : + Record.Exception(() => _subject.Open(OperationContext.NoTimeout)); - act.ShouldThrow<ObjectDisposedException>(); + exception.Should().BeOfType<ObjectDisposedException>(); } [Theory] [ParameterAttributeData] - public void Open_should_raise_the_correct_events_upon_failure( + public async Task Open_should_raise_the_correct_events_upon_failure( [Values(false, true)] bool async) { - Action act; - if (async) - { - var result = new TaskCompletionSource<ConnectionInitializerContext>(); - result.SetException(new SocketException()); - _mockConnectionInitializer.Setup(i => i.SendHelloAsync(It.IsAny<IConnection>(), It.IsAny<CancellationToken>())) - .Returns(result.Task); + _mockConnectionInitializer.Setup(i => i.SendHelloAsync(It.IsAny<OperationContext>(), It.IsAny<IConnection>())) + .Throws<SocketException>(); + _mockConnectionInitializer.Setup(i => i.SendHello(It.IsAny<OperationContext>(), It.IsAny<IConnection>())) + 
.Throws<SocketException>(); - act = () => _subject.OpenAsync(CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - _mockConnectionInitializer.Setup(i => i.SendHello(It.IsAny<IConnection>(), It.IsAny<CancellationToken>())) - .Throws<SocketException>(); - - act = () => _subject.Open(CancellationToken.None); - } + var exception = async ? + await Record.ExceptionAsync(() => _subject.OpenAsync(OperationContext.NoTimeout)) : + Record.Exception(() => _subject.Open(OperationContext.NoTimeout)); - act.ShouldThrow<MongoConnectionException>() - .WithInnerException<SocketException>() - .And.ConnectionId.Should().Be(_subject.ConnectionId); + exception.Should().BeOfType<MongoConnectionException>().Subject + .ConnectionId.Should().Be(_subject.ConnectionId); + exception.InnerException.Should().BeOfType<SocketException>(); _capturedEvents.Next().Should().BeOfType<ConnectionOpeningEvent>(); _capturedEvents.Next().Should().BeOfType<ConnectionOpeningFailedEvent>(); @@ -261,17 +250,17 @@ public void Open_should_raise_the_correct_events_upon_failure( [Theory] [ParameterAttributeData] - public void Open_should_setup_the_description( + public async Task Open_should_setup_the_description( [Values(false, true)] bool async) { if (async) { - _subject.OpenAsync(CancellationToken.None).GetAwaiter().GetResult(); + await _subject.OpenAsync(OperationContext.NoTimeout); } else { - _subject.Open(CancellationToken.None); + _subject.Open(OperationContext.NoTimeout); } _subject.Description.Should().NotBeNull(); @@ -291,32 +280,27 @@ public void Open_should_not_complete_the_second_call_until_the_first_is_complete { var task1IsBlocked = false; var completionSource = new TaskCompletionSource<Stream>(); - _mockStreamFactory.Setup(f => f.CreateStream(_endPoint, CancellationToken.None)) - .Returns(() => { task1IsBlocked = true; return completionSource.Task.GetAwaiter().GetResult(); }); - _mockStreamFactory.Setup(f => f.CreateStreamAsync(_endPoint, CancellationToken.None)) - .Returns(() => { 
task1IsBlocked = true; return completionSource.Task; }); - - Task openTask1; - if (async1) - { + _mockStreamFactory.Setup(f => f.CreateStream(_endPoint, It.IsAny<CancellationToken>())) + .Returns(() => + { + task1IsBlocked = true; + return completionSource.Task.GetAwaiter().GetResult(); + }); + _mockStreamFactory.Setup(f => f.CreateStreamAsync(_endPoint, It.IsAny<CancellationToken>())) + .Returns(() => + { + task1IsBlocked = true; + return completionSource.Task; + }); - openTask1 = _subject.OpenAsync(CancellationToken.None); - } - else - { - openTask1 = Task.Run(() => _subject.Open(CancellationToken.None)); - } + var openTask1 = async1 ? + _subject.OpenAsync(OperationContext.NoTimeout) : + Task.Run(() => _subject.Open(OperationContext.NoTimeout)); SpinWait.SpinUntil(() => task1IsBlocked, TimeSpan.FromSeconds(5)).Should().BeTrue(); - Task openTask2; - if (async2) - { - openTask2 = _subject.OpenAsync(CancellationToken.None); - } - else - { - openTask2 = Task.Run(() => _subject.Open(CancellationToken.None)); - } + var openTask2 = async2 ? 
+ _subject.OpenAsync(OperationContext.NoTimeout) : + Task.Run(() => _subject.Open(OperationContext.NoTimeout)); openTask1.IsCompleted.Should().BeFalse(); openTask2.IsCompleted.Should().BeFalse(); @@ -341,32 +325,32 @@ public async Task Reauthentication_should_use_the_same_auth_context_as_in_initia if (async) { - await _subject.OpenAsync(CancellationToken.None); + await _subject.OpenAsync(OperationContext.NoTimeout); } else { - _subject.Open(CancellationToken.None); + _subject.Open(OperationContext.NoTimeout); } _subject._connectionInitializerContext().Should().Be(_connectionInitializerContextAfterAuthentication); if (async) { - await _subject.ReauthenticateAsync(CancellationToken.None); - _mockConnectionInitializer.Verify(c => c.AuthenticateAsync(It.IsAny<IConnection>(), It.Is<ConnectionInitializerContext>(cxt => cxt == _connectionInitializerContext), CancellationToken.None), Times.Exactly(1)); - _mockConnectionInitializer.Verify(c => c.AuthenticateAsync(It.IsAny<IConnection>(), It.Is<ConnectionInitializerContext>(cxt => cxt == _connectionInitializerContextAfterAuthentication), CancellationToken.None), Times.Exactly(1)); + await _subject.ReauthenticateAsync(OperationContext.NoTimeout); + _mockConnectionInitializer.Verify(c => c.AuthenticateAsync(It.IsAny<OperationContext>(), It.IsAny<IConnection>(), _connectionInitializerContext), Times.Once); + _mockConnectionInitializer.Verify(c => c.AuthenticateAsync(It.IsAny<OperationContext>(), It.IsAny<IConnection>(), _connectionInitializerContextAfterAuthentication), Times.Once); } else { - _subject.Reauthenticate(CancellationToken.None); - _mockConnectionInitializer.Verify(c => c.Authenticate(It.IsAny<IConnection>(), It.Is<ConnectionInitializerContext>(cxt => cxt == _connectionInitializerContext), CancellationToken.None), Times.Exactly(1)); - _mockConnectionInitializer.Verify(c => c.Authenticate(It.IsAny<IConnection>(), It.Is<ConnectionInitializerContext>(cxt => cxt == _connectionInitializerContextAfterAuthentication), 
CancellationToken.None), Times.Exactly(1)); + _subject.Reauthenticate(OperationContext.NoTimeout); + _mockConnectionInitializer.Verify(c => c.Authenticate(It.IsAny<OperationContext>(), It.IsAny<IConnection>(), _connectionInitializerContext), Times.Once); + _mockConnectionInitializer.Verify(c => c.Authenticate(It.IsAny<OperationContext>(), It.IsAny<IConnection>(), _connectionInitializerContextAfterAuthentication), Times.Once); } } [Theory] [ParameterAttributeData] - public void ReceiveMessage_should_throw_a_FormatException_when_message_is_an_invalid_size( + public async Task ReceiveMessage_should_throw_a_FormatException_when_message_is_an_invalid_size( [Values(-1, 48000001)] int length, [Values(false, true)] @@ -375,29 +359,21 @@ public void ReceiveMessage_should_throw_a_FormatException_when_message_is_an_inv using (var stream = new BlockingMemoryStream()) { var bytes = BitConverter.GetBytes(length); + if (!BitConverter.IsLittleEndian) + { + Array.Reverse(bytes); + } stream.Write(bytes, 0, bytes.Length); stream.Seek(0, SeekOrigin.Begin); + + _mockStreamFactory.Setup(f => f.CreateStreamAsync(_endPoint, It.IsAny<CancellationToken>())).ReturnsAsync(stream); + _mockStreamFactory.Setup(f => f.CreateStream(_endPoint, It.IsAny<CancellationToken>())).Returns(stream); + await _subject.OpenAsync(OperationContext.NoTimeout); var encoderSelector = new ReplyMessageEncoderSelector<BsonDocument>(BsonDocumentSerializer.Instance); - Exception exception; - if (async) - { - _mockStreamFactory - .Setup(f => f.CreateStreamAsync(_endPoint, CancellationToken.None)) - .ReturnsAsync(stream); - _subject.OpenAsync(CancellationToken.None).GetAwaiter().GetResult(); - exception = Record - .Exception(() => _subject.ReceiveMessageAsync(10, encoderSelector, _messageEncoderSettings, CancellationToken.None) - .GetAwaiter() - .GetResult()); - } - else - { - _mockStreamFactory.Setup(f => f.CreateStream(_endPoint, CancellationToken.None)) - .Returns(stream); - _subject.Open(CancellationToken.None); - 
exception = Record.Exception(() => _subject.ReceiveMessage(10, encoderSelector, _messageEncoderSettings, CancellationToken.None)); - } + var exception = async ? + await Record.ExceptionAsync(() => _subject.ReceiveMessageAsync(OperationContext.NoTimeout, 10, encoderSelector, _messageEncoderSettings)) : + Record.Exception(() => _subject.ReceiveMessage(OperationContext.NoTimeout, 10, encoderSelector, _messageEncoderSettings)); exception.Should().BeOfType<MongoConnectionException>(); var e = exception.InnerException.Should().BeOfType<FormatException>().Subject; @@ -407,100 +383,72 @@ public void ReceiveMessage_should_throw_a_FormatException_when_message_is_an_inv [Theory] [ParameterAttributeData] - public void ReceiveMessage_should_throw_an_ArgumentNullException_when_the_encoderSelector_is_null( + public async Task ReceiveMessage_should_throw_an_ArgumentNullException_when_the_encoderSelector_is_null( [Values(false, true)] bool async) { - IMessageEncoderSelector encoderSelector = null; + var exception = async ? 
+ await Record.ExceptionAsync(() => _subject.ReceiveMessageAsync(OperationContext.NoTimeout, 10, null, _messageEncoderSettings)) : + Record.Exception(() => _subject.ReceiveMessage(OperationContext.NoTimeout, 10, null, _messageEncoderSettings)); - Action act; - if (async) - { - act = () => _subject.ReceiveMessageAsync(10, encoderSelector, _messageEncoderSettings, CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - act = () => _subject.ReceiveMessage(10, encoderSelector, _messageEncoderSettings, CancellationToken.None); - } - - act.ShouldThrow<ArgumentNullException>(); + exception.Should().BeOfType<ArgumentNullException>().Subject + .ParamName.Should().Be("encoderSelector"); } [Theory] [ParameterAttributeData] - public void ReceiveMessage_should_throw_an_ObjectDisposedException_if_the_connection_is_disposed( + public async Task ReceiveMessage_should_throw_an_ObjectDisposedException_if_the_connection_is_disposed( [Values(false, true)] bool async) { var encoderSelector = new Mock<IMessageEncoderSelector>().Object; _subject.Dispose(); - Action act; - if (async) - { - act = () => _subject.ReceiveMessageAsync(10, encoderSelector, _messageEncoderSettings, CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - act = () => _subject.ReceiveMessage(10, encoderSelector, _messageEncoderSettings, CancellationToken.None); - } + var exception = async ? 
+ await Record.ExceptionAsync(() => _subject.ReceiveMessageAsync(OperationContext.NoTimeout, 10, encoderSelector, _messageEncoderSettings)) : + Record.Exception(() => _subject.ReceiveMessage(OperationContext.NoTimeout, 10, encoderSelector, _messageEncoderSettings)); - act.ShouldThrow<ObjectDisposedException>(); + exception.Should().BeOfType<ObjectDisposedException>(); } [Theory] [ParameterAttributeData] - public void ReceiveMessage_should_throw_an_InvalidOperationException_if_the_connection_is_not_open( + public async Task ReceiveMessage_should_throw_an_InvalidOperationException_if_the_connection_is_not_open( [Values(false, true)] bool async) { var encoderSelector = new Mock<IMessageEncoderSelector>().Object; - Action act; - if (async) - { - act = () => _subject.ReceiveMessageAsync(10, encoderSelector, _messageEncoderSettings, CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - act = () => _subject.ReceiveMessage(10, encoderSelector, _messageEncoderSettings, CancellationToken.None); - } + var exception = async ? 
+ await Record.ExceptionAsync(() => _subject.ReceiveMessageAsync(OperationContext.NoTimeout, 10, encoderSelector, _messageEncoderSettings)) : + Record.Exception(() => _subject.ReceiveMessage(OperationContext.NoTimeout, 10, encoderSelector, _messageEncoderSettings)); - act.ShouldThrow<InvalidOperationException>(); + exception.Should().BeOfType<InvalidOperationException>(); } [Theory] [ParameterAttributeData] - public void ReceiveMessage_should_complete_when_reply_is_already_on_the_stream( + public async Task ReceiveMessage_should_complete_when_reply_is_already_on_the_stream( [Values(false, true)] bool async) { using (var stream = new BlockingMemoryStream()) { var messageToReceive = MessageHelper.BuildReply<BsonDocument>(new BsonDocument(), BsonDocumentSerializer.Instance, responseTo: 10); - MessageHelper.WriteResponsesToStream(stream, new[] { messageToReceive }); - - var encoderSelector = new ReplyMessageEncoderSelector<BsonDocument>(BsonDocumentSerializer.Instance); + MessageHelper.WriteResponsesToStream(stream, messageToReceive); - ResponseMessage received; - if (async) - { - _mockStreamFactory.Setup(f => f.CreateStreamAsync(_endPoint, CancellationToken.None)) - .Returns(Task.FromResult<Stream>(stream)); - _subject.OpenAsync(CancellationToken.None).GetAwaiter().GetResult(); - _capturedEvents.Clear(); + _mockStreamFactory.Setup(f => f.CreateStreamAsync(_endPoint, It.IsAny<CancellationToken>())) + .ReturnsAsync(stream); + _mockStreamFactory.Setup(f => f.CreateStream(_endPoint, It.IsAny<CancellationToken>())) + .Returns(stream); + await _subject.OpenAsync(OperationContext.NoTimeout); + _capturedEvents.Clear(); - received = _subject.ReceiveMessageAsync(10, encoderSelector, _messageEncoderSettings, CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - _mockStreamFactory.Setup(f => f.CreateStream(_endPoint, CancellationToken.None)) - .Returns(stream); - _subject.Open(CancellationToken.None); - _capturedEvents.Clear(); + var encoderSelector = new 
ReplyMessageEncoderSelector<BsonDocument>(BsonDocumentSerializer.Instance); - received = _subject.ReceiveMessage(10, encoderSelector, _messageEncoderSettings, CancellationToken.None); - } + var received = async ? + await _subject.ReceiveMessageAsync(OperationContext.NoTimeout, 10, encoderSelector, _messageEncoderSettings) : + _subject.ReceiveMessage(OperationContext.NoTimeout, 10, encoderSelector, _messageEncoderSettings); var expected = MessageHelper.TranslateMessagesToBsonDocuments(new[] { messageToReceive }); var actual = MessageHelper.TranslateMessagesToBsonDocuments(new[] { received }); @@ -515,40 +463,31 @@ public void ReceiveMessage_should_complete_when_reply_is_already_on_the_stream( [Theory] [ParameterAttributeData] - public void ReceiveMessage_should_complete_when_reply_is_not_already_on_the_stream( + public async Task ReceiveMessage_should_complete_when_reply_is_not_already_on_the_stream( [Values(false, true)] bool async) { using (var stream = new BlockingMemoryStream()) { - var encoderSelector = new ReplyMessageEncoderSelector<BsonDocument>(BsonDocumentSerializer.Instance); - - Task<ResponseMessage> receiveMessageTask; - if (async) - { - _mockStreamFactory.Setup(f => f.CreateStreamAsync(_endPoint, CancellationToken.None)) - .Returns(Task.FromResult<Stream>(stream)); - _subject.OpenAsync(CancellationToken.None).GetAwaiter().GetResult(); - _capturedEvents.Clear(); + _mockStreamFactory.Setup(f => f.CreateStreamAsync(_endPoint, It.IsAny<CancellationToken>())) + .ReturnsAsync(stream); + _mockStreamFactory.Setup(f => f.CreateStream(_endPoint, It.IsAny<CancellationToken>())) + .Returns(stream); + await _subject.OpenAsync(OperationContext.NoTimeout); + _capturedEvents.Clear(); - receiveMessageTask = _subject.ReceiveMessageAsync(10, encoderSelector, _messageEncoderSettings, CancellationToken.None); - } - else - { - _mockStreamFactory.Setup(f => f.CreateStream(_endPoint, CancellationToken.None)) - .Returns(stream); - _subject.Open(CancellationToken.None); - 
_capturedEvents.Clear(); + var encoderSelector = new ReplyMessageEncoderSelector<BsonDocument>(BsonDocumentSerializer.Instance); - receiveMessageTask = Task.Run(() => _subject.ReceiveMessage(10, encoderSelector, _messageEncoderSettings, CancellationToken.None)); - } + var receiveMessageTask = async ? + _subject.ReceiveMessageAsync(OperationContext.NoTimeout, 10, encoderSelector, _messageEncoderSettings) : + Task.Run(() => _subject.ReceiveMessage(OperationContext.NoTimeout, 10, encoderSelector, _messageEncoderSettings)); receiveMessageTask.IsCompleted.Should().BeFalse(); var messageToReceive = MessageHelper.BuildReply<BsonDocument>(new BsonDocument(), BsonDocumentSerializer.Instance, responseTo: 10); - MessageHelper.WriteResponsesToStream(stream, new[] { messageToReceive }); + MessageHelper.WriteResponsesToStream(stream, messageToReceive); - var received = receiveMessageTask.GetAwaiter().GetResult(); + var received = await receiveMessageTask; var expected = MessageHelper.TranslateMessagesToBsonDocuments(new[] { messageToReceive }); var actual = MessageHelper.TranslateMessagesToBsonDocuments(new[] { received }); @@ -561,69 +500,8 @@ public void ReceiveMessage_should_complete_when_reply_is_not_already_on_the_stre } } - [Theory] - [ParameterAttributeData] - public void ReceiveMessage_should_handle_out_of_order_replies( - [Values(false, true)] - bool async1, - [Values(false, true)] - bool async2) - { - using (var stream = new BlockingMemoryStream()) - { - _mockStreamFactory.Setup(f => f.CreateStream(_endPoint, CancellationToken.None)) - .Returns(stream); - _subject.Open(CancellationToken.None); - _capturedEvents.Clear(); - - var encoderSelector = new ReplyMessageEncoderSelector<BsonDocument>(BsonDocumentSerializer.Instance); - - Task<ResponseMessage> receivedTask10; - if (async1) - { - receivedTask10 = _subject.ReceiveMessageAsync(10, encoderSelector, _messageEncoderSettings, CancellationToken.None); - } - else - { - receivedTask10 = Task.Run(() => 
_subject.ReceiveMessage(10, encoderSelector, _messageEncoderSettings, CancellationToken.None)); - } - - Task<ResponseMessage> receivedTask11; - if (async2) - { - receivedTask11 = _subject.ReceiveMessageAsync(11, encoderSelector, _messageEncoderSettings, CancellationToken.None); - } - else - { - receivedTask11 = Task.Run(() => _subject.ReceiveMessage(11, encoderSelector, _messageEncoderSettings, CancellationToken.None)); - } - - SpinWait.SpinUntil(() => _capturedEvents.Count >= 2, TimeSpan.FromSeconds(5)).Should().BeTrue(); - - var messageToReceive10 = MessageHelper.BuildReply<BsonDocument>(new BsonDocument("_id", 10), BsonDocumentSerializer.Instance, responseTo: 10); - var messageToReceive11 = MessageHelper.BuildReply<BsonDocument>(new BsonDocument("_id", 11), BsonDocumentSerializer.Instance, responseTo: 11); - MessageHelper.WriteResponsesToStream(stream, new[] { messageToReceive11, messageToReceive10 }); // out of order - - var received10 = receivedTask10.GetAwaiter().GetResult(); - var received11 = receivedTask11.GetAwaiter().GetResult(); - - var expected = MessageHelper.TranslateMessagesToBsonDocuments(new[] { messageToReceive10, messageToReceive11 }); - var actual = MessageHelper.TranslateMessagesToBsonDocuments(new[] { received10, received11 }); - - actual.Should().BeEquivalentTo(expected); - - _capturedEvents.Next().Should().BeOfType<ConnectionReceivingMessageEvent>(); - _capturedEvents.Next().Should().BeOfType<ConnectionReceivingMessageEvent>(); - _capturedEvents.Next().Should().BeOfType<ConnectionReceivedMessageEvent>(); - _capturedEvents.Next().Should().BeOfType<ConnectionReceivedMessageEvent>(); - _capturedEvents.Any().Should().BeFalse(); - } - } - - [Theory] - [ParameterAttributeData] - public void ReceiveMessage_should_not_produce_unobserved_task_exceptions_on_fail( - [Values(false, true)] bool async) + [Fact] + public async Task ReceiveMessage_should_not_produce_unobserved_task_exceptions_on_fail() { var unobservedTaskExceptionRaised = false; var 
mockStream = new Mock<Stream>(); @@ -644,30 +522,13 @@ public void ReceiveMessage_should_not_produce_unobserved_task_exceptions_on_fail .Setup(f => f.CreateStream(_endPoint, CancellationToken.None)) .Returns(mockStream.Object); - if (async) - { - mockStream - .Setup(s => s.ReadAsync(It.IsAny<byte[]>(), It.IsAny<int>(), It.IsAny<int>(), It.IsAny<CancellationToken>())) - .Throws(new SocketException()); - } - else - { - mockStream - .Setup(s => s.Read(It.IsAny<byte[]>(), It.IsAny<int>(), It.IsAny<int>())) - .Throws(new SocketException()); - } + var tcs = new TaskCompletionSource<int>(); + tcs.SetException(new SocketException()); + SetupStreamRead(mockStream, tcs); - _subject.Open(CancellationToken.None); + _subject.Open(OperationContext.NoTimeout); - Exception exception; - if (async) - { - exception = Record.Exception(() => _subject.ReceiveMessageAsync(1, encoderSelector, _messageEncoderSettings, CancellationToken.None).GetAwaiter().GetResult()); - } - else - { - exception = Record.Exception(() => _subject.ReceiveMessage(1, encoderSelector, _messageEncoderSettings, CancellationToken.None)); - } + var exception = await Record.ExceptionAsync(() => _subject.ReceiveMessageAsync(OperationContext.NoTimeout, 1, encoderSelector, _messageEncoderSettings)); exception.Should().BeOfType<MongoConnectionException>(); GC.Collect(); // Collects the unobserved tasks @@ -682,75 +543,56 @@ public void ReceiveMessage_should_not_produce_unobserved_task_exceptions_on_fail } } - [Theory] - [ParameterAttributeData] - public void ReceiveMessage_should_throw_network_exception_to_all_awaiters( - [Values(false, true)] - bool async1, - [Values(false, true)] - bool async2) + [Fact] + public async Task ReceiveMessageAsync_should_not_produce_unobserved_task_exceptions_on_timeout() { + GC.Collect(); // Collects the unobserved tasks + GC.WaitForPendingFinalizers(); // Assures finalizers are executed + + Exception ex = null; var mockStream = new Mock<Stream>(); - using (mockStream.Object) + 
EventHandler<UnobservedTaskExceptionEventArgs> eventHandler = (s, args) => { - var encoderSelector = new ReplyMessageEncoderSelector<BsonDocument>(BsonDocumentSerializer.Instance); - - _mockStreamFactory.Setup(f => f.CreateStream(_endPoint, CancellationToken.None)) - .Returns(mockStream.Object); - var readTcs = new TaskCompletionSource<int>(); - mockStream.Setup(s => s.Read(It.IsAny<byte[]>(), It.IsAny<int>(), It.IsAny<int>())) - .Returns(() => readTcs.Task.GetAwaiter().GetResult()); - mockStream.Setup(s => s.ReadAsync(It.IsAny<byte[]>(), It.IsAny<int>(), It.IsAny<int>(), It.IsAny<CancellationToken>())) - .Returns(readTcs.Task); - _subject.Open(CancellationToken.None); - _capturedEvents.Clear(); - - Task task1; - if (async1) - { - task1 = _subject.ReceiveMessageAsync(1, encoderSelector, _messageEncoderSettings, It.IsAny<CancellationToken>()); - } - else - { - task1 = Task.Run(() => _subject.ReceiveMessage(1, encoderSelector, _messageEncoderSettings, CancellationToken.None)); - } + ex = args.Exception; + }; - Task task2; - if (async2) - { - task2 = _subject.ReceiveMessageAsync(2, encoderSelector, _messageEncoderSettings, CancellationToken.None); - } - else - { - task2 = Task.Run(() => _subject.ReceiveMessage(2, encoderSelector, _messageEncoderSettings, CancellationToken.None)); - } + try + { + TaskScheduler.UnobservedTaskException += eventHandler; + var encoderSelector = new ReplyMessageEncoderSelector<BsonDocument>(BsonDocumentSerializer.Instance); - SpinWait.SpinUntil(() => _capturedEvents.Count >= 2, TimeSpan.FromSeconds(5)).Should().BeTrue(); + _mockStreamFactory + .Setup(f => f.CreateStream(_endPoint, It.IsAny<CancellationToken>())) + .Returns(mockStream.Object); - readTcs.SetException(new SocketException()); + var tcs = new TaskCompletionSource<int>(); + SetupStreamRead(mockStream, tcs); + _subject.Open(OperationContext.NoTimeout); - Func<Task> act1 = () => task1; - act1.ShouldThrow<MongoConnectionException>() - .WithInnerException<SocketException>() - 
.And.ConnectionId.Should().Be(_subject.ConnectionId); + var exception = await Record.ExceptionAsync(() => _subject.ReceiveMessageAsync(OperationContext.NoTimeout, 1, encoderSelector, _messageEncoderSettings)); + exception.Should().BeOfType<MongoConnectionException>(); + exception.InnerException.Should().BeOfType<TimeoutException>(); - Func<Task> act2 = () => task2; - act2.ShouldThrow<MongoConnectionException>() - .WithInnerException<SocketException>() - .And.ConnectionId.Should().Be(_subject.ConnectionId); + tcs = null; + mockStream.Reset(); + GC.Collect(); // Collects the unobserved tasks + GC.WaitForPendingFinalizers(); // Assures finalizers are executed - _capturedEvents.Next().Should().BeOfType<ConnectionReceivingMessageEvent>(); - _capturedEvents.Next().Should().BeOfType<ConnectionReceivingMessageEvent>(); - _capturedEvents.Next().Should().BeOfType<ConnectionFailedEvent>(); - _capturedEvents.Next().Should().BeOfType<ConnectionReceivingMessageFailedEvent>(); - _capturedEvents.Next().Should().BeOfType<ConnectionReceivingMessageFailedEvent>(); - _capturedEvents.Any().Should().BeFalse(); + if (ex != null) + { + Assert.Fail($"{ex.Message} - {ex}"); + } + } + finally + { + TaskScheduler.UnobservedTaskException -= eventHandler; + mockStream.Object?.Dispose(); } } [Theory] [ParameterAttributeData] - public void ReceiveMessage_should_throw_MongoConnectionClosedException_when_connection_has_failed( + public async Task ReceiveMessage_should_throw_MongoConnectionClosedException_when_connection_has_failed( [Values(false, true)] bool async1, [Values(false, true)] @@ -759,45 +601,29 @@ public void ReceiveMessage_should_throw_MongoConnectionClosedException_when_conn var mockStream = new Mock<Stream>(); using (mockStream.Object) { - _mockStreamFactory.Setup(f => f.CreateStream(_endPoint, CancellationToken.None)) - .Returns(mockStream.Object); + _mockStreamFactory.Setup(f => f.CreateStreamAsync(_endPoint, It.IsAny<CancellationToken>())).ReturnsAsync(mockStream.Object); var 
readTcs = new TaskCompletionSource<int>(); readTcs.SetException(new SocketException()); - mockStream.Setup(s => s.Read(It.IsAny<byte[]>(), It.IsAny<int>(), It.IsAny<int>())) - .Returns(() => readTcs.Task.GetAwaiter().GetResult()); - mockStream.Setup(s => s.ReadAsync(It.IsAny<byte[]>(), It.IsAny<int>(), It.IsAny<int>(), It.IsAny<CancellationToken>())) - .Returns(readTcs.Task); - _subject.Open(CancellationToken.None); + SetupStreamRead(mockStream, readTcs); + await _subject.OpenAsync(OperationContext.NoTimeout); _capturedEvents.Clear(); var encoderSelector = new ReplyMessageEncoderSelector<BsonDocument>(BsonDocumentSerializer.Instance); - Action act1; - if (async1) - { - act1 = () => _subject.ReceiveMessageAsync(1, encoderSelector, _messageEncoderSettings, CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - act1 = () => _subject.ReceiveMessage(1, encoderSelector, _messageEncoderSettings, CancellationToken.None); - } + var exception1 = async1 ? + await Record.ExceptionAsync(() => _subject.ReceiveMessageAsync(OperationContext.NoTimeout, 1, encoderSelector, _messageEncoderSettings)) : + Record.Exception(() => _subject.ReceiveMessage(OperationContext.NoTimeout, 1, encoderSelector, _messageEncoderSettings)); - Action act2; - if (async2) - { - act2 = () => _subject.ReceiveMessageAsync(2, encoderSelector, _messageEncoderSettings, CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - act2 = () => _subject.ReceiveMessage(2, encoderSelector, _messageEncoderSettings, CancellationToken.None); - } + var exception2 = async2 ? 
+ await Record.ExceptionAsync(() => _subject.ReceiveMessageAsync(OperationContext.NoTimeout, 2, encoderSelector, _messageEncoderSettings)) : + Record.Exception(() => _subject.ReceiveMessage(OperationContext.NoTimeout, 2, encoderSelector, _messageEncoderSettings)); - act1.ShouldThrow<MongoConnectionException>() - .WithInnerException<SocketException>() - .And.ConnectionId.Should().Be(_subject.ConnectionId); + exception1.Should().BeOfType<MongoConnectionException>().Subject + .ConnectionId.Should().Be(_subject.ConnectionId); + exception1.InnerException.Should().BeOfType<SocketException>(); - act2.ShouldThrow<MongoConnectionClosedException>() - .And.ConnectionId.Should().Be(_subject.ConnectionId); + exception2.Should().BeOfType<MongoConnectionClosedException>().Subject + .ConnectionId.Should().Be(_subject.ConnectionId); _capturedEvents.Next().Should().BeOfType<ConnectionReceivingMessageEvent>(); _capturedEvents.Next().Should().BeOfType<ConnectionFailedEvent>(); @@ -808,108 +634,103 @@ public void ReceiveMessage_should_throw_MongoConnectionClosedException_when_conn [Theory] [ParameterAttributeData] - public void SendMessages_should_throw_an_ArgumentNullException_if_messages_is_null( + public async Task SendMessage_should_throw_an_ArgumentNullException_if_message_is_null( [Values(false, true)] bool async) { - Action act; - if (async) - { - act = () => _subject.SendMessagesAsync(null, _messageEncoderSettings, CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - act = () => _subject.SendMessages(null, _messageEncoderSettings, CancellationToken.None); - } + var exception = async ? 
+ await Record.ExceptionAsync(() => _subject.SendMessageAsync(OperationContext.NoTimeout, null, _messageEncoderSettings)) : + Record.Exception(() => _subject.SendMessage(OperationContext.NoTimeout, null, _messageEncoderSettings)); - act.ShouldThrow<ArgumentNullException>(); + exception.Should().BeOfType<ArgumentNullException>(); } [Theory] [ParameterAttributeData] - public void SendMessages_should_throw_an_ObjectDisposedException_if_the_connection_is_disposed( + public async Task SendMessage_should_throw_an_ObjectDisposedException_if_the_connection_is_disposed( [Values(false, true)] bool async) { var message = MessageHelper.BuildQuery(); _subject.Dispose(); - Action act; - if (async) - { - act = () => _subject.SendMessagesAsync(new[] { message }, _messageEncoderSettings, CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - act = () => _subject.SendMessages(new[] { message }, _messageEncoderSettings, CancellationToken.None); - } + var exception = async ? + await Record.ExceptionAsync(() => _subject.SendMessageAsync(OperationContext.NoTimeout, message, _messageEncoderSettings)) : + Record.Exception(() => _subject.SendMessage(OperationContext.NoTimeout, message, _messageEncoderSettings)); - act.ShouldThrow<ObjectDisposedException>(); + exception.Should().BeOfType<ObjectDisposedException>(); } [Theory] [ParameterAttributeData] - public void SendMessages_should_throw_an_InvalidOperationException_if_the_connection_is_not_open( + public async Task SendMessage_should_throw_an_InvalidOperationException_if_the_connection_is_not_open( [Values(false, true)] bool async) { var message = MessageHelper.BuildQuery(); - Action act; - if (async) - { - act = () => _subject.SendMessagesAsync(new[] { message }, _messageEncoderSettings, CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - act = () => _subject.SendMessages(new[] { message }, _messageEncoderSettings, CancellationToken.None); - } + var exception = async ? 
+ await Record.ExceptionAsync(() => _subject.SendMessageAsync(OperationContext.NoTimeout, message, _messageEncoderSettings)) : + Record.Exception(() => _subject.SendMessage(OperationContext.NoTimeout, message, _messageEncoderSettings)); - act.ShouldThrow<InvalidOperationException>(); + exception.Should().BeOfType<InvalidOperationException>(); } [Theory] [ParameterAttributeData] - public void SendMessages_should_put_the_messages_on_the_stream_and_raise_the_correct_events( + public async Task SendMessage_should_put_the_message_on_the_stream_and_raise_the_correct_events( [Values(false, true)] bool async) { using (var stream = new MemoryStream()) { - var message1 = MessageHelper.BuildQuery(query: new BsonDocument("x", 1)); - var message2 = MessageHelper.BuildQuery(query: new BsonDocument("y", 2)); + _mockStreamFactory.Setup(f => f.CreateStreamAsync(_endPoint, It.IsAny<CancellationToken>())).ReturnsAsync(stream); + _mockStreamFactory.Setup(f => f.CreateStream(_endPoint, It.IsAny<CancellationToken>())).Returns(stream); + _subject.OpenAsync(OperationContext.NoTimeout).GetAwaiter().GetResult(); + _capturedEvents.Clear(); + + var message = MessageHelper.BuildQuery(query: new BsonDocument("x", 1)); if (async) { - _mockStreamFactory.Setup(f => f.CreateStreamAsync(_endPoint, CancellationToken.None)) - .Returns(Task.FromResult<Stream>(stream)); - _subject.OpenAsync(CancellationToken.None).GetAwaiter().GetResult(); - _capturedEvents.Clear(); - - _subject.SendMessagesAsync(new[] { message1, message2 }, _messageEncoderSettings, CancellationToken.None).GetAwaiter().GetResult(); + await _subject.SendMessageAsync(OperationContext.NoTimeout, message, _messageEncoderSettings); } else { - _mockStreamFactory.Setup(f => f.CreateStream(_endPoint, CancellationToken.None)) - .Returns(stream); - _subject.Open(CancellationToken.None); - _capturedEvents.Clear(); - - _subject.SendMessages(new[] { message1, message2 }, _messageEncoderSettings, CancellationToken.None); + 
_subject.SendMessage(OperationContext.NoTimeout, message, _messageEncoderSettings); } - var expectedRequests = MessageHelper.TranslateMessagesToBsonDocuments(new[] { message1, message2 }); + var expectedRequests = MessageHelper.TranslateMessagesToBsonDocuments(new[] { message }); var sentRequests = MessageHelper.TranslateMessagesToBsonDocuments(stream.ToArray()); sentRequests.Should().BeEquivalentTo(expectedRequests); _capturedEvents.Next().Should().BeOfType<ConnectionSendingMessagesEvent>(); _capturedEvents.Next().Should().BeOfType<CommandStartedEvent>(); - _capturedEvents.Next().Should().BeOfType<CommandStartedEvent>(); _capturedEvents.Next().Should().BeOfType<ConnectionSentMessagesEvent>(); _capturedEvents.Any().Should().BeFalse(); } } + private void SetupStreamRead(Mock<Stream> streamMock, TaskCompletionSource<int> tcs) + { + streamMock.Setup(s => s.BeginRead(It.IsAny<byte[]>(), It.IsAny<int>(), It.IsAny<int>(), It.IsAny<AsyncCallback>(), It.IsAny<object>())) + .Returns((byte[] _, int __, int ___, AsyncCallback callback, object state) => + { + var innerTcs = new TaskCompletionSource<int>(state); + tcs.Task.ContinueWith(t => + { + innerTcs.TrySetException(t.Exception.InnerException); + callback(innerTcs.Task); + }); + return innerTcs.Task; + }); + streamMock.Setup(s => s.EndRead(It.IsAny<IAsyncResult>())) + .Returns<IAsyncResult>(x => ((Task<int>)x).GetAwaiter().GetResult()); + streamMock.Setup(s => s.ReadAsync(It.IsAny<byte[]>(), It.IsAny<int>(), It.IsAny<int>(), It.IsAny<CancellationToken>())) + .Returns(tcs.Task); + streamMock.Setup(s => s.Close()).Callback(() => tcs.TrySetException(new ObjectDisposedException("stream"))); + } + // nested type private sealed class IgnoreWritesMemoryStream : MemoryStream { diff --git a/tests/MongoDB.Driver.Tests/Core/Connections/BinaryConnection_CommandEventTests.cs b/tests/MongoDB.Driver.Tests/Core/Connections/BinaryConnection_CommandEventTests.cs index f4ddfddc9c9..fd580287d70 100644 --- 
a/tests/MongoDB.Driver.Tests/Core/Connections/BinaryConnection_CommandEventTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Connections/BinaryConnection_CommandEventTests.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,7 +16,6 @@ using System; using System.Collections.Generic; using System.IO; -using System.Linq; using System.Net; using System.Threading; using System.Threading.Tasks; @@ -88,9 +87,9 @@ public BinaryConnection_CommandEventTests(ITestOutputHelper output) : base(outpu new HelloResult(new BsonDocument { { "maxWireVersion", WireVersion.Server36 } })); _mockConnectionInitializer = new Mock<IConnectionInitializer>(); - _mockConnectionInitializer.Setup(i => i.SendHelloAsync(It.IsAny<IConnection>(), CancellationToken.None)) + _mockConnectionInitializer.Setup(i => i.SendHelloAsync(It.IsAny<OperationContext>(), It.IsAny<IConnection>())) .Returns(() => Task.FromResult(new ConnectionInitializerContext(connectionDescriptionFunc(), null))); - _mockConnectionInitializer.Setup(i => i.AuthenticateAsync(It.IsAny<IConnection>(), It.IsAny<ConnectionInitializerContext>(), CancellationToken.None)) + _mockConnectionInitializer.Setup(i => i.AuthenticateAsync(It.IsAny<OperationContext>(), It.IsAny<IConnection>(), It.IsAny<ConnectionInitializerContext>())) .Returns(() => Task.FromResult(new ConnectionInitializerContext(connectionDescriptionFunc(), null))); _subject = new BinaryConnection( @@ -100,12 +99,14 @@ public BinaryConnection_CommandEventTests(ITestOutputHelper output) : base(outpu streamFactory: _mockStreamFactory.Object, connectionInitializer: _mockConnectionInitializer.Object, eventSubscriber: _capturedEvents, - LoggerFactory); + loggerFactory: LoggerFactory, + socketReadTimeout: Timeout.InfiniteTimeSpan, + socketWriteTimeout: Timeout.InfiniteTimeSpan); _stream = new 
BlockingMemoryStream(); - _mockStreamFactory.Setup(f => f.CreateStreamAsync(_endPoint, CancellationToken.None)) + _mockStreamFactory.Setup(f => f.CreateStreamAsync(_endPoint, It.IsAny<CancellationToken>())) .Returns(Task.FromResult<Stream>(_stream)); - _subject.OpenAsync(CancellationToken.None).Wait(); + _subject.OpenAsync(OperationContext.NoTimeout).Wait(); _capturedEvents.Clear(); _operationIdDisposer = EventContext.BeginOperation(); @@ -126,13 +127,13 @@ public void Should_process_a_command() var requestMessage = MessageHelper.BuildCommand( expectedCommand, requestId: 10); - SendMessages(requestMessage); + SendMessage(requestMessage); var replyMessage = MessageHelper.BuildReply<BsonDocument>( expectedReply, BsonDocumentSerializer.Instance, responseTo: requestMessage.RequestId); - ReceiveMessages(replyMessage); + ReceiveMessage(replyMessage); var commandStartedEvent = (CommandStartedEvent)_capturedEvents.Next(); var commandSucceededEvent = (CommandSucceededEvent)_capturedEvents.Next(); @@ -162,13 +163,13 @@ public void Should_process_a_redacted_command(string commandJson, bool shouldBeR var requestMessage = MessageHelper.BuildCommand( command, requestId: 10); - SendMessages(requestMessage); + SendMessage(requestMessage); var replyMessage = MessageHelper.BuildReply<BsonDocument>( reply, BsonDocumentSerializer.Instance, responseTo: requestMessage.RequestId); - ReceiveMessages(replyMessage); + ReceiveMessage(replyMessage); var commandStartedEvent = (CommandStartedEvent)_capturedEvents.Next(); var commandSucceededEvent = (CommandSucceededEvent)_capturedEvents.Next(); @@ -197,13 +198,13 @@ public void Should_process_a_failed_command() var requestMessage = MessageHelper.BuildCommand( expectedCommand, requestId: 10); - SendMessages(requestMessage); + SendMessage(requestMessage); var replyMessage = MessageHelper.BuildReply<BsonDocument>( expectedReply, BsonDocumentSerializer.Instance, responseTo: requestMessage.RequestId); - ReceiveMessages(replyMessage); + 
ReceiveMessage(replyMessage); var commandStartedEvent = (CommandStartedEvent)_capturedEvents.Next(); var commandFailedEvent = (CommandFailedEvent)_capturedEvents.Next(); @@ -233,13 +234,13 @@ public void Should_process_a_redacted_failed_command(string commandJson, bool sh var requestMessage = MessageHelper.BuildCommand( command, requestId: 10); - SendMessages(requestMessage); + SendMessage(requestMessage); var replyMessage = MessageHelper.BuildReply<BsonDocument>( reply, BsonDocumentSerializer.Instance, responseTo: requestMessage.RequestId); - ReceiveMessages(replyMessage); + ReceiveMessage(replyMessage); var commandStartedEvent = (CommandStartedEvent)_capturedEvents.Next(); var commandFailedEvent = (CommandFailedEvent)_capturedEvents.Next(); @@ -288,15 +289,14 @@ public void Should_process_a_query_without_modifiers() var requestMessage = MessageHelper.BuildQuery( (BsonDocument)expectedCommand["filter"], requestId: 10); - SendMessages(requestMessage); - + SendMessage(requestMessage); var replyMessage = MessageHelper.BuildReply<BsonDocument>( expectedReplyDocuments, BsonDocumentSerializer.Instance, responseTo: requestMessage.RequestId, cursorId: expectedReply["cursor"]["id"].ToInt64()); - ReceiveMessages(replyMessage); + ReceiveMessage(replyMessage); var commandStartedEvent = (CommandStartedEvent)_capturedEvents.Next(); var commandSucceededEvent = (CommandSucceededEvent)_capturedEvents.Next(); @@ -374,7 +374,7 @@ public void Should_process_a_query_with_modifiers() using (EventContext.BeginFind(expectedCommand["batchSize"].ToInt32(), expectedCommand["limit"].ToInt32())) { - SendMessages(requestMessage); + SendMessage(requestMessage); } var replyMessage = MessageHelper.BuildReply<BsonDocument>( @@ -382,7 +382,7 @@ public void Should_process_a_query_with_modifiers() BsonDocumentSerializer.Instance, responseTo: requestMessage.RequestId, cursorId: expectedReply["cursor"]["id"].ToInt64()); - ReceiveMessages(replyMessage); + ReceiveMessage(replyMessage); var 
commandStartedEvent = (CommandStartedEvent)_capturedEvents.Next(); var commandSucceededEvent = (CommandSucceededEvent)_capturedEvents.Next(); @@ -420,13 +420,13 @@ public void Should_process_a_query_with_the_explain_modifier() var requestMessage = MessageHelper.BuildQuery( query, requestId: 10); - SendMessages(requestMessage); + SendMessage(requestMessage); var replyMessage = MessageHelper.BuildReply( expectedReply, BsonDocumentSerializer.Instance, responseTo: requestMessage.RequestId); - ReceiveMessages(replyMessage); + ReceiveMessage(replyMessage); var commandStartedEvent = (CommandStartedEvent)_capturedEvents.Next(); var commandSucceededEvent = (CommandSucceededEvent)_capturedEvents.Next(); @@ -459,13 +459,12 @@ public void Should_process_a_failed_query() var requestMessage = MessageHelper.BuildQuery( (BsonDocument)expectedCommand["filter"], requestId: 10); - SendMessages(requestMessage); - + SendMessage(requestMessage); var replyMessage = MessageHelper.BuildQueryFailedReply<BsonDocument>( queryFailureDocument, requestMessage.RequestId); - ReceiveMessages(replyMessage); + ReceiveMessage(replyMessage); var commandStartedEvent = (CommandStartedEvent)_capturedEvents.Next(); var commandFailedEvent = (CommandFailedEvent)_capturedEvents.Next(); @@ -485,19 +484,16 @@ public void Should_process_a_failed_query() commandFailedEvent.RequestId.Should().Be(commandStartedEvent.RequestId); } - private void SendMessages(params RequestMessage[] messages) + private void SendMessage(RequestMessage message) { - _subject.SendMessagesAsync(messages, _messageEncoderSettings, CancellationToken.None).Wait(); + _subject.SendMessageAsync(OperationContext.NoTimeout, message, _messageEncoderSettings).Wait(); } - private void ReceiveMessages(params ReplyMessage<BsonDocument>[] messages) + private void ReceiveMessage(ReplyMessage<BsonDocument> message) { - MessageHelper.WriteResponsesToStream(_stream, messages); + MessageHelper.WriteResponsesToStream(_stream, message); var encoderSelector = 
new ReplyMessageEncoderSelector<BsonDocument>(BsonDocumentSerializer.Instance); - foreach (var message in messages) - { - _subject.ReceiveMessageAsync(message.ResponseTo, encoderSelector, _messageEncoderSettings, CancellationToken.None).Wait(); - } + _subject.ReceiveMessageAsync(OperationContext.NoTimeout, message.ResponseTo, encoderSelector, _messageEncoderSettings).Wait(); } } } diff --git a/tests/MongoDB.Driver.Tests/Core/Connections/ConnectionInitializerTests.cs b/tests/MongoDB.Driver.Tests/Core/Connections/ConnectionInitializerTests.cs index 63ed1e2ed24..d86d925434a 100644 --- a/tests/MongoDB.Driver.Tests/Core/Connections/ConnectionInitializerTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Connections/ConnectionInitializerTests.cs @@ -16,6 +16,7 @@ using System; using System.Net; using System.Threading; +using System.Threading.Tasks; using FluentAssertions; using MongoDB.Bson; using MongoDB.Bson.TestHelpers; @@ -42,72 +43,79 @@ public class ConnectionInitializerTests [Theory] [ParameterAttributeData] - public void ConnectionAuthentication_should_throw_an_ArgumentNullException_if_required_arguments_missed( + public async Task ConnectionAuthentication_should_throw_if_operationContext_is_null( [Values(false, true)] bool async) { var connectionInitializerContext = new ConnectionInitializerContext(__emptyConnectionDescription, null); var subject = CreateSubject(); - if (async) - { - Record.Exception(() => subject.AuthenticateAsync(null, connectionInitializerContext, CancellationToken.None).GetAwaiter().GetResult()).Should().BeOfType<ArgumentNullException>(); - Record.Exception(() => subject.AuthenticateAsync(Mock.Of<IConnection>(), null, CancellationToken.None).GetAwaiter().GetResult()).Should().BeOfType<ArgumentNullException>(); - } - else - { - Record.Exception(() => subject.Authenticate(null, connectionInitializerContext, CancellationToken.None)).Should().BeOfType<ArgumentNullException>(); - Record.Exception(() => subject.Authenticate(Mock.Of<IConnection>(), 
null, CancellationToken.None)).Should().BeOfType<ArgumentNullException>(); - } - } + var exception = async ? + await Record.ExceptionAsync(() => subject.AuthenticateAsync(null, Mock.Of<IConnection>(), connectionInitializerContext)) : + Record.Exception(() => subject.Authenticate(null, Mock.Of<IConnection>(), connectionInitializerContext)); - [Fact] - public void ConnectionInitializerContext_should_throw_when_description_is_null() - { - Record.Exception(() => new ConnectionInitializerContext(null, null)).Should().BeOfType<ArgumentNullException>().Which.ParamName.Should().Be("description"); + exception.Should().BeOfType<ArgumentNullException>() + .Subject.ParamName.Should().Be("operationContext"); } - [Fact] - public void ConnectionInitializerContext_should_not_throw_when_authenticator_is_null() + [Theory] + [ParameterAttributeData] + public async Task ConnectionAuthentication_should_throw_if_connection_is_null( + [Values(false, true)] bool async) { - _ = new ConnectionInitializerContext(__emptyConnectionDescription, null); + var connectionInitializerContext = new ConnectionInitializerContext(__emptyConnectionDescription, null); + var subject = CreateSubject(); + var exception = async ? 
+ await Record.ExceptionAsync(() => subject.AuthenticateAsync(OperationContext.NoTimeout, null, connectionInitializerContext)) : + Record.Exception(() => subject.Authenticate(OperationContext.NoTimeout, null, connectionInitializerContext)); + + exception.Should().BeOfType<ArgumentNullException>() + .Subject.ParamName.Should().Be("connection"); } [Theory] [ParameterAttributeData] - public void CreateInitialHelloCommand_without_server_api_should_return_legacy_hello_with_speculativeAuthenticate( - [Values("default", "SCRAM-SHA-256", "SCRAM-SHA-1")] string authenticatorType, + public async Task ConnectionAuthentication_should_throw_if_connectionInitializerContext_is_null( [Values(false, true)] bool async) { - var identity = new MongoExternalIdentity(source: "Pathfinder", username: "Barclay"); - var evidence = new PasswordEvidence("Barclay-Alpha-1-7-Gamma"); - var authenticator = CreateAuthenticator(authenticatorType, identity, evidence); - var subject = CreateSubject(); - var helloDocument = subject.CreateInitialHelloCommand(authenticator, false); + var exception = async ? + await Record.ExceptionAsync(() => subject.AuthenticateAsync(OperationContext.NoTimeout, Mock.Of<IConnection>(), null)) : + Record.Exception(() => subject.Authenticate(OperationContext.NoTimeout, Mock.Of<IConnection>(), null)); - helloDocument.Should().Contain(OppressiveLanguageConstants.LegacyHelloCommandName); - helloDocument.Should().Contain("speculativeAuthenticate"); - var speculativeAuthenticateDocument = helloDocument["speculativeAuthenticate"].AsBsonDocument; - speculativeAuthenticateDocument.Should().Contain("mechanism"); - var expectedMechanism = new BsonString( - authenticatorType == "default" ? 
"SCRAM-SHA-256" : authenticatorType); - speculativeAuthenticateDocument["mechanism"].Should().Be(expectedMechanism); - speculativeAuthenticateDocument["db"].Should().Be(new BsonString(identity.Source)); + exception.Should().BeOfType<ArgumentNullException>() + .Subject.ParamName.Should().Be("connectionInitializerContext"); + } + + [Fact] + public void ConnectionInitializerContext_should_throw_when_description_is_null() + { + var exception = Record.Exception(() => new ConnectionInitializerContext(null, Mock.Of<IAuthenticator>())); + + exception.Should().BeOfType<ArgumentNullException>() + .Subject.ParamName.Should().Be("description"); + } + + [Fact] + public void ConnectionInitializerContext_should_not_throw_when_authenticator_is_null() + { + _ = new ConnectionInitializerContext(__emptyConnectionDescription, null); } [Theory] [ParameterAttributeData] - public void CreateInitialHelloCommand_with_server_api_should_return_hello_with_speculativeAuthenticate( + public void CreateInitialHelloCommand_should_return_expected_hello_with_speculativeAuthenticate( [Values("default", "SCRAM-SHA-256", "SCRAM-SHA-1")] string authenticatorType, - [Values(false, true)] bool async) + [Values(true, false)] bool withServerApi, + [Values(true, false)] bool loadBalanced) { var identity = new MongoExternalIdentity(source: "Pathfinder", username: "Barclay"); var evidence = new PasswordEvidence("Barclay-Alpha-1-7-Gamma"); var authenticator = CreateAuthenticator(authenticatorType, identity, evidence); - var subject = new ConnectionInitializer("test", new[] { new CompressorConfiguration(CompressorType.Zlib) }, serverApi: new ServerApi(ServerApiVersion.V1), null); - var helloDocument = subject.CreateInitialHelloCommand(authenticator, false); + var subject = CreateSubject(withServerApi); + var helloDocument = subject.CreateInitialHelloCommand(authenticator, loadBalanced); - helloDocument.Should().Contain("hello"); + var expectedHelloCommand = withServerApi || loadBalanced ? 
"hello" : OppressiveLanguageConstants.LegacyHelloCommandName; + helloDocument.Should().Contain(expectedHelloCommand); helloDocument.Should().Contain("speculativeAuthenticate"); var speculativeAuthenticateDocument = helloDocument["speculativeAuthenticate"].AsBsonDocument; speculativeAuthenticateDocument.Should().Contain("mechanism"); @@ -119,42 +127,31 @@ public void CreateInitialHelloCommand_with_server_api_should_return_hello_with_s [Theory] [ParameterAttributeData] - public void CreateInitialHelloCommand_without_server_api_but_with_load_balancing_should_return_hello_with_speculativeAuthenticate( - [Values("default", "SCRAM-SHA-256", "SCRAM-SHA-1")] string authenticatorType, + public async Task Handshake_should_throw_if_operationContext_is_null( [Values(false, true)] bool async) { - var identity = new MongoExternalIdentity(source: "Pathfinder", username: "Barclay"); - var evidence = new PasswordEvidence("Barclay-Alpha-1-7-Gamma"); - var authenticator = CreateAuthenticator(authenticatorType, identity, evidence); - var subject = CreateSubject(); - var helloDocument = subject.CreateInitialHelloCommand(authenticator, true); - - helloDocument.Should().Contain("hello"); - helloDocument.Should().Contain("speculativeAuthenticate"); - var speculativeAuthenticateDocument = helloDocument["speculativeAuthenticate"].AsBsonDocument; - speculativeAuthenticateDocument.Should().Contain("mechanism"); - var expectedMechanism = new BsonString( - authenticatorType == "default" ? "SCRAM-SHA-256" : authenticatorType); - speculativeAuthenticateDocument["mechanism"].Should().Be(expectedMechanism); - speculativeAuthenticateDocument["db"].Should().Be(new BsonString(identity.Source)); + var exception = async ? 
+ await Record.ExceptionAsync(() => subject.SendHelloAsync(null, Mock.Of<IConnection>())) : + Record.Exception(() => subject.SendHello(null, Mock.Of<IConnection>())); + exception.Should().BeOfType<ArgumentNullException>(); } [Theory] [ParameterAttributeData] - public void Handshake_should_throw_an_ArgumentNullException_if_the_connection_is_null( + public async Task Handshake_should_throw_if_connection_is_null( [Values(false, true)] bool async) { var subject = CreateSubject(); - var exception = async - ? Record.Exception(() => subject.SendHelloAsync(null, CancellationToken.None).GetAwaiter().GetResult()) - : Record.Exception(() => subject.SendHello(null, CancellationToken.None)); + var exception = async ? + await Record.ExceptionAsync(() => subject.SendHelloAsync(OperationContext.NoTimeout, null)) : + Record.Exception(() => subject.SendHello(OperationContext.NoTimeout, null)); exception.Should().BeOfType<ArgumentNullException>(); } [Theory] [ParameterAttributeData] - public void InitializeConnection_should_acquire_connectionId_from_hello_response( + public async Task InitializeConnection_should_acquire_connectionId_from_hello_response( [Values(1, int.MaxValue, (long)int.MaxValue + 1, long.MaxValue, 1d, (double)int.MaxValue+1, (double)int.MaxValue*4)] object serverConnectionId, [Values(false, true)] bool async) { @@ -165,7 +162,7 @@ public void InitializeConnection_should_acquire_connectionId_from_hello_response connection.EnqueueCommandResponseMessage(helloResponse); var subject = CreateSubject(withServerApi: true); - var result = InitializeConnection(subject, connection, async, CancellationToken.None); + var result = await InitializeConnection(subject, connection, async); var sentMessages = connection.GetSentMessages(); sentMessages.Should().HaveCount(1); @@ -174,7 +171,7 @@ public void InitializeConnection_should_acquire_connectionId_from_hello_response [Theory] [ParameterAttributeData] - public void 
InitializeConnection_should_acquire_connectionId_from_legacy_hello_response( + public async Task InitializeConnection_should_acquire_connectionId_from_legacy_hello_response( [Values(1, int.MaxValue, (long)int.MaxValue + 1, long.MaxValue, 1d, (double)int.MaxValue+1, (double)int.MaxValue*4)] object serverConnectionId, [Values(false, true)] bool async) { @@ -185,7 +182,7 @@ public void InitializeConnection_should_acquire_connectionId_from_legacy_hello_r connection.EnqueueReplyMessage(legacyHelloReply); var subject = CreateSubject(); - var result = InitializeConnection(subject, connection, async, CancellationToken.None); + var result = await InitializeConnection(subject, connection, async); var sentMessages = connection.GetSentMessages(); sentMessages.Should().HaveCount(1); @@ -194,7 +191,7 @@ public void InitializeConnection_should_acquire_connectionId_from_legacy_hello_r [Theory] [ParameterAttributeData] - public void InitializeConnection_should_call_Authenticator_CustomizeInitialHelloCommand( + public async Task InitializeConnection_should_call_Authenticator_CustomizeInitialHelloCommand( [Values("default", "SCRAM-SHA-256", "SCRAM-SHA-1")] string authenticatorType, [Values(false, true)] bool async) { @@ -209,14 +206,8 @@ public void InitializeConnection_should_call_Authenticator_CustomizeInitialHello var subject = CreateSubject(); // We expect authentication to fail since we have not enqueued the expected authentication replies - try - { - _ = InitializeConnection(subject, connection, async, CancellationToken.None); - } - catch (InvalidOperationException ex) - { - ex.Message.Should().Be("Queue empty."); - } + var exception = await Record.ExceptionAsync(() => InitializeConnection(subject, connection, async)); + exception.Message.Should().Be("Queue empty."); var sentMessages = connection.GetSentMessages(); var legacyHelloQuery = (QueryMessage)sentMessages[0]; @@ -233,7 +224,7 @@ public void InitializeConnection_should_call_Authenticator_CustomizeInitialHello [Theory] 
[ParameterAttributeData] - public void InitializeConnection_with_serverApi_should_send_hello([Values(false, true)] bool async) + public async Task InitializeConnection_with_serverApi_should_send_hello([Values(false, true)] bool async) { var serverApi = new ServerApi(ServerApiVersion.V1, true, true); @@ -243,7 +234,7 @@ public void InitializeConnection_with_serverApi_should_send_hello([Values(false, var subject = new ConnectionInitializer("test", new[] { new CompressorConfiguration(CompressorType.Zlib) }, serverApi, null); - var result = InitializeConnection(subject, connection, async, CancellationToken.None); + var result = await InitializeConnection(subject, connection, async); result.ConnectionId.LongServerValue.Should().Be(1); @@ -262,7 +253,7 @@ public void InitializeConnection_with_serverApi_should_send_hello([Values(false, [Theory] [ParameterAttributeData] - public void InitializeConnection_without_serverApi_should_send_legacy_hello([Values(false, true)] bool async) + public async Task InitializeConnection_without_serverApi_should_send_legacy_hello([Values(false, true)] bool async) { var connection = new MockConnection(__serverId); var helloReply = RawBsonDocumentHelper.FromJson($"{{ ok : 1, connectionId : 1, maxWireVersion : {WireVersion.Server42} }}"); @@ -270,7 +261,7 @@ public void InitializeConnection_without_serverApi_should_send_legacy_hello([Val var subject = CreateSubject(); - var result = InitializeConnection(subject, connection, async, CancellationToken.None); + var result = await InitializeConnection(subject, connection, async); result.ConnectionId.LongServerValue.Should().Be(1); @@ -288,7 +279,7 @@ public void InitializeConnection_without_serverApi_should_send_legacy_hello([Val [Theory] [ParameterAttributeData] - public void InitializeConnection_without_serverApi_but_with_loadBalancing_should_send_hello([Values(false, true)] bool async) + public async Task 
InitializeConnection_without_serverApi_but_with_loadBalancing_should_send_hello([Values(false, true)] bool async) { var connection = new MockConnection(__serverId, new ConnectionSettings(loadBalanced:true), null); var helloReply = RawBsonDocumentHelper.FromJson($"{{ ok : 1, connectionId : 1, maxWireVersion : {WireVersion.Server42}, serviceId : '{ObjectId.GenerateNewId()}' }}"); @@ -296,7 +287,7 @@ public void InitializeConnection_without_serverApi_but_with_loadBalancing_should var subject = CreateSubject(); - var result = InitializeConnection(subject, connection, async, CancellationToken.None); + var result = await InitializeConnection(subject, connection, async); result.ConnectionId.LongServerValue.Should().Be(1); @@ -315,7 +306,7 @@ public void InitializeConnection_without_serverApi_but_with_loadBalancing_should [Theory] [ParameterAttributeData] - public void InitializeConnection_should_build_the_ConnectionDescription_correctly( + public async Task InitializeConnection_should_build_the_ConnectionDescription_correctly( [Values("noop", "zlib", "snappy", "zstd")] string compressorType, [Values(false, true)] bool async) { @@ -328,7 +319,7 @@ public void InitializeConnection_should_build_the_ConnectionDescription_correctl connection.EnqueueCommandResponseMessage(gleReply); var subject = CreateSubject(); - var result = InitializeConnection(subject, connection, async, CancellationToken.None); + var result = await InitializeConnection(subject, connection, async); result.MaxWireVersion.Should().Be(6); result.ConnectionId.LongServerValue.Should().Be(10); @@ -351,7 +342,7 @@ CompressorType ToCompressorTypeEnum(string ct) [Theory] [ParameterAttributeData] - public void InitializeConnection_should_switch_command_wire_protocol_after_handshake_if_OP_MSG_is_supported( + public async Task InitializeConnection_should_switch_command_wire_protocol_after_handshake_if_OP_MSG_is_supported( [Values(false, true)] bool async) { var legacyHelloReply = MessageHelper.BuildReply( @@ -366,14 
+357,9 @@ public void InitializeConnection_should_switch_command_wire_protocol_after_hands var subject = CreateSubject(); // We expect authentication to fail since we have not enqueued the expected authentication replies - try - { - _ = InitializeConnection(subject, connection, async, CancellationToken.None); - } - catch (InvalidOperationException ex) - { - ex.Message.Should().Be("Queue empty."); - } + var exception = await Record.ExceptionAsync(() => InitializeConnection(subject, connection, async)); + exception.Message.Should().Be("Queue empty."); + var sentMessages = MessageHelper.TranslateMessagesToBsonDocuments(connection.GetSentMessages()); sentMessages.Count.Should().Be(2); @@ -417,20 +403,22 @@ private ConnectionInitializer CreateSubject(bool withServerApi = false) => serverApi: withServerApi ? new ServerApi(ServerApiVersion.V1) : null, libraryInfo: null); - private ConnectionDescription InitializeConnection(ConnectionInitializer connectionInitializer, MockConnection connection, bool async, CancellationToken cancellationToken) + private async Task<ConnectionDescription> InitializeConnection(ConnectionInitializer connectionInitializer, MockConnection connection, bool async) { ConnectionInitializerContext connectionInitializerContext; if (async) { - connectionInitializerContext = connectionInitializer.SendHelloAsync(connection, cancellationToken).GetAwaiter().GetResult(); + connectionInitializerContext = await connectionInitializer.SendHelloAsync(OperationContext.NoTimeout, connection); connection.Description = connectionInitializerContext.Description; - return connectionInitializer.AuthenticateAsync(connection, connectionInitializerContext, cancellationToken).GetAwaiter().GetResult().Description; + connectionInitializerContext = await connectionInitializer.AuthenticateAsync(OperationContext.NoTimeout, connection, connectionInitializerContext); + return connectionInitializerContext.Description; } else { - connectionInitializerContext = 
connectionInitializer.SendHello(connection, cancellationToken); + connectionInitializerContext = connectionInitializer.SendHello(OperationContext.NoTimeout, connection); connection.Description = connectionInitializerContext.Description; - return connectionInitializer.Authenticate(connection, connectionInitializerContext, cancellationToken).Description; + connectionInitializerContext = connectionInitializer.Authenticate(OperationContext.NoTimeout, connection, connectionInitializerContext); + return connectionInitializerContext.Description; } } } @@ -441,6 +429,6 @@ public static BsonDocument CreateInitialHelloCommand( this ConnectionInitializer initializer, IAuthenticator authenticator, bool loadBalanced) => - (BsonDocument)Reflector.Invoke(initializer, nameof(CreateInitialHelloCommand), authenticator, loadBalanced, CancellationToken.None); + (BsonDocument)Reflector.Invoke(initializer, nameof(CreateInitialHelloCommand), OperationContext.NoTimeout, authenticator, loadBalanced); } } diff --git a/tests/MongoDB.Driver.Tests/Core/Connections/HelloHelperTests.cs b/tests/MongoDB.Driver.Tests/Core/Connections/HelloHelperTests.cs index c16c62505ff..26782c91b55 100644 --- a/tests/MongoDB.Driver.Tests/Core/Connections/HelloHelperTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Connections/HelloHelperTests.cs @@ -44,7 +44,7 @@ public void CreateCommand_should_return_correct_hello_command(bool useServerApiV [Theory] [ParameterAttributeData] public void AddClientDocumentToCommand_with_custom_document_should_return_expected_result( - [Values("{ client : { driver : 'dotnet', version : '4.0.0' }, os : { type : 'Windows' } }")] + [Values("{ client : { driver : 'dotnet', version : '4.2.0' }, os : { type : 'Windows' } }")] string clientDocumentString) { var clientDocument = BsonDocument.Parse(clientDocumentString); diff --git a/tests/MongoDB.Driver.Tests/Core/Connections/Socks5AuthenticationSettingsTests.cs 
b/tests/MongoDB.Driver.Tests/Core/Connections/Socks5AuthenticationSettingsTests.cs new file mode 100644 index 00000000000..423f33967f8 --- /dev/null +++ b/tests/MongoDB.Driver.Tests/Core/Connections/Socks5AuthenticationSettingsTests.cs @@ -0,0 +1,114 @@ +/* Copyright 2010-present MongoDB Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +using System; +using FluentAssertions; +using Xunit; + +namespace MongoDB.Driver.Core.Connections; + +public class Socks5AuthenticationSettingsTests +{ + // 3 bytes * 86 = 258 bytes length when UTF8 encoded + private const string TooLong = + "€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€€"; + + [Fact] + public void None_should_return_NoAuthenticationSettings_instance() + { + var none = Socks5AuthenticationSettings.None; + none.Should().BeOfType<Socks5AuthenticationSettings.NoAuthenticationSettings>(); + } + + [Fact] + public void UsernamePassword_should_return_UsernamePasswordAuthenticationSettings_instance_with_correct_values() + { + var up = Socks5AuthenticationSettings.UsernamePassword("user", "pass"); + var upcast = up.Should().BeOfType<Socks5AuthenticationSettings.UsernamePasswordAuthenticationSettings>().Subject; + upcast.Username.Should().Be("user"); + upcast.Password.Should().Be("pass"); + } + + [Theory] + [InlineData(null)] + [InlineData("")] + public void 
UsernamePassword_should_throw_when_username_is_null_or_empty(string username) + { + var ex = Record.Exception(() => Socks5AuthenticationSettings.UsernamePassword(username, "pass")); + var argumentEx = ex.Should().BeAssignableTo<ArgumentException>().Subject; + argumentEx.ParamName.Should().Contain("username"); + } + + [Fact] + public void UsernamePassword_should_throw_when_username_is_too_long() + { + var ex = Record.Exception(() => Socks5AuthenticationSettings.UsernamePassword(TooLong, "password")); + var argumentEx = ex.Should().BeAssignableTo<ArgumentException>().Subject; + argumentEx.ParamName.Should().Contain("username"); + } + + [Theory] + [InlineData(null)] + [InlineData("")] + public void UsernamePassword_should_throw_when_password_is_null_or_empty(string password) + { + var ex = Record.Exception(() => Socks5AuthenticationSettings.UsernamePassword("username", password)); + var argumentEx = ex.Should().BeAssignableTo<ArgumentException>().Subject; + argumentEx.ParamName.Should().Contain("password"); + } + + [Fact] + public void UsernamePassword_should_throw_when_password_is_too_long() + { + var ex = Record.Exception(() => Socks5AuthenticationSettings.UsernamePassword("username", TooLong)); + var argumentEx = ex.Should().BeAssignableTo<ArgumentException>().Subject; + argumentEx.ParamName.Should().Contain("password"); + } + + [Fact] + public void NoAuthenticationSettings_Equals_and_GetHashCode_should_work_correctly() + { + var none = Socks5AuthenticationSettings.None; + none.Equals(Socks5AuthenticationSettings.None).Should().BeTrue(); + none.GetHashCode().Should().Be(Socks5AuthenticationSettings.None.GetHashCode()); + + var up = Socks5AuthenticationSettings.UsernamePassword("a", "b"); + none.Equals(up).Should().BeFalse(); + none.GetHashCode().Should().NotBe(up.GetHashCode()); + } + + [Theory] + [InlineData("u", "p", "u", "p", true)] + [InlineData("u", "p", "u", "x", false)] + [InlineData("u", "p", "x", "p", false)] + public void 
UsernamePasswordAuthenticationSettings_Equals_and_GetHashCode_should_work_correctly_with_UsernamePassword(string u1, string p1, string u2, string p2, bool areEqual) + { + var up1 = Socks5AuthenticationSettings.UsernamePassword(u1, p1); + + var up2 = Socks5AuthenticationSettings.UsernamePassword(u2, p2); + up1.Equals(up2).Should().Be(areEqual); + up1.GetHashCode().Equals(up2.GetHashCode()).Should().Be(areEqual); + } + + [Fact] + public void UsernamePasswordAuthenticationSettings_Equals_and_GetHashCode_should_work_correctly_with_None() + { + var up1 = Socks5AuthenticationSettings.UsernamePassword("u1", "p1"); + + var none = Socks5AuthenticationSettings.None; + up1.Equals(none).Should().BeFalse(); + up1.GetHashCode().Should().NotBe(none.GetHashCode()); + } +} \ No newline at end of file diff --git a/tests/MongoDB.Driver.Tests/Core/Connections/Socks5ProxySettingsTests.cs b/tests/MongoDB.Driver.Tests/Core/Connections/Socks5ProxySettingsTests.cs new file mode 100644 index 00000000000..5ee1978839d --- /dev/null +++ b/tests/MongoDB.Driver.Tests/Core/Connections/Socks5ProxySettingsTests.cs @@ -0,0 +1,133 @@ +/* Copyright 2010-present MongoDB Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +using System; +using FluentAssertions; +using Xunit; + +namespace MongoDB.Driver.Core.Connections; + +public class Socks5ProxySettingsTests +{ + [Fact] + public void Constructor_should_set_properties_correctly_with_host_only() + { + var settings = new Socks5ProxySettings("localhost"); + settings.Host.Should().Be("localhost"); + settings.Port.Should().Be(1080); + settings.Authentication.Should().Be(Socks5AuthenticationSettings.None); + } + + [Fact] + public void Constructor_should_set_properties_correctly_with_host_and_port() + { + var settings = new Socks5ProxySettings("localhost", 1234); + settings.Host.Should().Be("localhost"); + settings.Port.Should().Be(1234); + settings.Authentication.Should().Be(Socks5AuthenticationSettings.None); + } + + [Fact] + public void Constructor_should_set_properties_correctly_with_host_and_authentication() + { + var auth = Socks5AuthenticationSettings.UsernamePassword("user", "pass"); + var settings = new Socks5ProxySettings("localhost", auth); + settings.Host.Should().Be("localhost"); + settings.Port.Should().Be(1080); + settings.Authentication.Should().Be(auth); + } + + [Fact] + public void Constructor_should_set_properties_correctly_with_host_port_and_authentication() + { + var auth = Socks5AuthenticationSettings.UsernamePassword("user", "pass"); + var settings = new Socks5ProxySettings("localhost", 1234, auth); + settings.Host.Should().Be("localhost"); + settings.Port.Should().Be(1234); + settings.Authentication.Should().Be(auth); + } + + [Theory] + [InlineData(null)] + [InlineData("")] + public void Constructor_should_throw_when_host_is_null_or_empty(string host) + { + var ex = Record.Exception(() => new Socks5ProxySettings(host)); + ex.Should().BeAssignableTo<ArgumentException>(); + } + + [Fact] + public void Constructor_should_throw_when_host_is_too_long() + { + var host = new string('a', 256); + var ex = Record.Exception(() => new Socks5ProxySettings(host)); + ex.Should().BeAssignableTo<ArgumentException>(); + } + + 
[Theory] + [InlineData(0)] + [InlineData(65536)] + public void Constructor_should_throw_when_port_is_out_of_range(int port) + { + var ex = Record.Exception(() => new Socks5ProxySettings("localhost", port)); + ex.Should().BeOfType<ArgumentOutOfRangeException>(); + } + + [Fact] + public void Constructor_should_throw_when_authentication_is_null() + { + var ex = Record.Exception(() => new Socks5ProxySettings("localhost", 1080, null)); + ex.Should().BeOfType<ArgumentNullException>(); + } + + [Fact] + public void Equals_and_GetHashCode_should_work_for_Socks5ProxySettings() + { + var auth1 = Socks5AuthenticationSettings.UsernamePassword("user", "pass"); + var auth2 = Socks5AuthenticationSettings.UsernamePassword("user", "pass"); + var s1 = new Socks5ProxySettings("host", 1234, auth1); + var s2 = new Socks5ProxySettings("host", 1234, auth2); + s1.Equals(s2).Should().BeTrue(); + s1.GetHashCode().Should().Be(s2.GetHashCode()); + } + + [Fact] + public void ToString_should_return_expected_string_for_no_auth() + { + var s = new Socks5ProxySettings("host"); + var expected = "{ Host : host, Port : 1080, Authentication : None }"; + s.ToString().Should().Be(expected); + } + + [Fact] + public void ToString_should_return_expected_string_for_username_password_auth() + { + var s = new Socks5ProxySettings("host", 1234, Socks5AuthenticationSettings.UsernamePassword("u", "p")); + var expected = "{ Host : host, Port : 1234, Authentication : UsernamePassword }"; + s.ToString().Should().Be(expected); + } + + [Fact] + public void Create_should_return_expected_settings() + { + var s = Socks5ProxySettings.Create("host", 1234, "u", "p"); + s.Host.Should().Be("host"); + s.Port.Should().Be(1234); + s.Authentication.Should().BeOfType<Socks5AuthenticationSettings.UsernamePasswordAuthenticationSettings>(); + var up = (Socks5AuthenticationSettings.UsernamePasswordAuthenticationSettings)s.Authentication; + up.Username.Should().Be("u"); + up.Password.Should().Be("p"); + } +} \ No newline at end of file 
diff --git a/tests/MongoDB.Driver.Tests/Core/Connections/TcpStreamFactoryTests.cs b/tests/MongoDB.Driver.Tests/Core/Connections/TcpStreamFactoryTests.cs index 014f1b6a523..cfb8f8194c5 100644 --- a/tests/MongoDB.Driver.Tests/Core/Connections/TcpStreamFactoryTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Connections/TcpStreamFactoryTests.cs @@ -29,6 +29,7 @@ namespace MongoDB.Driver.Core.Connections { + [Trait("Category", "Integration")] public class TcpStreamFactoryTests { [Theory] diff --git a/tests/MongoDB.Driver.Tests/Core/DatabaseNamespaceTests.cs b/tests/MongoDB.Driver.Tests/Core/DatabaseNamespaceTests.cs index 0625103a527..f6496a91181 100644 --- a/tests/MongoDB.Driver.Tests/Core/DatabaseNamespaceTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/DatabaseNamespaceTests.cs @@ -76,7 +76,9 @@ public void SystemIndexesCollection_should_return_the_system_indexes_collection( { var subject = new DatabaseNamespace("test"); +#pragma warning disable CS0618 // Type or member is obsolete var commandCollection = subject.SystemIndexesCollection; +#pragma warning restore CS0618 // Type or member is obsolete commandCollection.FullName.Should().Be("test.system.indexes"); } @@ -85,7 +87,9 @@ public void SystemNamespacesCollection_should_return_the_system_namespaces_colle { var subject = new DatabaseNamespace("test"); +#pragma warning disable CS0618 // Type or member is obsolete var commandCollection = subject.SystemNamespacesCollection; +#pragma warning restore CS0618 // Type or member is obsolete commandCollection.FullName.Should().Be("test.system.namespaces"); } diff --git a/tests/MongoDB.Driver.Tests/Core/Helpers/MessageHelper.cs b/tests/MongoDB.Driver.Tests/Core/Helpers/MessageHelper.cs index 6af2baa3d27..bd73201dceb 100644 --- a/tests/MongoDB.Driver.Tests/Core/Helpers/MessageHelper.cs +++ b/tests/MongoDB.Driver.Tests/Core/Helpers/MessageHelper.cs @@ -225,7 +225,7 @@ public static List<BsonDocument> TranslateMessagesToBsonDocuments(byte[] bytes) return 
TranslateMessagesToBsonDocuments(TranslateBytesToRequests(bytes)); } - public static void WriteResponsesToStream(BlockingMemoryStream stream, IEnumerable<ResponseMessage> responses) + public static void WriteResponsesToStream(BlockingMemoryStream stream, params ResponseMessage[] responses) { lock (stream.Lock) { diff --git a/tests/MongoDB.Driver.Tests/Core/IAsyncCursorExtensionsTests.cs b/tests/MongoDB.Driver.Tests/Core/IAsyncCursorExtensionsTests.cs index aff630021b5..47b69d6da9a 100644 --- a/tests/MongoDB.Driver.Tests/Core/IAsyncCursorExtensionsTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/IAsyncCursorExtensionsTests.cs @@ -16,6 +16,8 @@ using System; using System.Collections.Generic; using System.Linq; +using System.Threading; +using System.Threading.Tasks; using FluentAssertions; using MongoDB.Bson; using MongoDB.Bson.Serialization.Serializers; @@ -201,6 +203,55 @@ public void SingleOrDefault_should_throw_when_cursor_has_wrong_number_of_documen action.ShouldThrow<InvalidOperationException>(); } + [Fact] + public void ToAsyncEnumerable_result_should_only_be_enumerable_one_time() + { + var cursor = CreateCursor(2); + var enumerable = cursor.ToAsyncEnumerable(); + enumerable.GetAsyncEnumerator(); + + Record.Exception(() => enumerable.GetAsyncEnumerator()).Should().BeOfType<InvalidOperationException>(); + } + + [Fact] + public async Task ToAsyncEnumerable_should_respect_cancellation_token() + { + var source = CreateCursor(5); + using var cts = new CancellationTokenSource(); + + var count = 0; + var exception = await Record.ExceptionAsync(async () => + { + await foreach (var doc in source.ToAsyncEnumerable().WithCancellation(cts.Token)) + { + count++; + if (count == 2) + cts.Cancel(); + } + }); + + exception.Should().BeOfType<OperationCanceledException>(); + } + + [Fact] + public async Task ToAsyncEnumerable_should_return_expected_result() + { + var cursor = CreateCursor(2); + var expectedDocuments = new[] + { + new BsonDocument("_id", 0), + new BsonDocument("_id", 
1) + }; + + var result = new List<BsonDocument>(); + await foreach (var doc in cursor.ToAsyncEnumerable()) + { + result.Add(doc); + } + + result.Should().Equal(expectedDocuments); + } + [Fact] public void ToEnumerable_result_should_only_be_enumerable_one_time() { diff --git a/tests/MongoDB.Driver.Tests/Core/IAsyncCursorSourceExtensionsTests.cs b/tests/MongoDB.Driver.Tests/Core/IAsyncCursorSourceExtensionsTests.cs index e2b8188ce43..dedd7cd59ae 100644 --- a/tests/MongoDB.Driver.Tests/Core/IAsyncCursorSourceExtensionsTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/IAsyncCursorSourceExtensionsTests.cs @@ -203,6 +203,31 @@ public void SingleOrDefault_should_throw_when_cursor_has_wrong_number_of_documen action.ShouldThrow<InvalidOperationException>(); } + [Theory] + [ParameterAttributeData] + public async Task ToAsyncEnumerable_result_should_be_enumerable_multiple_times( + [Values(1, 2)] int times) + { + var source = CreateCursorSource(2); + var expectedDocuments = new[] + { + new BsonDocument("_id", 0), + new BsonDocument("_id", 1) + }; + + var result = new List<BsonDocument>(); + for (var i = 0; i < times; i++) + { + await foreach (var doc in source.ToAsyncEnumerable()) + { + result.Add(doc); + } + + result.Should().Equal(expectedDocuments); + result.Clear(); + } + } + [Theory] [ParameterAttributeData] public void ToEnumerable_result_should_be_enumerable_multiple_times( diff --git a/tests/MongoDB.Driver.Tests/Core/Jira/CSharp3173Tests.cs b/tests/MongoDB.Driver.Tests/Core/Jira/CSharp3173Tests.cs index bdf4438a008..92bac389e2b 100644 --- a/tests/MongoDB.Driver.Tests/Core/Jira/CSharp3173Tests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Jira/CSharp3173Tests.cs @@ -75,7 +75,7 @@ public void Ensure_command_network_error_before_handshake_is_correctly_handled([ // The next hello or legacy hello response will be delayed because the waiting in the mock.Callbacks cluster.Initialize(); - var selectedServer = cluster.SelectServer(CreateWritableServerAndEndPointSelector(__endPoint1), 
CancellationToken.None); + var selectedServer = cluster.SelectServer(OperationContext.NoTimeout, CreateWritableServerAndEndPointSelector(__endPoint1)); initialSelectedEndpoint = selectedServer.EndPoint; initialSelectedEndpoint.Should().Be(__endPoint1); @@ -86,11 +86,11 @@ public void Ensure_command_network_error_before_handshake_is_correctly_handled([ Exception exception; if (async) { - exception = Record.Exception(() => selectedServer.GetChannelAsync(CancellationToken.None).GetAwaiter().GetResult()); + exception = Record.Exception(() => selectedServer.GetChannelAsync(OperationContext.NoTimeout).GetAwaiter().GetResult()); } else { - exception = Record.Exception(() => selectedServer.GetChannel(CancellationToken.None)); + exception = Record.Exception(() => selectedServer.GetChannel(OperationContext.NoTimeout)); } var e = exception.Should().BeOfType<MongoConnectionException>().Subject; @@ -107,7 +107,7 @@ public void Ensure_command_network_error_before_handshake_is_correctly_handled([ } // ensure that a new server can be selected - selectedServer = cluster.SelectServer(WritableServerSelector.Instance, CancellationToken.None); + selectedServer = cluster.SelectServer(OperationContext.NoTimeout, WritableServerSelector.Instance); // ensure that the selected server is not the same as the initial selectedServer.EndPoint.Should().Be(__endPoint2); @@ -187,11 +187,11 @@ void SetupConnectionPool(Mock<IConnectionPool> mockConnectionPool, IConnectionHa { var dnsException = CreateDnsException(connection.ConnectionId, from: "pool"); mockConnectionPool - .Setup(c => c.AcquireConnection(It.IsAny<CancellationToken>())) + .Setup(c => c.AcquireConnection(It.IsAny<OperationContext>())) .Callback(() => exceptionHandlerProvider().HandleExceptionOnOpen(dnsException)) .Throws(dnsException); // throw command dns exception mockConnectionPool - .Setup(c => c.AcquireConnectionAsync(It.IsAny<CancellationToken>())) + .Setup(c => c.AcquireConnectionAsync(It.IsAny<OperationContext>())) .Callback(() 
=> exceptionHandlerProvider().HandleExceptionOnOpen(dnsException)) .Throws(dnsException); // throw command dns exception } @@ -253,8 +253,9 @@ private MultiServerCluster CreateAndSetupCluster(TaskCompletionSource<bool> hasN serverApi: null); var serverMonitorSettings = new ServerMonitorSettings( - connectTimeout: TimeSpan.FromMilliseconds(1), - heartbeatInterval: __heartbeatInterval); + ConnectTimeout: TimeSpan.FromMilliseconds(20), + HeartbeatTimeout: TimeSpan.FromMilliseconds(10), + HeartbeatInterval: __heartbeatInterval); var serverSettings = new ServerSettings(serverMonitorSettings.HeartbeatInterval); MultiServerCluster cluster = null; @@ -289,7 +290,7 @@ private IServerSelector CreateWritableServerAndEndPointSelector(EndPoint endPoin private void ForceClusterId(MultiServerCluster cluster, ClusterId clusterId) { Reflector.SetFieldValue(cluster, "_clusterId", clusterId); - Reflector.SetFieldValue(cluster, "_descriptionWithChangedTaskCompletionSource", new Cluster.ClusterDescriptionChangeSource(ClusterDescription.CreateInitial(clusterId, __directConnection))); + Reflector.SetFieldValue(cluster, "_expirableClusterDescription", new Cluster.ExpirableClusterDescription(cluster, ClusterDescription.CreateInitial(clusterId, __directConnection))); } private void SetupServerMonitorConnection( @@ -301,7 +302,7 @@ private void SetupServerMonitorConnection( bool streamable) { var connectionId = new ConnectionId(serverId); - var maxWireVersion = streamable ? WireVersion.Server44 : WireVersion.Server40; + var maxWireVersion = streamable ? 
WireVersion.Server44 : WireVersion.Server42; var helloDocument = new BsonDocument { { "ok", 1 }, @@ -353,7 +354,7 @@ void SetupFailedConnection(Mock<IConnection> mockFaultyConnection) () => WaitForTaskOrTimeout(hasClusterBeenDisposed.Task, TimeSpan.FromMinutes(1), "cluster dispose") }); mockFaultyConnection - .Setup(c => c.Open(It.IsAny<CancellationToken>())) + .Setup(c => c.Open(It.IsAny<OperationContext>())) .Callback(() => { var responseAction = faultyConnectionResponses.Dequeue(); @@ -361,7 +362,7 @@ void SetupFailedConnection(Mock<IConnection> mockFaultyConnection) }); mockFaultyConnection - .Setup(c => c.ReceiveMessage(It.IsAny<int>(), It.IsAny<IMessageEncoderSelector>(), It.IsAny<MessageEncoderSettings>(), It.IsAny<CancellationToken>())) + .Setup(c => c.ReceiveMessage(It.IsAny<OperationContext>(), It.IsAny<int>(), It.IsAny<IMessageEncoderSelector>(), It.IsAny<MessageEncoderSettings>())) .Returns(() => { WaitForTaskOrTimeout( @@ -374,13 +375,13 @@ void SetupFailedConnection(Mock<IConnection> mockFaultyConnection) void SetupHealthyConnection(Mock<IConnection> mockHealthyConnection) { - mockHealthyConnection.Setup(c => c.Open(It.IsAny<CancellationToken>())); // no action is required - mockHealthyConnection.Setup(c => c.OpenAsync(It.IsAny<CancellationToken>())).Returns(Task.FromResult(true)); // no action is required + mockHealthyConnection.Setup(c => c.Open(It.IsAny<OperationContext>())); // no action is required + mockHealthyConnection.Setup(c => c.OpenAsync(It.IsAny<OperationContext>())).Returns(Task.FromResult(true)); // no action is required mockHealthyConnection - .Setup(c => c.ReceiveMessage(It.IsAny<int>(), It.IsAny<IMessageEncoderSelector>(), It.IsAny<MessageEncoderSettings>(), It.IsAny<CancellationToken>())) + .Setup(c => c.ReceiveMessage(It.IsAny<OperationContext>(), It.IsAny<int>(), It.IsAny<IMessageEncoderSelector>(), It.IsAny<MessageEncoderSettings>())) .Returns(commandResponseAction); mockConnection - .Setup(c => 
c.ReceiveMessageAsync(It.IsAny<int>(), It.IsAny<IMessageEncoderSelector>(), It.IsAny<MessageEncoderSettings>(), It.IsAny<CancellationToken>())) + .Setup(c => c.ReceiveMessageAsync(It.IsAny<OperationContext>(), It.IsAny<int>(), It.IsAny<IMessageEncoderSelector>(), It.IsAny<MessageEncoderSettings>())) .ReturnsAsync(commandResponseAction); } } diff --git a/tests/MongoDB.Driver.Tests/Core/Jira/CSharp3302Tests.cs b/tests/MongoDB.Driver.Tests/Core/Jira/CSharp3302Tests.cs index 2a2a3f024d2..792344ad6de 100644 --- a/tests/MongoDB.Driver.Tests/Core/Jira/CSharp3302Tests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Jira/CSharp3302Tests.cs @@ -96,7 +96,7 @@ public async Task RapidHeartbeatTimerCallback_should_ignore_reentrant_calls() cluster.Initialize(); // Trigger Cluster._rapidHeartbeatTimer - var _ = cluster.SelectServerAsync(CreateWritableServerAndEndPointSelector(__endPoint1), CancellationToken.None); + _ = cluster.SelectServerAsync(OperationContext.NoTimeout, CreateWritableServerAndEndPointSelector(__endPoint1)); // Wait for all heartbeats to complete await Task.WhenAny(allHeartbeatsReceived.Task, Task.Delay(1000)); @@ -142,13 +142,13 @@ public async Task Ensure_no_deadlock_after_primary_update() server.DescriptionChanged += ProcessServerDescriptionChanged; } - var selectedServer = cluster.SelectServer(CreateWritableServerAndEndPointSelector(__endPoint1), CancellationToken.None); + var selectedServer = cluster.SelectServer(OperationContext.NoTimeout, CreateWritableServerAndEndPointSelector(__endPoint1)); initialSelectedEndpoint = selectedServer.EndPoint; initialSelectedEndpoint.Should().Be(__endPoint1); // Change primary currentPrimaries.Add(__serverId2); - selectedServer = cluster.SelectServer(CreateWritableServerAndEndPointSelector(__endPoint2), CancellationToken.None); + selectedServer = cluster.SelectServer(OperationContext.NoTimeout, CreateWritableServerAndEndPointSelector(__endPoint2)); selectedServer.EndPoint.Should().Be(__endPoint2); // Ensure stalling happened @@ 
-198,10 +198,10 @@ void SetupConnection(Mock<IConnectionHandle> mockConnectionHandle, ServerId serv void SetupConnectionPool(Mock<IConnectionPool> mockConnectionPool, IConnectionHandle connection) { mockConnectionPool - .Setup(c => c.AcquireConnection(It.IsAny<CancellationToken>())) + .Setup(c => c.AcquireConnection(It.IsAny<OperationContext>())) .Returns(connection); mockConnectionPool - .Setup(c => c.AcquireConnectionAsync(It.IsAny<CancellationToken>())) + .Setup(c => c.AcquireConnectionAsync(It.IsAny<OperationContext>())) .Returns(Task.FromResult(connection)); } @@ -243,8 +243,9 @@ private MultiServerCluster CreateAndSetupCluster(HashSet<ServerId> primaries) endPoints: serverInfoCollection.Select(c => c.Endpoint).ToArray()); var serverMonitorSettings = new ServerMonitorSettings( - connectTimeout: TimeSpan.FromMilliseconds(1), - heartbeatInterval: __heartbeatInterval); + ConnectTimeout: TimeSpan.FromMilliseconds(20), + HeartbeatTimeout: TimeSpan.FromMilliseconds(10), + HeartbeatInterval: __heartbeatInterval); var serverSettings = new ServerSettings(serverMonitorSettings.HeartbeatInterval); var eventCapturer = new EventCapturer(); @@ -270,7 +271,7 @@ private IServerSelector CreateWritableServerAndEndPointSelector(EndPoint endPoin private void ForceClusterId(MultiServerCluster cluster, ClusterId clusterId) { Reflector.SetFieldValue(cluster, "_clusterId", clusterId); - Reflector.SetFieldValue(cluster, "_descriptionWithChangedTaskCompletionSource", new Cluster.ClusterDescriptionChangeSource(ClusterDescription.CreateInitial(clusterId, __directConnection))); + Reflector.SetFieldValue(cluster, "_expirableClusterDescription", new Cluster.ExpirableClusterDescription(cluster, ClusterDescription.CreateInitial(clusterId, __directConnection))); } private void SetupServerMonitorConnection( @@ -282,8 +283,8 @@ private void SetupServerMonitorConnection( var baseDocument = new BsonDocument { { "ok", 1 }, - { "minWireVersion", WireVersion.Server36 }, - { "maxWireVersion", 
WireVersion.Server40 }, + { "minWireVersion", WireVersion.Server42 }, + { "maxWireVersion", WireVersion.Server44 }, { "setName", "rs" }, { "hosts", new BsonArray(new [] { "localhost:27017", "localhost:27018" })}, { "topologyVersion", new TopologyVersion(ObjectId.Empty, 1).ToBsonDocument(), false } @@ -303,9 +304,9 @@ private void SetupServerMonitorConnection( .SetupGet(c => c.Description) .Returns(GetConnectionDescription); - mockConnection.Setup(c => c.Open(It.IsAny<CancellationToken>())); // no action is required + mockConnection.Setup(c => c.Open(It.IsAny<OperationContext>())); // no action is required mockConnection - .Setup(c => c.ReceiveMessage(It.IsAny<int>(), It.IsAny<IMessageEncoderSelector>(), It.IsAny<MessageEncoderSettings>(), It.IsAny<CancellationToken>())) + .Setup(c => c.ReceiveMessage(It.IsAny<OperationContext>(), It.IsAny<int>(), It.IsAny<IMessageEncoderSelector>(), It.IsAny<MessageEncoderSettings>())) .Returns(GetHelloResponse); ResponseMessage GetHelloResponse() diff --git a/tests/MongoDB.Driver.Tests/Core/LoadBalancingIntergationTests.cs b/tests/MongoDB.Driver.Tests/Core/LoadBalancingIntergationTests.cs index d8a6095aaf1..a433e87fa5e 100644 --- a/tests/MongoDB.Driver.Tests/Core/LoadBalancingIntergationTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/LoadBalancingIntergationTests.cs @@ -15,7 +15,6 @@ using System; using System.Collections.Generic; -using System.Threading; using FluentAssertions; using MongoDB.Bson; using MongoDB.Bson.Serialization.Serializers; @@ -615,11 +614,11 @@ private BulkWriteOperationResult CreateAndRunBulkOperation(RetryableWriteContext if (async) { - return bulkInsertOperation.ExecuteAsync(context, CancellationToken.None).GetAwaiter().GetResult(); + return bulkInsertOperation.ExecuteAsync(OperationContext.NoTimeout, context).GetAwaiter().GetResult(); } else { - return bulkInsertOperation.Execute(context, CancellationToken.None); + return bulkInsertOperation.Execute(OperationContext.NoTimeout, context); } } @@ -635,19 
+634,19 @@ private IAsyncCursor<BsonDocument> CreateAndRunFindOperation(RetryableReadContex if (async) { - return findOperation.ExecuteAsync(context, CancellationToken.None).GetAwaiter().GetResult(); + return findOperation.ExecuteAsync(OperationContext.NoTimeout, context).GetAwaiter().GetResult(); } else { - return findOperation.Execute(context, CancellationToken.None); + return findOperation.Execute(OperationContext.NoTimeout, context); } } private RetryableReadContext CreateRetryableReadContext(IReadBindingHandle readBindingHandle, bool async) { return async - ? RetryableReadContext.CreateAsync(readBindingHandle, retryRequested: false, CancellationToken.None).GetAwaiter().GetResult() - : RetryableReadContext.Create(readBindingHandle, retryRequested: false, CancellationToken.None); + ? RetryableReadContext.CreateAsync(OperationContext.NoTimeout, readBindingHandle, retryRequested: false).GetAwaiter().GetResult() + : RetryableReadContext.Create(OperationContext.NoTimeout, readBindingHandle, retryRequested: false); } private DisposableBindingBundle<IReadBindingHandle, RetryableReadContext> CreateReadBindingsAndRetryableReadContext(IClusterInternal cluster, ICoreSessionHandle sessionHandle, bool async) @@ -663,8 +662,8 @@ private DisposableBindingBundle<IReadBindingHandle, RetryableReadContext> Create private RetryableWriteContext CreateRetryableWriteContext(IReadWriteBindingHandle readWriteBindingHandle, bool async) { return async - ? RetryableWriteContext.CreateAsync(readWriteBindingHandle, retryRequested: false, CancellationToken.None).GetAwaiter().GetResult() - : RetryableWriteContext.Create(readWriteBindingHandle, retryRequested: false, CancellationToken.None); + ? 
RetryableWriteContext.CreateAsync(OperationContext.NoTimeout, readWriteBindingHandle, retryRequested: false).GetAwaiter().GetResult() + : RetryableWriteContext.Create(OperationContext.NoTimeout, readWriteBindingHandle, retryRequested: false); } private DisposableBindingBundle<IReadWriteBindingHandle, RetryableWriteContext> CreateReadWriteBindingsAndRetryableWriteContext(IClusterInternal cluster, ICoreSessionHandle sessionHandle, bool async) diff --git a/tests/MongoDB.Driver.Tests/Core/Logging/EventLoggerTests.cs b/tests/MongoDB.Driver.Tests/Core/Logging/EventLoggerTests.cs index 2d8e97168dd..8d07de219b4 100644 --- a/tests/MongoDB.Driver.Tests/Core/Logging/EventLoggerTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Logging/EventLoggerTests.cs @@ -94,7 +94,7 @@ private static IEnumerable<object[]> EventsData() (new LogCategories.Command(), new CommandStartedEvent("test", new Bson.BsonDocument(), new DatabaseNamespace("test"), 1, 1, connectionId)), (new LogCategories.Connection(), new ConnectionCreatedEvent(connectionId, null, 1)), (new LogCategories.SDAM(), new ServerHeartbeatStartedEvent(connectionId, true)), - (new LogCategories.ServerSelection(), new ClusterSelectingServerEvent(clusterDescription, new RandomServerSelector(), default, default)) + (new LogCategories.ServerSelection(), new ClusterSelectingServerEvent(clusterDescription, Mock.Of<IServerSelector>(), default, default)) }; var booleanValues = new[] { true, false }; diff --git a/tests/MongoDB.Driver.Tests/Core/Misc/FrozenClock.cs b/tests/MongoDB.Driver.Tests/Core/Misc/FrozenClock.cs index dfffd3a3e71..1026101fc0d 100644 --- a/tests/MongoDB.Driver.Tests/Core/Misc/FrozenClock.cs +++ b/tests/MongoDB.Driver.Tests/Core/Misc/FrozenClock.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -14,11 +14,10 @@ */ using System; -using MongoDB.Driver.Core.Misc; namespace MongoDB.Driver.Core.Misc { - public class FrozenClock : IClock + internal class FrozenClock : IClock { // public static methods public static FrozenClock FreezeUtcNow() @@ -36,10 +35,18 @@ public FrozenClock(DateTime utcNow) } // public properties + public long Frequency => TimeSpan.TicksPerSecond; + public DateTime UtcNow { get { return _utcNow; } - set { _utcNow = value; } + } + + public long GetTimestamp() => UtcNow.Ticks; + + public void AdvanceCurrentTime(TimeSpan timeSpan) + { + _utcNow += timeSpan; } } } diff --git a/tests/MongoDB.Driver.Tests/Core/Misc/MetronomeTests.cs b/tests/MongoDB.Driver.Tests/Core/Misc/MetronomeTests.cs index 1444877e130..cad299155c0 100644 --- a/tests/MongoDB.Driver.Tests/Core/Misc/MetronomeTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Misc/MetronomeTests.cs @@ -15,9 +15,7 @@ using System; using System.Threading; -using System.Threading.Tasks; using FluentAssertions; -using MongoDB.Driver.Core.Async; using MongoDB.Driver.Core.Misc; using Xunit; @@ -73,7 +71,7 @@ public void GetNextTickDelay_should_be_infinite_if_period_is_infinite() public void GetNextTickDelay_should_be_threeQuarterPeriod_when_oneQuarterPeriod_past_the_last_tick() { var now = _clock.UtcNow; - _clock.UtcNow = _clock.UtcNow.Add(_quarterPeriod); + _clock.AdvanceCurrentTime(_quarterPeriod); _subject.GetNextTickDelay().Should().Be(_threeQuarterPeriod); _subject.NextTick.Should().Be(now + _period); } @@ -89,7 +87,7 @@ public void GetNextTickDelay_should_be_zero_when_first_instantiated() public void GetNextTickDelay_should_be_zero_when_time_equals_nextTick() { var now = _clock.UtcNow; - _clock.UtcNow = _clock.UtcNow.Add(_period); + _clock.AdvanceCurrentTime(_period); _subject.GetNextTickDelay().Should().Be(TimeSpan.Zero); _subject.NextTick.Should().Be(now + _period); } @@ -98,11 +96,11 @@ public void GetNextTickDelay_should_be_zero_when_time_equals_nextTick() public void 
GetNextTickDelay_should_not_advance_nextTick_when_called_more_than_once_during_the_same_period() { var now = _clock.UtcNow; - _clock.UtcNow = _clock.UtcNow.Add(_quarterPeriod); + _clock.AdvanceCurrentTime(_quarterPeriod); _subject.GetNextTickDelay().Should().Be(_threeQuarterPeriod); _subject.NextTick.Should().Be(now + _period); - _clock.UtcNow = _clock.UtcNow.Add(_quarterPeriod); + _clock.AdvanceCurrentTime(_quarterPeriod); _subject.GetNextTickDelay().Should().Be(_halfPeriod); _subject.NextTick.Should().Be(now + _period); } @@ -111,7 +109,7 @@ public void GetNextTickDelay_should_not_advance_nextTick_when_called_more_than_o public void GetNextTickDelay_should_skip_one_missed_tick() { var now = _clock.UtcNow; - _clock.UtcNow = _clock.UtcNow.Add(_period + _quarterPeriod); + _clock.AdvanceCurrentTime(_period + _quarterPeriod); _subject.GetNextTickDelay().Should().Be(_threeQuarterPeriod); _subject.NextTick.Should().Be(now + _period + _period); } @@ -120,7 +118,7 @@ public void GetNextTickDelay_should_skip_one_missed_tick() public void GetNextTickDelay_should_skip_two_missed_ticks() { var now = _clock.UtcNow; - _clock.UtcNow = _clock.UtcNow.Add(_period + _period + _quarterPeriod); + _clock.AdvanceCurrentTime(_period + _period + _quarterPeriod); _subject.GetNextTickDelay().Should().Be(_threeQuarterPeriod); _subject.NextTick.Should().Be(now + _period + _period + _period); } diff --git a/tests/MongoDB.Driver.Tests/Core/Misc/SemaphoreSlimRequestTests.cs b/tests/MongoDB.Driver.Tests/Core/Misc/SemaphoreSlimRequestTests.cs deleted file mode 100644 index 7dca0e28f9a..00000000000 --- a/tests/MongoDB.Driver.Tests/Core/Misc/SemaphoreSlimRequestTests.cs +++ /dev/null @@ -1,125 +0,0 @@ -/* Copyright 2015-present MongoDB Inc. -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. 
-* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ - -using System; -using System.Threading; -using System.Threading.Tasks; -using FluentAssertions; -using Xunit; - -namespace MongoDB.Driver.Core.Misc -{ - public class SemaphoreSlimRequestTests - { - // public methods - [Fact] - public void constructor_should_initialize_instance_with_completed_task_when_semaphore_is_available() - { - var semaphore = new SemaphoreSlim(1); - - var result = new SemaphoreSlimRequest(semaphore, CancellationToken.None); - - result.Task.Status.Should().Be(TaskStatus.RanToCompletion); - semaphore.CurrentCount.Should().Be(0); - } - - [Fact] - public void constructor_should_initialize_instance_with_incompleted_task_when_semaphore_is_not_available() - { - var semaphore = new SemaphoreSlim(1); - semaphore.Wait(); - - var result = new SemaphoreSlimRequest(semaphore, CancellationToken.None); - - result.Task.IsCompleted.Should().BeFalse(); - semaphore.CurrentCount.Should().Be(0); - } - - [Fact] - public void constructor_should_throw_when_semaphore_is_null() - { - Action action = () => new SemaphoreSlimRequest(null, CancellationToken.None); - - action.ShouldThrow<ArgumentNullException>().And.ParamName.Should().Be("semaphore"); - } - - [Fact] - public void Dispose_should_cancel_pending_request() - { - var semaphore = new SemaphoreSlim(1); - semaphore.Wait(); - var subject = new SemaphoreSlimRequest(semaphore, CancellationToken.None); - - subject.Dispose(); - semaphore.Release(); - - subject.Task.Status.Should().Be(TaskStatus.Canceled); - semaphore.CurrentCount.Should().Be(1); - } - - [Fact] - public 
void Dispose_should_release_semaphore() - { - var semaphore = new SemaphoreSlim(1); - var subject = new SemaphoreSlimRequest(semaphore, CancellationToken.None); - - subject.Dispose(); - - semaphore.CurrentCount.Should().Be(1); - } - - [Fact] - public void Sempahore_should_not_be_released_when_cancellation_is_requested_after_semaphore_is_acquired() - { - var semaphore = new SemaphoreSlim(1); - using var cancellationTokenSource = new CancellationTokenSource(); - var subject = new SemaphoreSlimRequest(semaphore, cancellationTokenSource.Token); - - cancellationTokenSource.Cancel(); - - semaphore.CurrentCount.Should().Be(0); - } - - [Fact] - public void Task_should_be_cancelled_when_cancellationToken_requests_cancellation() - { - var semaphore = new SemaphoreSlim(1); - using var cancellationTokenSource = new CancellationTokenSource(); - semaphore.Wait(); - var subject = new SemaphoreSlimRequest(semaphore, cancellationTokenSource.Token); - - cancellationTokenSource.Cancel(); - SpinWait.SpinUntil(() => subject.Task.IsCompleted, TimeSpan.FromSeconds(5)).Should().BeTrue(); - semaphore.Release(); - - subject.Task.Status.Should().Be(TaskStatus.Canceled); - semaphore.CurrentCount.Should().Be(1); - } - - [Fact] - public void Task_should_be_completed_when_semaphore_becomes_available() - { - var semaphore = new SemaphoreSlim(1); - semaphore.Wait(); - var subject = new SemaphoreSlimRequest(semaphore, CancellationToken.None); - - semaphore.Release(); - SpinWait.SpinUntil(() => subject.Task.IsCompleted, TimeSpan.FromSeconds(5)).Should().BeTrue(); - - subject.Task.Status.Should().Be(TaskStatus.RanToCompletion); - semaphore.CurrentCount.Should().Be(0); - } - } -} diff --git a/tests/MongoDB.Driver.Tests/Core/Misc/StreamExtensionMethodsTests.cs b/tests/MongoDB.Driver.Tests/Core/Misc/StreamExtensionMethodsTests.cs index bf17fa539c6..8da7e5f7de8 100644 --- a/tests/MongoDB.Driver.Tests/Core/Misc/StreamExtensionMethodsTests.cs +++ 
b/tests/MongoDB.Driver.Tests/Core/Misc/StreamExtensionMethodsTests.cs @@ -14,10 +14,7 @@ */ using System; -using System.Collections.Generic; using System.IO; -using System.Linq; -using System.Text; using System.Threading; using System.Threading.Tasks; using FluentAssertions; @@ -31,272 +28,413 @@ namespace MongoDB.Driver.Core.Misc public class StreamExtensionMethodsTests { [Theory] - [InlineData(0, new byte[] { 0, 0 })] - [InlineData(1, new byte[] { 1, 0 })] - [InlineData(2, new byte[] { 1, 2 })] - public async Task ReadBytesAsync_with_byte_array_should_have_expected_effect_for_count(int count, byte[] expectedBytes) + [InlineData(true, 0, new byte[] { 0, 0 })] + [InlineData(true, 1, new byte[] { 1, 0 })] + [InlineData(true, 2, new byte[] { 1, 2 })] + [InlineData(false, 0, new byte[] { 0, 0 })] + [InlineData(false, 1, new byte[] { 1, 0 })] + [InlineData(false, 2, new byte[] { 1, 2 })] + public async Task ReadBytes_with_byte_array_should_have_expected_effect_for_count(bool async, int count, byte[] expectedBytes) { var bytes = new byte[] { 1, 2 }; var stream = new MemoryStream(bytes); var destination = new byte[2]; - await stream.ReadBytesAsync(destination, 0, count, Timeout.InfiniteTimeSpan, CancellationToken.None); + if (async) + { + await stream.ReadBytesAsync(OperationContext.NoTimeout, destination, 0, count, Timeout.InfiniteTimeSpan); + } + else + { + stream.ReadBytes(OperationContext.NoTimeout, destination, 0, count, Timeout.InfiniteTimeSpan); + } destination.Should().Equal(expectedBytes); } [Theory] - [InlineData(1, new byte[] { 0, 1, 0 })] - [InlineData(2, new byte[] { 0, 0, 1 })] - public async Task ReadBytesAsync_with_byte_array_should_have_expected_effect_for_offset(int offset, byte[] expectedBytes) + [InlineData(true, 1, new byte[] { 0, 1, 0 })] + [InlineData(true, 2, new byte[] { 0, 0, 1 })] + [InlineData(false, 1, new byte[] { 0, 1, 0 })] + [InlineData(false, 2, new byte[] { 0, 0, 1 })] + public async Task 
ReadBytes_with_byte_array_should_have_expected_effect_for_offset(bool async, int offset, byte[] expectedBytes) { var bytes = new byte[] { 1 }; var stream = new MemoryStream(bytes); var destination = new byte[3]; - await stream.ReadBytesAsync(destination, offset, 1, Timeout.InfiniteTimeSpan, CancellationToken.None); + if (async) + { + await stream.ReadBytesAsync(OperationContext.NoTimeout, destination, offset, 1, Timeout.InfiniteTimeSpan); + } + else + { + stream.ReadBytes(OperationContext.NoTimeout, destination, offset, 1, Timeout.InfiniteTimeSpan); + } destination.Should().Equal(expectedBytes); } [Theory] - [InlineData(1, new[] { 3 })] - [InlineData(2, new[] { 1, 2 })] - [InlineData(3, new[] { 2, 1 })] - [InlineData(4, new[] { 1, 1, 1 })] - public async Task ReadBytesAsync_with_byte_array_should_have_expected_effect_for_partial_reads(int testCase, int[] partition) + [InlineData(true, 1, new[] { 3 })] + [InlineData(true, 2, new[] { 1, 2 })] + [InlineData(true, 3, new[] { 2, 1 })] + [InlineData(true, 4, new[] { 1, 1, 1 })] + [InlineData(false, 1, new[] { 3 })] + [InlineData(false, 2, new[] { 1, 2 })] + [InlineData(false, 3, new[] { 2, 1 })] + [InlineData(false, 4, new[] { 1, 1, 1 })] + public async Task ReadBytes_with_byte_array_should_have_expected_effect_for_partial_reads(bool async, int testCase, int[] partition) { var mockStream = new Mock<Stream>(); var bytes = new byte[] { 1, 2, 3 }; var n = 0; var position = 0; + Task<int> ReadPartial (byte[] buffer, int offset, int count) + { + var length = partition[n++]; + Buffer.BlockCopy(bytes, position, buffer, offset, length); + position += length; + return Task.FromResult(length); + } + mockStream.Setup(s => s.ReadAsync(It.IsAny<byte[]>(), It.IsAny<int>(), It.IsAny<int>(), It.IsAny<CancellationToken>())) - .Returns((byte[] buffer, int offset, int count, CancellationToken cancellationToken) => - { - var length = partition[n++]; - Buffer.BlockCopy(bytes, position, buffer, offset, length); - position += length; - return 
Task.FromResult(length); - }); + .Returns((byte[] buffer, int offset, int count, CancellationToken cancellationToken) => ReadPartial(buffer, offset, count)); + mockStream.Setup(s => s.BeginRead(It.IsAny<byte[]>(), It.IsAny<int>(), It.IsAny<int>(), It.IsAny<AsyncCallback>(), It.IsAny<object>())) + .Returns((byte[] buffer, int offset, int count, AsyncCallback callback, object state) => ReadPartial(buffer, offset, count)); + mockStream.Setup(s => s.EndRead(It.IsAny<IAsyncResult>())) + .Returns<IAsyncResult>(x => ((Task<int>)x).GetAwaiter().GetResult()); var destination = new byte[3]; - await mockStream.Object.ReadBytesAsync(destination, 0, 3, Timeout.InfiniteTimeSpan, CancellationToken.None); + if (async) + { + await mockStream.Object.ReadBytesAsync(OperationContext.NoTimeout, destination, 0, 3, Timeout.InfiniteTimeSpan); + } + else + { + mockStream.Object.ReadBytes(OperationContext.NoTimeout, destination, 0, 3, Timeout.InfiniteTimeSpan); + } destination.Should().Equal(bytes); } - [Fact] - public void ReadBytesAsync_with_byte_array_should_throw_when_end_of_stream_is_reached() + [Theory] + [ParameterAttributeData] + public async Task ReadBytes_with_byte_array_should_throw_when_end_of_stream_is_reached([Values(true, false)]bool async) { var mockStream = new Mock<Stream>(); var destination = new byte[1]; - mockStream.Setup(s => s.ReadAsync(destination, 0, 1, It.IsAny<CancellationToken>())).Returns(Task.FromResult(0)); + mockStream.Setup(s => s.ReadAsync(destination, 0, 1, It.IsAny<CancellationToken>())) + .ReturnsAsync(0); + mockStream.Setup(s => s.BeginRead(It.IsAny<byte[]>(), It.IsAny<int>(), It.IsAny<int>(), It.IsAny<AsyncCallback>(), It.IsAny<object>())) + .Returns(Task.FromResult(0)); - Func<Task> action = () => mockStream.Object.ReadBytesAsync(destination, 0, 1, Timeout.InfiniteTimeSpan, CancellationToken.None); + var exception = async ? 
+ await Record.ExceptionAsync(() => mockStream.Object.ReadBytesAsync(OperationContext.NoTimeout, destination, 0, 1, Timeout.InfiniteTimeSpan)) : + Record.Exception(() => mockStream.Object.ReadBytes(OperationContext.NoTimeout, destination, 0, 1, Timeout.InfiniteTimeSpan)); - action.ShouldThrow<EndOfStreamException>(); + exception.Should().BeOfType<EndOfStreamException>(); } - [Fact] - public void ReadBytesAsync_with_byte_array_should_throw_when_buffer_is_null() + [Theory] + [ParameterAttributeData] + public async Task ReadBytes_with_byte_array_should_throw_when_buffer_is_null([Values(true, false)]bool async) { var stream = new Mock<Stream>().Object; byte[] destination = null; - Func<Task> action = () => stream.ReadBytesAsync(destination, 0, 0, Timeout.InfiniteTimeSpan, CancellationToken.None); + var exception = async ? + await Record.ExceptionAsync(() => stream.ReadBytesAsync(OperationContext.NoTimeout, destination, 0, 0, Timeout.InfiniteTimeSpan)) : + Record.Exception(() => stream.ReadBytes(OperationContext.NoTimeout, destination, 0, 0, Timeout.InfiniteTimeSpan)); - action.ShouldThrow<ArgumentNullException>().And.ParamName.Should().Be("buffer"); + exception.Should().BeOfType<ArgumentNullException>().Subject + .ParamName.Should().Be("buffer"); } [Theory] - [InlineData(0, -1)] - [InlineData(1, 2)] - [InlineData(2, 1)] - public void ReadBytesAsync_with_byte_array_should_throw_when_count_is_invalid(int offset, int count) + [InlineData(true, 0, -1)] + [InlineData(true, 1, 2)] + [InlineData(true, 2, 1)] + [InlineData(false, 0, -1)] + [InlineData(false, 1, 2)] + [InlineData(false, 2, 1)] + public async Task ReadBytes_with_byte_array_should_throw_when_count_is_invalid(bool async, int offset, int count) { var stream = new Mock<Stream>().Object; var destination = new byte[2]; - Func<Task> action = () => stream.ReadBytesAsync(destination, offset, count, Timeout.InfiniteTimeSpan, CancellationToken.None); + var exception = async ? 
+ await Record.ExceptionAsync(() => stream.ReadBytesAsync(OperationContext.NoTimeout, destination, offset, count, Timeout.InfiniteTimeSpan)) : + Record.Exception(() => stream.ReadBytes(OperationContext.NoTimeout, destination, offset, count, Timeout.InfiniteTimeSpan)); - action.ShouldThrow<ArgumentOutOfRangeException>().And.ParamName.Should().Be("count"); + exception.Should().BeOfType<ArgumentOutOfRangeException>().Subject + .ParamName.Should().Be("count"); } [Theory] [ParameterAttributeData] - public void ReadBytesAsync_with_byte_array_should_throw_when_offset_is_invalid( - [Values(-1, 3)] - int offset) + public async Task ReadBytes_with_byte_array_should_throw_when_offset_is_invalid( + [Values(true, false)]bool async, + [Values(-1, 3)]int offset) { var stream = new Mock<Stream>().Object; var destination = new byte[2]; - Func<Task> action = () => stream.ReadBytesAsync(destination, offset, 0, Timeout.InfiniteTimeSpan, CancellationToken.None); + var exception = async ? + await Record.ExceptionAsync(() => stream.ReadBytesAsync(OperationContext.NoTimeout, destination, offset, 0, Timeout.InfiniteTimeSpan)) : + Record.Exception(() => stream.ReadBytes(OperationContext.NoTimeout, destination, offset, 0, Timeout.InfiniteTimeSpan)); - action.ShouldThrow<ArgumentOutOfRangeException>().And.ParamName.Should().Be("offset"); + exception.Should().BeOfType<ArgumentOutOfRangeException>().Subject + .ParamName.Should().Be("offset"); } - [Fact] - public void ReadBytesAsync_with_byte_array_should_throw_when_stream_is_null() + [Theory] + [ParameterAttributeData] + public async Task ReadBytes_with_byte_array_should_throw_when_stream_is_null([Values(true, false)]bool async) { Stream stream = null; var destination = new byte[0]; - Func<Task> action = () => stream.ReadBytesAsync(destination, 0, 0, Timeout.InfiniteTimeSpan, CancellationToken.None); + var exception = async ? 
+ await Record.ExceptionAsync(() => stream.ReadBytesAsync(OperationContext.NoTimeout, destination, 0, 0, Timeout.InfiniteTimeSpan)) : + Record.Exception(() => stream.ReadBytes(OperationContext.NoTimeout, destination, 0, 0, Timeout.InfiniteTimeSpan)); - action.ShouldThrow<ArgumentNullException>().And.ParamName.Should().Be("stream"); + exception.Should().BeOfType<ArgumentNullException>().Subject + .ParamName.Should().Be("stream"); } [Theory] - [InlineData(0, new byte[] { 0, 0 })] - [InlineData(1, new byte[] { 1, 0 })] - [InlineData(2, new byte[] { 1, 2 })] - public async Task ReadBytesAsync_with_byte_buffer_should_have_expected_effect_for_count(int count, byte[] expectedBytes) + [InlineData(true, 0, new byte[] { 0, 0 })] + [InlineData(true, 1, new byte[] { 1, 0 })] + [InlineData(true, 2, new byte[] { 1, 2 })] + [InlineData(false, 0, new byte[] { 0, 0 })] + [InlineData(false, 1, new byte[] { 1, 0 })] + [InlineData(false, 2, new byte[] { 1, 2 })] + public async Task ReadBytes_with_byte_buffer_should_have_expected_effect_for_count(bool async, int count, byte[] expectedBytes) { var bytes = new byte[] { 1, 2 }; var stream = new MemoryStream(bytes); var destination = new ByteArrayBuffer(new byte[2]); - await stream.ReadBytesAsync(destination, 0, count, Timeout.InfiniteTimeSpan, CancellationToken.None); + if (async) + { + await stream.ReadBytesAsync(OperationContext.NoTimeout, destination, 0, count, Timeout.InfiniteTimeSpan); + } + else + { + stream.ReadBytes(OperationContext.NoTimeout, destination, 0, count, Timeout.InfiniteTimeSpan); + } destination.AccessBackingBytes(0).Array.Should().Equal(expectedBytes); } [Theory] - [InlineData(1, new byte[] { 0, 1, 0 })] - [InlineData(2, new byte[] { 0, 0, 1 })] - public async Task ReadBytesAsync_with_byte_buffer_should_have_expected_effect_for_offset(int offset, byte[] expectedBytes) + [InlineData(true, 1, new byte[] { 0, 1, 0 })] + [InlineData(true, 2, new byte[] { 0, 0, 1 })] + [InlineData(false, 1, new byte[] { 0, 1, 0 })] + 
[InlineData(false, 2, new byte[] { 0, 0, 1 })] + public async Task ReadBytes_with_byte_buffer_should_have_expected_effect_for_offset(bool async, int offset, byte[] expectedBytes) { var bytes = new byte[] { 1 }; var stream = new MemoryStream(bytes); var destination = new ByteArrayBuffer(new byte[3]); - await stream.ReadBytesAsync(destination, offset, 1, Timeout.InfiniteTimeSpan, CancellationToken.None); + if (async) + { + await stream.ReadBytesAsync(OperationContext.NoTimeout, destination, offset, 1, Timeout.InfiniteTimeSpan); + } + else + { + stream.ReadBytes(OperationContext.NoTimeout, destination, offset, 1, Timeout.InfiniteTimeSpan); + } destination.AccessBackingBytes(0).Array.Should().Equal(expectedBytes); } [Theory] - [InlineData(1, new[] { 3 })] - [InlineData(2, new[] { 1, 2 })] - [InlineData(3, new[] { 2, 1 })] - [InlineData(4, new[] { 1, 1, 1 })] - public async Task ReadBytesAsync_with_byte_buffer_should_have_expected_effect_for_partial_reads(int testCase, int[] partition) + [InlineData(true, 1, new[] { 3 })] + [InlineData(true, 2, new[] { 1, 2 })] + [InlineData(true, 3, new[] { 2, 1 })] + [InlineData(true, 4, new[] { 1, 1, 1 })] + [InlineData(false, 1, new[] { 3 })] + [InlineData(false, 2, new[] { 1, 2 })] + [InlineData(false, 3, new[] { 2, 1 })] + [InlineData(false, 4, new[] { 1, 1, 1 })] + public async Task ReadBytes_with_byte_buffer_should_have_expected_effect_for_partial_reads(bool async, int testCase, int[] partition) { var bytes = new byte[] { 1, 2, 3 }; var mockStream = new Mock<Stream>(); var destination = new ByteArrayBuffer(new byte[3], 3); var n = 0; var position = 0; - mockStream.Setup(s => s.ReadAsync(It.IsAny<byte[]>(), It.IsAny<int>(), It.IsAny<int>(), It.IsAny<CancellationToken>())) - .Returns((byte[] buffer, int offset, int count, CancellationToken cancellationToken) => - { - var length = partition[n++]; - Buffer.BlockCopy(bytes, position, buffer, offset, length); - position += length; - return Task.FromResult(length); - }); + Task<int> 
ReadPartial (byte[] buffer, int offset, int count) + { + var length = partition[n++]; + Buffer.BlockCopy(bytes, position, buffer, offset, length); + position += length; + return Task.FromResult(length); + } - await mockStream.Object.ReadBytesAsync(destination, 0, 3, Timeout.InfiniteTimeSpan, CancellationToken.None); + mockStream.Setup(s => s.ReadAsync(It.IsAny<byte[]>(), It.IsAny<int>(), It.IsAny<int>(), It.IsAny<CancellationToken>())) + .Returns((byte[] buffer, int offset, int count, CancellationToken cancellationToken) => ReadPartial(buffer, offset, count)); + mockStream.Setup(s => s.BeginRead(It.IsAny<byte[]>(), It.IsAny<int>(), It.IsAny<int>(), It.IsAny<AsyncCallback>(), It.IsAny<object>())) + .Returns((byte[] buffer, int offset, int count, AsyncCallback callback, object state) => ReadPartial(buffer, offset, count)); + mockStream.Setup(s => s.EndRead(It.IsAny<IAsyncResult>())) + .Returns<IAsyncResult>(x => ((Task<int>)x).GetAwaiter().GetResult()); + + if (async) + { + await mockStream.Object.ReadBytesAsync(OperationContext.NoTimeout, destination, 0, 3, Timeout.InfiniteTimeSpan); + } + else + { + mockStream.Object.ReadBytes(OperationContext.NoTimeout, destination, 0, 3, Timeout.InfiniteTimeSpan); + } destination.AccessBackingBytes(0).Array.Should().Equal(bytes); } - [Fact] - public void ReadBytesAsync_with_byte_buffer_should_throw_when_end_of_stream_is_reached() + [Theory] + [ParameterAttributeData] + public async Task ReadBytes_with_byte_buffer_should_throw_when_end_of_stream_is_reached([Values(true, false)]bool async) { var mockStream = new Mock<Stream>(); var destination = CreateMockByteBuffer(1).Object; - mockStream.Setup(s => s.ReadAsync(It.IsAny<byte[]>(), 0, 1, It.IsAny<CancellationToken>())).Returns(Task.FromResult(0)); + mockStream.Setup(s => s.ReadAsync(It.IsAny<byte[]>(), 0, 1, It.IsAny<CancellationToken>())) + .ReturnsAsync(0); + mockStream.Setup(s => s.BeginRead(It.IsAny<byte[]>(), It.IsAny<int>(), It.IsAny<int>(), It.IsAny<AsyncCallback>(), 
It.IsAny<object>())) + .Returns(Task.FromResult(0)); - Func<Task> action = () => mockStream.Object.ReadBytesAsync(destination, 0, 1, Timeout.InfiniteTimeSpan, CancellationToken.None); + var exception = async ? + await Record.ExceptionAsync(() => mockStream.Object.ReadBytesAsync(OperationContext.NoTimeout, destination, 0, 1, Timeout.InfiniteTimeSpan)) : + Record.Exception(() => mockStream.Object.ReadBytes(OperationContext.NoTimeout, destination, 0, 1, Timeout.InfiniteTimeSpan)); - action.ShouldThrow<EndOfStreamException>(); + exception.Should().BeOfType<EndOfStreamException>(); } - [Fact] - public void ReadBytesAsync_with_byte_buffer_should_throw_when_buffer_is_null() + [Theory] + [ParameterAttributeData] + public async Task ReadBytes_with_byte_buffer_should_throw_when_buffer_is_null([Values(true, false)]bool async) { var stream = new Mock<Stream>().Object; IByteBuffer destination = null; - Func<Task> action = () => stream.ReadBytesAsync(destination, 0, 0, Timeout.InfiniteTimeSpan, CancellationToken.None); + var exception = async ? 
+ await Record.ExceptionAsync(() => stream.ReadBytesAsync(OperationContext.NoTimeout, destination, 0, 0, Timeout.InfiniteTimeSpan)) : + Record.Exception(() => stream.ReadBytes(OperationContext.NoTimeout, destination, 0, 0, Timeout.InfiniteTimeSpan)); - action.ShouldThrow<ArgumentNullException>().And.ParamName.Should().Be("buffer"); + exception.Should().BeOfType<ArgumentNullException>().Subject + .ParamName.Should().Be("buffer"); } [Theory] - [InlineData(0, -1)] - [InlineData(1, 2)] - [InlineData(2, 1)] - public void ReadBytesAsync_with_byte_buffer_should_throw_when_count_is_invalid(int offset, int count) + [InlineData(true, 0, -1)] + [InlineData(true, 1, 2)] + [InlineData(true, 2, 1)] + [InlineData(false, 0, -1)] + [InlineData(false, 1, 2)] + [InlineData(false, 2, 1)] + public async Task ReadBytes_with_byte_buffer_should_throw_when_count_is_invalid(bool async, int offset, int count) { var stream = new Mock<Stream>().Object; var destination = CreateMockByteBuffer(2).Object; - Func<Task> action = () => stream.ReadBytesAsync(destination, offset, count, Timeout.InfiniteTimeSpan, CancellationToken.None); + var exception = async ? 
+ await Record.ExceptionAsync(() => stream.ReadBytesAsync(OperationContext.NoTimeout, destination, offset, count, Timeout.InfiniteTimeSpan)) : + Record.Exception(() => stream.ReadBytes(OperationContext.NoTimeout, destination, offset, count, Timeout.InfiniteTimeSpan)); - action.ShouldThrow<ArgumentOutOfRangeException>().And.ParamName.Should().Be("count"); + exception.Should().BeOfType<ArgumentOutOfRangeException>().Subject + .ParamName.Should().Be("count"); } [Theory] [ParameterAttributeData] - public void ReadBytesAsync_with_byte_buffer_should_throw_when_offset_is_invalid( - [Values(-1, 3)] - int offset) + public async Task ReadBytes_with_byte_buffer_should_throw_when_offset_is_invalid( + [Values(true, false)] bool async, + [Values(-1, 3)]int offset) { var stream = new Mock<Stream>().Object; var destination = CreateMockByteBuffer(2).Object; - Func<Task> action = () => stream.ReadBytesAsync(destination, offset, 0, Timeout.InfiniteTimeSpan, CancellationToken.None); + var exception = async ? + await Record.ExceptionAsync(() => stream.ReadBytesAsync(OperationContext.NoTimeout, destination, offset, 0, Timeout.InfiniteTimeSpan)) : + Record.Exception(() => stream.ReadBytes(OperationContext.NoTimeout, destination, offset, 0, Timeout.InfiniteTimeSpan)); - action.ShouldThrow<ArgumentOutOfRangeException>().And.ParamName.Should().Be("offset"); + exception.Should().BeOfType<ArgumentOutOfRangeException>().Subject + .ParamName.Should().Be("offset"); } - [Fact] - public void ReadBytesAsync_with_byte_buffer_should_throw_when_stream_is_null() + [Theory] + [ParameterAttributeData] + public async Task ReadBytes_with_byte_buffer_should_throw_when_stream_is_null([Values(true, false)]bool async) { Stream stream = null; var destination = new Mock<IByteBuffer>().Object; - Func<Task> action = () => stream.ReadBytesAsync(destination, 0, 0, Timeout.InfiniteTimeSpan, CancellationToken.None); + var exception = async ? 
+ await Record.ExceptionAsync(() => stream.ReadBytesAsync(OperationContext.NoTimeout, destination, 0, 0, Timeout.InfiniteTimeSpan)) : + Record.Exception(() => stream.ReadBytes(OperationContext.NoTimeout, destination, 0, 0, Timeout.InfiniteTimeSpan)); - action.ShouldThrow<ArgumentNullException>().And.ParamName.Should().Be("stream"); + exception.Should().BeOfType<ArgumentNullException>().Subject + .ParamName.Should().Be("stream"); } [Theory] - [InlineData(0, new byte[] { })] - [InlineData(1, new byte[] { 1 })] - [InlineData(2, new byte[] { 1, 2 })] - public async Task WriteBytesAsync_should_have_expected_effect_for_count(int count, byte[] expectedBytes) + [InlineData(true, 0, new byte[] { })] + [InlineData(true, 1, new byte[] { 1 })] + [InlineData(true, 2, new byte[] { 1, 2 })] + [InlineData(false, 0, new byte[] { })] + [InlineData(false, 1, new byte[] { 1 })] + [InlineData(false, 2, new byte[] { 1, 2 })] + public async Task WriteBytes_should_have_expected_effect_for_count(bool async, int count, byte[] expectedBytes) { var stream = new MemoryStream(); var source = new ByteArrayBuffer(new byte[] { 1, 2 }); - await stream.WriteBytesAsync(source, 0, count, Timeout.InfiniteTimeSpan, CancellationToken.None); + if (async) + { + await stream.WriteBytesAsync(OperationContext.NoTimeout, source, 0, count, Timeout.InfiniteTimeSpan); + } + else + { + stream.WriteBytes(OperationContext.NoTimeout, source, 0, count, Timeout.InfiniteTimeSpan); + } stream.ToArray().Should().Equal(expectedBytes); } [Theory] - [InlineData(1, new byte[] { 2 })] - [InlineData(2, new byte[] { 3 })] - public async Task WriteBytesAsync_should_have_expected_effect_for_offset(int offset, byte[] expectedBytes) + [InlineData(true, 1, new byte[] { 2 })] + [InlineData(true, 2, new byte[] { 3 })] + [InlineData(false, 1, new byte[] { 2 })] + [InlineData(false, 2, new byte[] { 3 })] + public async Task WriteBytes_should_have_expected_effect_for_offset(bool async, int offset, byte[] expectedBytes) { var stream = new 
MemoryStream(); var source = new ByteArrayBuffer(new byte[] { 1, 2, 3 }); - await stream.WriteBytesAsync(source, offset, 1, Timeout.InfiniteTimeSpan, CancellationToken.None); + if (async) + { + await stream.WriteBytesAsync(OperationContext.NoTimeout, source, offset, 1, Timeout.InfiniteTimeSpan); + } + else + { + stream.WriteBytes(OperationContext.NoTimeout, source, offset, 1, Timeout.InfiniteTimeSpan); + } stream.ToArray().Should().Equal(expectedBytes); } [Theory] - [InlineData(1, new[] { 3 })] - [InlineData(2, new[] { 1, 2 })] - [InlineData(3, new[] { 2, 1 })] - [InlineData(4, new[] { 1, 1, 1 })] - public async Task WriteBytesAsync_should_have_expected_effect_for_partial_writes(int testCase, int[] partition) + [InlineData(true, 1, new[] { 3 })] + [InlineData(true, 2, new[] { 1, 2 })] + [InlineData(true, 3, new[] { 2, 1 })] + [InlineData(true, 4, new[] { 1, 1, 1 })] + [InlineData(false, 1, new[] { 3 })] + [InlineData(false, 2, new[] { 1, 2 })] + [InlineData(false, 3, new[] { 2, 1 })] + [InlineData(false, 4, new[] { 1, 1, 1 })] + public async Task WriteBytes_should_have_expected_effect_for_partial_writes(bool async, int testCase, int[] partition) { var stream = new MemoryStream(); var mockSource = new Mock<IByteBuffer>(); @@ -310,58 +448,82 @@ public async Task WriteBytesAsync_should_have_expected_effect_for_partial_writes return new ArraySegment<byte>(bytes, position, length); }); - await stream.WriteBytesAsync(mockSource.Object, 0, 3, Timeout.InfiniteTimeSpan, CancellationToken.None); + if (async) + { + await stream.WriteBytesAsync(OperationContext.NoTimeout, mockSource.Object, 0, 3, Timeout.InfiniteTimeSpan); + } + else + { + stream.WriteBytes(OperationContext.NoTimeout, mockSource.Object, 0, 3, Timeout.InfiniteTimeSpan); + } stream.ToArray().Should().Equal(bytes); } - [Fact] - public void WriteBytesAsync_should_throw_when_buffer_is_null() + [Theory] + [ParameterAttributeData] + public async Task WriteBytes_should_throw_when_buffer_is_null([Values(true, 
false)]bool async) { var stream = new Mock<Stream>().Object; - Func<Task> action = () => stream.WriteBytesAsync(null, 0, 0, Timeout.InfiniteTimeSpan, CancellationToken.None); + var exception = async ? + await Record.ExceptionAsync(() => stream.WriteBytesAsync(OperationContext.NoTimeout, null, 0, 0, Timeout.InfiniteTimeSpan)) : + Record.Exception(() => stream.WriteBytes(OperationContext.NoTimeout, null, 0, 0, Timeout.InfiniteTimeSpan)); - action.ShouldThrow<ArgumentNullException>().And.ParamName.Should().Be("buffer"); + exception.Should().BeOfType<ArgumentNullException>().Subject + .ParamName.Should().Be("buffer"); } [Theory] - [InlineData(0, -1)] - [InlineData(1, 2)] - [InlineData(2, 1)] - public void WriteBytesAsync_should_throw_when_count_is_invalid(int offset, int count) + [InlineData(true, 0, -1)] + [InlineData(true, 1, 2)] + [InlineData(true, 2, 1)] + [InlineData(false, 0, -1)] + [InlineData(false, 1, 2)] + [InlineData(false, 2, 1)] + public async Task WriteBytes_should_throw_when_count_is_invalid(bool async, int offset, int count) { var stream = new Mock<Stream>().Object; var source = CreateMockByteBuffer(2).Object; - Func<Task> action = () => stream.WriteBytesAsync(source, offset, count, Timeout.InfiniteTimeSpan, CancellationToken.None); + var exception = async ? 
+ await Record.ExceptionAsync(() => stream.WriteBytesAsync(OperationContext.NoTimeout, source, offset, count, Timeout.InfiniteTimeSpan)) : + Record.Exception(() => stream.WriteBytes(OperationContext.NoTimeout, source, offset, count, Timeout.InfiniteTimeSpan)); - action.ShouldThrow<ArgumentOutOfRangeException>().And.ParamName.Should().Be("count"); + exception.Should().BeOfType<ArgumentOutOfRangeException>().Subject + .ParamName.Should().Be("count"); } [Theory] [ParameterAttributeData] - public void WriteBytesAsync_should_throw_when_offset_is_invalid( - [Values(-1, 3)] - int offset) + public async Task WriteBytes_should_throw_when_offset_is_invalid( + [Values(true, false)]bool async, + [Values(-1, 3)]int offset) { var stream = new Mock<Stream>().Object; - var destination = CreateMockByteBuffer(2).Object; + var source = CreateMockByteBuffer(2).Object; - Func<Task> action = () => stream.WriteBytesAsync(destination, offset, 0, Timeout.InfiniteTimeSpan, CancellationToken.None); + var exception = async ? + await Record.ExceptionAsync(() => stream.WriteBytesAsync(OperationContext.NoTimeout, source, offset, 0, Timeout.InfiniteTimeSpan)) : + Record.Exception(() => stream.WriteBytes(OperationContext.NoTimeout, source, offset, 0, Timeout.InfiniteTimeSpan)); - action.ShouldThrow<ArgumentOutOfRangeException>().And.ParamName.Should().Be("offset"); + exception.Should().BeOfType<ArgumentOutOfRangeException>().Subject + .ParamName.Should().Be("offset"); } - [Fact] - public void WriteBytesAsync_should_throw_when_stream_is_null() + [Theory] + [ParameterAttributeData] + public async Task WriteBytes_should_throw_when_stream_is_null([Values(true, false)]bool async) { Stream stream = null; var source = new Mock<IByteBuffer>().Object; - Func<Task> action = () => stream.WriteBytesAsync(source, 0, 0, Timeout.InfiniteTimeSpan, CancellationToken.None); + var exception = async ? 
+ await Record.ExceptionAsync(() => stream.WriteBytesAsync(OperationContext.NoTimeout, source, 0, 0, Timeout.InfiniteTimeSpan)) : + Record.Exception(() => stream.WriteBytes(OperationContext.NoTimeout, source, 0, 0, Timeout.InfiniteTimeSpan)); - action.ShouldThrow<ArgumentNullException>().And.ParamName.Should().Be("stream"); + exception.Should().BeOfType<ArgumentNullException>().Subject + .ParamName.Should().Be("stream"); } // helper methods diff --git a/tests/MongoDB.Driver.Tests/Core/Misc/TaskExtensionsTests.cs b/tests/MongoDB.Driver.Tests/Core/Misc/TaskExtensionsTests.cs new file mode 100644 index 00000000000..f3799318b5b --- /dev/null +++ b/tests/MongoDB.Driver.Tests/Core/Misc/TaskExtensionsTests.cs @@ -0,0 +1,191 @@ +/* Copyright 2010-present MongoDB Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +using System; +using System.Threading; +using System.Threading.Tasks; +using FluentAssertions; +using MongoDB.TestHelpers.XunitExtensions; +using Xunit; + +namespace MongoDB.Driver.Core.Misc +{ + public class TaskExtensionsTests + { + [Theory] + [ParameterAttributeData] + public async Task WaitAsync_should_throw_on_negative_timeout([Values(true, false)] bool isPromiseTask) + { + var task = CreateSubject(isPromiseTask); + + var exception = await Record.ExceptionAsync(() => task.WaitAsync(TimeSpan.FromSeconds(-42), CancellationToken.None)); + + exception.Should().BeOfType<ArgumentOutOfRangeException>(); + } + + [Theory] + [ParameterAttributeData] + public async Task WaitAsync_should_work_for_task([Values(true, false)] bool isPromiseTask) + { + var task = CreateSubject(isPromiseTask); + + await task.WaitAsync(Timeout.InfiniteTimeSpan, CancellationToken.None); + + task.IsCompleted.Should().BeTrue(); + } + + [Theory] + [ParameterAttributeData] + public async Task WaitAsync_should_rethrow_for_failed_task([Values(true, false)] bool isPromiseTask) + { + var ex = new InvalidOperationException(); + var task = CreateSubject(isPromiseTask, ex); + + var exception = await Record.ExceptionAsync(() => task.WaitAsync(Timeout.InfiniteTimeSpan, CancellationToken.None)); + + exception.Should().Be(ex); + } + + [Fact] + public async Task WaitAsync_should_throw_on_cancellation() + { + var task = CreateSubject(true); + using var cts = new CancellationTokenSource(5); + + var exception = await Record.ExceptionAsync(() => task.WaitAsync(Timeout.InfiniteTimeSpan, cts.Token)); + + task.IsCompleted.Should().BeFalse(); + exception.Should().BeOfType<TaskCanceledException>(); + } + + [Fact] + public async Task WaitAsync_should_throw_on_timeout() + { + var task = CreateSubject(true); + + var exception = await Record.ExceptionAsync(() => task.WaitAsync(TimeSpan.FromMilliseconds(5), CancellationToken.None)); + + task.IsCompleted.Should().BeFalse(); + 
exception.Should().BeOfType<TimeoutException>(); + } + + [Theory] + [ParameterAttributeData] + public async Task WaitAsyncTResult_should_throw_on_negative_timeout([Values(true, false)] bool isPromiseTask) + { + var task = CreateSubject(42, isPromiseTask); + + var exception = await Record.ExceptionAsync(() => task.WaitAsync(TimeSpan.FromSeconds(-42), CancellationToken.None)); + + exception.Should().BeOfType<ArgumentOutOfRangeException>(); + } + + [Theory] + [ParameterAttributeData] + public async Task WaitAsyncTResult_should_not_throw_on_infinite_timeout([Values(true, false)] bool isPromiseTask) + { + var task = CreateSubject(42, isPromiseTask); + + await task.WaitAsync(Timeout.InfiniteTimeSpan, CancellationToken.None); + + task.IsCompleted.Should().BeTrue(); + } + + [Theory] + [ParameterAttributeData] + public async Task WaitAsyncTResult_should_work_for_task([Values(true, false)] bool isPromiseTask) + { + var task = CreateSubject(42, isPromiseTask); + + var result = await task.WaitAsync(Timeout.InfiniteTimeSpan, CancellationToken.None); + + result.Should().Be(42); + } + + [Theory] + [ParameterAttributeData] + public async Task WaitAsyncTResult_should_rethrow_for_failed_task([Values(true, false)] bool isPromiseTask) + { + var ex = new InvalidOperationException(); + var task = CreateSubject(42, isPromiseTask, ex); + + var exception = await Record.ExceptionAsync(() => task.WaitAsync(Timeout.InfiniteTimeSpan, CancellationToken.None)); + + exception.Should().Be(ex); + } + + [Fact] + public async Task WaitAsyncTResult_should_throw_on_cancellation() + { + var task = CreateSubject(42, true); + using var cts = new CancellationTokenSource(5); + + var exception = await Record.ExceptionAsync(() => task.WaitAsync(Timeout.InfiniteTimeSpan, cts.Token)); + + task.IsCompleted.Should().BeFalse(); + exception.Should().BeOfType<TaskCanceledException>(); + } + + [Fact] + public async Task WaitAsyncTResult_should_throw_on_timeout() + { + var task = CreateSubject(42, true); + + var 
exception = await Record.ExceptionAsync(() => task.WaitAsync(TimeSpan.FromMilliseconds(5), CancellationToken.None)); + + task.IsCompleted.Should().BeFalse(); + exception.Should().BeOfType<TimeoutException>(); + } + + private Task CreateSubject(bool isPromise, Exception exception = null) + { + if (exception == null) + { + return isPromise ? Task.Delay(50) : Task.CompletedTask; + } + + return isPromise ? + Task.Delay(50).ContinueWith(_ => throw exception) : + Task.FromException(exception); + } + + private Task<TResult> CreateSubject<TResult>(TResult result, bool isPromise, Exception exception = null) + { + var tcs = new TaskCompletionSource<TResult>(); + if (isPromise) + { + Task.Delay(50).ContinueWith(_ => + { + if (exception == null) + { + tcs.TrySetResult(result); + } + else + { + tcs.SetException(exception); + } + }); + } + + if (exception == null) + { + return isPromise ? tcs.Task : Task.FromResult(result); + } + + return isPromise ? tcs.Task : Task.FromException<TResult>(exception); + } + } +} + diff --git a/tests/MongoDB.Driver.Tests/Core/Misc/WireVersionTests.cs b/tests/MongoDB.Driver.Tests/Core/Misc/WireVersionTests.cs index 7c94537bc7b..9dc3d5538ae 100644 --- a/tests/MongoDB.Driver.Tests/Core/Misc/WireVersionTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Misc/WireVersionTests.cs @@ -24,6 +24,7 @@ namespace MongoDB.Driver.Core.Tests.Core.Misc public class WireVersionTests { [Fact] + [Trait("Category", "Integration")] public void Server_maxWireVersion_should_be_in_supported_range() { RequireServer.Check().StableServer(stable: true); @@ -46,7 +47,7 @@ public void GetServerVersionForErrorMessage_should_return_expected_serverVersion [Fact] public void SupportedWireRange_should_be_correct() { - WireVersion.SupportedWireVersionRange.Should().Be(new Range<int>(7, 27)); + WireVersion.SupportedWireVersionRange.Should().Be(new Range<int>(8, 28)); } [Fact] @@ -59,7 +60,8 @@ public void ToServerVersion_should_throw_if_wireVersion_less_than_0() [Theory] [InlineData(99, 
null, null)] - [InlineData(28, null, null)] + [InlineData(29, null, null)] + [InlineData(28, 8, 3)] [InlineData(27, 8, 2)] [InlineData(26, 8, 1)] [InlineData(25, 8, 0)] diff --git a/tests/MongoDB.Driver.Tests/Core/Operations/AggregateOperationTests.cs b/tests/MongoDB.Driver.Tests/Core/Operations/AggregateOperationTests.cs index 9e20b7d8fa0..2a1605a9890 100644 --- a/tests/MongoDB.Driver.Tests/Core/Operations/AggregateOperationTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Operations/AggregateOperationTests.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -308,7 +308,7 @@ public void CreateCommand_should_return_the_expected_result() var connectionDescription = OperationTestHelper.CreateConnectionDescription(); var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -334,7 +334,7 @@ public void CreateCommand_should_return_the_expected_result_when_AllowDiskUse_is var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -359,7 +359,7 @@ public void CreateCommand_should_return_the_expected_result_when_BatchSize_is_se var connectionDescription = OperationTestHelper.CreateConnectionDescription(); var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var cursor = new BsonDocument { @@ -389,7 +389,7 @@ public void 
CreateCommand_should_return_the_expected_result_when_Collation_is_se var connectionDescription = OperationTestHelper.CreateConnectionDescription(); var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -415,7 +415,7 @@ public void CreateCommand_should_return_expected_result_when_Comment_is_set( var connectionDescription = OperationTestHelper.CreateConnectionDescription(); var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -442,7 +442,7 @@ public void CreateCommand_should_return_the_expected_result_when_Hint_is_set( var connectionDescription = OperationTestHelper.CreateConnectionDescription(); var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -469,7 +469,7 @@ public void CreateCommand_should_return_expected_result_when_Let_is_set( var connectionDescription = OperationTestHelper.CreateConnectionDescription(Feature.AggregateOptionsLet.FirstSupportedWireVersion); var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -497,7 +497,7 @@ public void CreateCommand_should_return_expected_result_when_MaxTime_is_set(long var connectionDescription = OperationTestHelper.CreateConnectionDescription(); var session = OperationTestHelper.CreateSession(); - var 
result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -510,6 +510,24 @@ public void CreateCommand_should_return_expected_result_when_MaxTime_is_set(long result["maxTimeMS"].BsonType.Should().Be(BsonType.Int32); } + [Theory] + [InlineData(42)] + [InlineData(-1)] + public void CreateCommand_should_ignore_maxtime_if_timeout_specified(int timeoutMs) + { + var subject = new AggregateOperation<BsonDocument>(_collectionNamespace, __pipeline, __resultSerializer, _messageEncoderSettings) + { + MaxTime = TimeSpan.FromTicks(10) + }; + var connectionDescription = OperationTestHelper.CreateConnectionDescription(); + var session = OperationTestHelper.CreateSession(); + + var operationContext = new OperationContext(TimeSpan.FromMilliseconds(timeoutMs), CancellationToken.None); + var result = subject.CreateCommand(operationContext, session, connectionDescription); + + result.Should().NotContain("maxTimeMS"); + } + [Theory] [ParameterAttributeData] public void CreateCommand_should_return_the_expected_result_when_ReadConcern_is_set( @@ -525,7 +543,7 @@ public void CreateCommand_should_return_the_expected_result_when_ReadConcern_is_ var connectionDescription = OperationTestHelper.CreateConnectionDescription(); var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -552,7 +570,7 @@ public void CreateCommand_should_return_the_expected_result_when_using_causal_co var connectionDescription = OperationTestHelper.CreateConnectionDescription(supportsSessions: true); var session = OperationTestHelper.CreateSession(true, new BsonTimestamp(100)); - var result = subject.CreateCommand(connectionDescription, session); + var result = 
subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedReadConcernDocument = readConcern.ToBsonDocument(); expectedReadConcernDocument["afterClusterTime"] = new BsonTimestamp(100); @@ -592,16 +610,7 @@ public void Execute_should_throw_when_binding_is_null( { var subject = new AggregateOperation<BsonDocument>(_collectionNamespace, __pipeline, __resultSerializer, _messageEncoderSettings); - Exception exception; - if (async) - { - exception = Record.Exception(() => subject.ExecuteAsync(binding: null, cancellationToken: CancellationToken.None).GetAwaiter().GetResult()); - } - else - { - exception = Record.Exception(() => subject.Execute(binding: null, cancellationToken: CancellationToken.None)); - } - + var exception = Record.Exception(() => ExecuteOperation(subject, binding: null, async: async)); var argumentNullException = exception.Should().BeOfType<ArgumentNullException>().Subject; argumentNullException.ParamName.Should().Be("binding"); } diff --git a/tests/MongoDB.Driver.Tests/Core/Operations/AggregateToCollectionOperationTests.cs b/tests/MongoDB.Driver.Tests/Core/Operations/AggregateToCollectionOperationTests.cs index b45edc47149..024fc2a5d1c 100644 --- a/tests/MongoDB.Driver.Tests/Core/Operations/AggregateToCollectionOperationTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Operations/AggregateToCollectionOperationTests.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -17,6 +17,7 @@ using System.Collections.Generic; using System.Linq; using System.Net; +using System.Threading; using System.Threading.Tasks; using FluentAssertions; using MongoDB.Bson; @@ -306,7 +307,7 @@ public void CreateCommand_should_return_expected_result() var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -330,7 +331,7 @@ public void CreateCommand_should_return_expected_result_when_AllowDiskUse_is_set var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -355,7 +356,7 @@ public void CreateCommand_should_return_expected_result_when_BypassDocumentValid var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -381,7 +382,7 @@ public void CreateCommand_should_return_expected_result_when_Collation_is_set( var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -406,7 +407,7 @@ public void 
CreateCommand_should_return_expected_result_when_Comment_is_set( var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -432,7 +433,7 @@ public void CreateCommand_should_return_the_expected_result_when_Hint_is_set( var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -458,7 +459,7 @@ public void CreateCommand_should_return_the_expected_result_when_Let_is_set( var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -486,7 +487,7 @@ public void CreateCommand_should_return_expected_result_when_MaxTime_is_set(long var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -499,6 +500,24 @@ public void CreateCommand_should_return_expected_result_when_MaxTime_is_set(long result["maxTimeMS"].BsonType.Should().Be(BsonType.Int32); } + [Theory] + [InlineData(42)] + [InlineData(-1)] + public void CreateCommand_should_ignore_maxtime_if_timeout_specified(int 
timeoutMs) + { + var subject = new AggregateToCollectionOperation(_collectionNamespace, __pipeline, _messageEncoderSettings) + { + MaxTime = TimeSpan.FromTicks(10) + }; + var session = OperationTestHelper.CreateSession(); + var connectionDescription = OperationTestHelper.CreateConnectionDescription(); + + var operationContext = new OperationContext(TimeSpan.FromMilliseconds(timeoutMs), CancellationToken.None); + var result = subject.CreateCommand(operationContext, session, connectionDescription); + + result.Should().NotContain("maxTimeMS"); + } + [Theory] [ParameterAttributeData] public void CreateCommand_should_return_expected_result_when_ReadConcern_is_set( @@ -513,7 +532,7 @@ public void CreateCommand_should_return_expected_result_when_ReadConcern_is_set( var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -539,7 +558,7 @@ public void CreateCommand_should_return_expected_result_when_WriteConcern_is_set var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { diff --git a/tests/MongoDB.Driver.Tests/Core/Operations/AsyncCursorEnumerableOneTimeAdapterTests.cs b/tests/MongoDB.Driver.Tests/Core/Operations/AsyncCursorEnumerableOneTimeAdapterTests.cs index c8438ee7c40..67fff836641 100644 --- a/tests/MongoDB.Driver.Tests/Core/Operations/AsyncCursorEnumerableOneTimeAdapterTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Operations/AsyncCursorEnumerableOneTimeAdapterTests.cs @@ -15,6 +15,7 @@ using System; using 
System.Threading; +using System.Threading.Tasks; using FluentAssertions; using MongoDB.Bson; using Moq; @@ -32,6 +33,37 @@ public void constructor_should_throw_when_cursor_is_null() action.ShouldThrow<ArgumentNullException>().And.ParamName.Should().Be("cursor"); } + [Fact] + public async Task GetAsyncEnumerator_should_return_expected_result() + { + var mockCursor = new Mock<IAsyncCursor<BsonDocument>>(); + mockCursor.SetupSequence(c => c.MoveNextAsync(CancellationToken.None)).ReturnsAsync(true).ReturnsAsync(false); + mockCursor.Setup(c => c.Current).Returns(new[] { new BsonDocument("_id", 0) }); + var subject = new AsyncCursorEnumerableOneTimeAdapter<BsonDocument>(mockCursor.Object, CancellationToken.None); + + var result = subject.GetAsyncEnumerator(); + + var result1 = await result.MoveNextAsync(); + result1.Should().BeTrue(); + + result.Current.Should().Be(new BsonDocument("_id", 0)); + + var result2 = await result.MoveNextAsync(); + result2.Should().BeFalse(); + } + + [Fact] + public void GetAsyncEnumerator_should_throw_when_called_more_than_once() + { + var mockCursor = new Mock<IAsyncCursor<BsonDocument>>(); + var subject = new AsyncCursorEnumerableOneTimeAdapter<BsonDocument>(mockCursor.Object, CancellationToken.None); + subject.GetAsyncEnumerator(); + + var exception = Record.Exception(() => subject.GetAsyncEnumerator()); + + exception.Should().BeOfType<InvalidOperationException>(); + } + [Fact] public void GetEnumerator_should_return_expected_result() { diff --git a/tests/MongoDB.Driver.Tests/Core/Operations/AsyncCursorEnumeratorTests.cs b/tests/MongoDB.Driver.Tests/Core/Operations/AsyncCursorEnumeratorTests.cs index e6cd9061a75..815fc0f1ae1 100644 --- a/tests/MongoDB.Driver.Tests/Core/Operations/AsyncCursorEnumeratorTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Operations/AsyncCursorEnumeratorTests.cs @@ -16,11 +16,13 @@ using System; using System.Linq; using System.Threading; +using System.Threading.Tasks; using FluentAssertions; using MongoDB.Bson; 
using MongoDB.Bson.Serialization.Serializers; using MongoDB.Driver.Core.Bindings; using MongoDB.Driver.Core.WireProtocol.Messages.Encoders; +using MongoDB.TestHelpers.XunitExtensions; using Moq; using Xunit; @@ -105,29 +107,51 @@ public void Current_should_throw_when_subject_has_been_disposed() action.ShouldThrow<ObjectDisposedException>(); } - [Fact] - public void Dispose_should_dispose_cursor() + [Theory] + [ParameterAttributeData] + public async Task Dispose_should_dispose_cursor( + [Values(false, true)] bool async) { var mockCursor = new Mock<IAsyncCursor<BsonDocument>>(); var subject = new AsyncCursorEnumerator<BsonDocument>(mockCursor.Object, CancellationToken.None); - subject.Dispose(); + if (async) + { + await subject.DisposeAsync(); + } + else + { + subject.Dispose(); + } mockCursor.Verify(c => c.Dispose(), Times.Once); } - [Fact] - public void MoveNext_should_return_expected_result() + [Theory] + [ParameterAttributeData] + public async Task MoveNext_should_return_expected_result( + [Values(false, true)] bool async) { var subject = CreateSubject(2); - subject.MoveNext().Should().BeTrue(); - subject.MoveNext().Should().BeTrue(); - subject.MoveNext().Should().BeFalse(); + if (async) + { + (await subject.MoveNextAsync()).Should().BeTrue(); + (await subject.MoveNextAsync()).Should().BeTrue(); + (await subject.MoveNextAsync()).Should().BeFalse(); + } + else + { + subject.MoveNext().Should().BeTrue(); + subject.MoveNext().Should().BeTrue(); + subject.MoveNext().Should().BeFalse(); + } } - [Fact] - public void MoveNext_should_return_expected_result_when_there_are_two_batches() + [Theory] + [ParameterAttributeData] + public async Task MoveNext_should_return_expected_result_when_there_are_two_batches( + [Values(false, true)] bool async) { var mockCursor = new Mock<IAsyncCursor<BsonDocument>>(); var firstBatch = new[] @@ -139,25 +163,63 @@ public void MoveNext_should_return_expected_result_when_there_are_two_batches() { new BsonDocument("_id", 2) }; - 
mockCursor.SetupSequence(c => c.MoveNext(CancellationToken.None)).Returns(true).Returns(true).Returns(false); + + if (async) + { + mockCursor.SetupSequence(c => c.MoveNextAsync(CancellationToken.None)) + .ReturnsAsync(true) + .ReturnsAsync(true) + .ReturnsAsync(false); + } + else + { + mockCursor.SetupSequence(c => c.MoveNext(CancellationToken.None)) + .Returns(true) + .Returns(true) + .Returns(false); + } + mockCursor.SetupSequence(c => c.Current).Returns(firstBatch).Returns(secondBatch); var subject = new AsyncCursorEnumerator<BsonDocument>(mockCursor.Object, CancellationToken.None); - subject.MoveNext().Should().BeTrue(); - subject.MoveNext().Should().BeTrue(); - subject.MoveNext().Should().BeTrue(); - subject.MoveNext().Should().BeFalse(); + if (async) + { + (await subject.MoveNextAsync()).Should().BeTrue(); + (await subject.MoveNextAsync()).Should().BeTrue(); + (await subject.MoveNextAsync()).Should().BeTrue(); + (await subject.MoveNextAsync()).Should().BeFalse(); + } + else + { + subject.MoveNext().Should().BeTrue(); + subject.MoveNext().Should().BeTrue(); + subject.MoveNext().Should().BeTrue(); + subject.MoveNext().Should().BeFalse(); + } } - [Fact] - public void MoveNext_should_throw_when_subject_has_been_disposed() + [Theory] + [ParameterAttributeData] + public async Task MoveNext_should_throw_when_subject_has_been_disposed( + [Values(false, true)] bool async) { var subject = CreateSubject(0); - subject.Dispose(); - Action action = () => subject.MoveNext(); + Exception exception; + if (async) + { + await subject.DisposeAsync(); - action.ShouldThrow<ObjectDisposedException>(); + exception = await Record.ExceptionAsync(async () => await subject.MoveNextAsync()); + } + else + { + subject.Dispose(); + + exception = Record.Exception(() => subject.MoveNext()); + } + + exception.Should().BeOfType<ObjectDisposedException>(); } [Fact] diff --git a/tests/MongoDB.Driver.Tests/Core/Operations/AsyncCursorSourceEnumeratorTests.cs 
b/tests/MongoDB.Driver.Tests/Core/Operations/AsyncCursorSourceEnumeratorTests.cs new file mode 100644 index 00000000000..1170d8368c3 --- /dev/null +++ b/tests/MongoDB.Driver.Tests/Core/Operations/AsyncCursorSourceEnumeratorTests.cs @@ -0,0 +1,194 @@ +/* Copyright 2010-present MongoDB Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using FluentAssertions; +using MongoDB.Bson; +using MongoDB.Bson.Serialization.Serializers; +using MongoDB.Driver.Core.Bindings; +using MongoDB.Driver.Core.Operations; +using MongoDB.Driver.Core.WireProtocol.Messages.Encoders; +using Moq; +using Xunit; + +namespace MongoDB.Driver.Tests.Core.Operations +{ + public class AsyncCursorSourceEnumeratorTests + { + [Fact] + public void Constructor_should_throw_when_cursorSource_is_null() + { + var exception = Record.Exception(() => new AsyncCursorSourceEnumerator<BsonDocument>(null, CancellationToken.None)); + + exception.Should().BeOfType<ArgumentNullException>(); + } + + [Fact] + public void Current_should_throw_when_enumeration_has_not_started() + { + var source = CreateCursorSource(1); + var enumerator = new AsyncCursorSourceEnumerator<BsonDocument>(source, CancellationToken.None); + + var exception = Record.Exception(() => enumerator.Current); + + exception.Should().BeOfType<InvalidOperationException>() + 
.Which.Message.Should().Contain("Enumeration has not started"); + } + + [Fact] + public async Task Current_should_return_expected_document_after_MoveNextAsync() + { + var source = CreateCursorSource(1); + var enumerator = new AsyncCursorSourceEnumerator<BsonDocument>(source, CancellationToken.None); + var expectedDocument = new BsonDocument("_id", 0); + + await enumerator.MoveNextAsync(); + var result = enumerator.Current; + + result.Should().Be(expectedDocument); + } + + [Fact] + public async Task MoveNextAsync_should_execute_query_on_first_call() + { + var mockSource = new Mock<IAsyncCursorSource<BsonDocument>>(); + var cursor = CreateCursor(1); + mockSource.Setup(s => s.ToCursorAsync(It.IsAny<CancellationToken>())) + .ReturnsAsync(cursor); + + var enumerator = new AsyncCursorSourceEnumerator<BsonDocument>(mockSource.Object, CancellationToken.None); + + // Query should not execute until first MoveNextAsync call + mockSource.Verify(s => s.ToCursorAsync(It.IsAny<CancellationToken>()), Times.Never); + + await enumerator.MoveNextAsync(); + + // Query should execute exactly once + mockSource.Verify(s => s.ToCursorAsync(It.IsAny<CancellationToken>()), Times.Once); + } + + [Fact] + public async Task MoveNextAsync_should_not_execute_query_on_subsequent_calls() + { + var mockSource = new Mock<IAsyncCursorSource<BsonDocument>>(); + var cursor = CreateCursor(2); + mockSource.Setup(s => s.ToCursorAsync(It.IsAny<CancellationToken>())) + .ReturnsAsync(cursor); + + var enumerator = new AsyncCursorSourceEnumerator<BsonDocument>(mockSource.Object, CancellationToken.None); + + await enumerator.MoveNextAsync(); // First call + await enumerator.MoveNextAsync(); // Second call + + // Query should execute exactly once, not twice + mockSource.Verify(s => s.ToCursorAsync(It.IsAny<CancellationToken>()), Times.Once); + } + + [Fact] + public async Task MoveNextAsync_should_enumerate_all_documents() + { + var source = CreateCursorSource(3); + var enumerator = new 
AsyncCursorSourceEnumerator<BsonDocument>(source, CancellationToken.None); + var expectedDocuments = new[] + { + new BsonDocument("_id", 0), + new BsonDocument("_id", 1), + new BsonDocument("_id", 2) + }; + + var actualDocuments = new List<BsonDocument>(); + while (await enumerator.MoveNextAsync()) + { + actualDocuments.Add(enumerator.Current); + } + + actualDocuments.Should().Equal(expectedDocuments); + } + + [Fact] + public async Task MoveNextAsync_should_throw_when_disposed() + { + var source = CreateCursorSource(1); + var enumerator = new AsyncCursorSourceEnumerator<BsonDocument>(source, CancellationToken.None); + + await enumerator.DisposeAsync(); + + var exception = await Record.ExceptionAsync(async () => await enumerator.MoveNextAsync()); + + exception.Should().BeOfType<ObjectDisposedException>(); + } + + [Fact] + public async Task MoveNextAsync_should_respect_cancellation_token() + { + var mockSource = new Mock<IAsyncCursorSource<BsonDocument>>(); + var cts = new CancellationTokenSource(); + cts.Cancel(); // Cancel immediately + + mockSource.Setup(s => s.ToCursorAsync(It.IsAny<CancellationToken>())) + .ThrowsAsync(new OperationCanceledException()); + + var enumerator = new AsyncCursorSourceEnumerator<BsonDocument>(mockSource.Object, cts.Token); + + var exception = await Record.ExceptionAsync(async () => await enumerator.MoveNextAsync()); + + exception.Should().BeOfType<OperationCanceledException>(); + } + + [Fact] + public void Reset_should_throw_NotSupportedException() + { + var source = CreateCursorSource(1); + var enumerator = new AsyncCursorSourceEnumerator<BsonDocument>(source, CancellationToken.None); + + var exception = Record.Exception(() => enumerator.Reset()); + + exception.Should().BeOfType<NotSupportedException>(); + } + + // Helper methods + private IAsyncCursor<BsonDocument> CreateCursor(int count) + { + var firstBatch = Enumerable.Range(0, count) + .Select(i => new BsonDocument("_id", i)) + .ToArray(); + + return new 
AsyncCursor<BsonDocument>( + channelSource: new Mock<IChannelSource>().Object, + collectionNamespace: new CollectionNamespace("test", "collection"), + comment: null, + firstBatch: firstBatch, + cursorId: 0, + batchSize: null, + limit: null, + serializer: BsonDocumentSerializer.Instance, + messageEncoderSettings: new MessageEncoderSettings(), + maxTime: null); + } + + private IAsyncCursorSource<BsonDocument> CreateCursorSource(int count) + { + var mockCursorSource = new Mock<IAsyncCursorSource<BsonDocument>>(); + mockCursorSource.Setup(s => s.ToCursorAsync(It.IsAny<CancellationToken>())) + .ReturnsAsync(() => CreateCursor(count)); + + return mockCursorSource.Object; + } + } +} \ No newline at end of file diff --git a/tests/MongoDB.Driver.Tests/Core/Operations/AsyncCursorTests.cs b/tests/MongoDB.Driver.Tests/Core/Operations/AsyncCursorTests.cs index 1f5be9d301d..804bf16939b 100644 --- a/tests/MongoDB.Driver.Tests/Core/Operations/AsyncCursorTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Operations/AsyncCursorTests.cs @@ -328,7 +328,7 @@ public void Dispose_should_be_shielded_from_exceptions() { var mockChannelSource = new Mock<IChannelSource>(); mockChannelSource - .Setup(c => c.GetChannel(It.IsAny<CancellationToken>())) + .Setup(c => c.GetChannel(It.IsAny<OperationContext>())) .Throws<Exception>(); var subject = CreateSubject(cursorId: 1, channelSource: Optional.Create(mockChannelSource.Object)); @@ -341,7 +341,7 @@ public void Dispose_should_dispose_channel_source_when_cursor_was_not_closed_by_ { var mockChannelSource = new Mock<IChannelSource>(); mockChannelSource - .Setup(c => c.GetChannel(It.IsAny<CancellationToken>())) + .Setup(c => c.GetChannel(It.IsAny<OperationContext>())) .Throws<Exception>(); var subject = CreateSubject(cursorId: 1, channelSource: Optional.Create(mockChannelSource.Object)); @@ -385,7 +385,7 @@ public void Dispose_should_not_call_close_cursors_for_zero_cursor_id() var mockChannelSource = new Mock<IChannelSource>(); mockChannelSource - 
.Setup(c => c.GetChannel(It.IsAny<CancellationToken>())) + .Setup(c => c.GetChannel(It.IsAny<OperationContext>())) .Returns(mockChannelHandle.Object); var subject = CreateSubject(cursorId: 0, channelSource: Optional.Create(mockChannelSource.Object)); @@ -409,8 +409,6 @@ public void GetMore_should_use_same_session( var collectionNamespace = new CollectionNamespace(databaseNamespace, "collection"); var cursorId = 1; var subject = CreateSubject(collectionNamespace: collectionNamespace, cursorId: cursorId, channelSource: Optional.Create(channelSource)); - using var cancellationTokenSource = new CancellationTokenSource(); - var cancellationToken = cancellationTokenSource.Token; var connectionDescription = CreateConnectionDescriptionSupportingSession(); mockChannelSource.SetupGet(m => m.Session).Returns(session); @@ -427,13 +425,14 @@ public void GetMore_should_use_same_session( } }; - subject.MoveNext(cancellationToken); // skip empty first batch + subject.MoveNext(CancellationToken.None); // skip empty first batch var sameSessionWasUsed = false; if (async) { - mockChannelSource.Setup(m => m.GetChannelAsync(cancellationToken)).Returns(Task.FromResult(channel)); + mockChannelSource.Setup(m => m.GetChannelAsync(It.IsAny<OperationContext>())).Returns(Task.FromResult(channel)); mockChannel .Setup(m => m.CommandAsync( + It.IsAny<OperationContext>(), session, null, databaseNamespace, @@ -444,18 +443,18 @@ public void GetMore_should_use_same_session( null, CommandResponseHandling.Return, It.IsAny<IBsonSerializer<BsonDocument>>(), - It.IsAny<MessageEncoderSettings>(), - cancellationToken)) + It.IsAny<MessageEncoderSettings>())) .Callback(() => sameSessionWasUsed = true) .Returns(Task.FromResult(secondBatch)); - subject.MoveNextAsync(cancellationToken).GetAwaiter().GetResult(); + subject.MoveNextAsync(CancellationToken.None).GetAwaiter().GetResult(); } else { - mockChannelSource.Setup(m => m.GetChannel(cancellationToken)).Returns(channel); + mockChannelSource.Setup(m => 
m.GetChannel(It.IsAny<OperationContext>())).Returns(channel); mockChannel .Setup(m => m.Command( + It.IsAny<OperationContext>(), session, null, databaseNamespace, @@ -466,30 +465,17 @@ public void GetMore_should_use_same_session( null, CommandResponseHandling.Return, It.IsAny<IBsonSerializer<BsonDocument>>(), - It.IsAny<MessageEncoderSettings>(), - cancellationToken)) + It.IsAny<MessageEncoderSettings>())) .Callback(() => sameSessionWasUsed = true) .Returns(secondBatch); - subject.MoveNext(cancellationToken); + subject.MoveNext(CancellationToken.None); } sameSessionWasUsed.Should().BeTrue(); } // private methods - private void Close(AsyncCursor<BsonDocument> asyncCursor, bool async, CancellationToken cancellationToken) - { - if (async) - { - asyncCursor.CloseAsync(cancellationToken).GetAwaiter().GetResult(); - } - else - { - asyncCursor.Close(cancellationToken); - } - } - private ConnectionDescription CreateConnectionDescriptionSupportingSession(int maxWireVersion = WireVersion.Server36) { var clusterId = new ClusterId(1); @@ -529,18 +515,6 @@ private AsyncCursor<BsonDocument> CreateSubject( maxTime.WithDefault(null)); } - private bool MoveNext(IAsyncCursor<BsonDocument> asyncCursor, bool async, CancellationToken cancellationToken) - { - if (async) - { - return asyncCursor.MoveNextAsync(cancellationToken).GetAwaiter().GetResult(); - } - else - { - return asyncCursor.MoveNext(cancellationToken); - } - } - private void SetupChannelMocks(Mock<IChannelSource> mockChannelSource, Mock<IChannelHandle> mockChannelHandle, bool async, string commandResult, int maxWireVersion = WireVersion.Server36, bool isChannelExpired = false) { SetupChannelMocks(mockChannelSource, mockChannelHandle, async, BsonDocument.Parse(commandResult), maxWireVersion, isChannelExpired); @@ -563,12 +537,13 @@ private void SetupChannelMocks(Mock<IChannelSource> mockChannelSource, Mock<ICha if (async) { mockChannelSource - .Setup(c => c.GetChannelAsync(It.IsAny<CancellationToken>())) + .Setup(c => 
c.GetChannelAsync(It.IsAny<OperationContext>())) .ReturnsAsync(mockChannelHandle.Object); mockChannelHandle .Setup( c => c.CommandAsync( + It.IsAny<OperationContext>(), It.IsAny<ICoreSession>(), It.IsAny<ReadPreference>(), It.IsAny<DatabaseNamespace>(), @@ -579,8 +554,7 @@ private void SetupChannelMocks(Mock<IChannelSource> mockChannelSource, Mock<ICha It.IsAny<Action<IMessageEncoderPostProcessor>>(), It.IsAny<CommandResponseHandling>(), It.IsAny<IBsonSerializer<BsonDocument>>(), - It.IsAny<MessageEncoderSettings>(), - It.IsAny<CancellationToken>())) + It.IsAny<MessageEncoderSettings>())) .ReturnsAsync(() => { var bsonDocument = commandResultFunc(); @@ -590,12 +564,13 @@ private void SetupChannelMocks(Mock<IChannelSource> mockChannelSource, Mock<ICha else { mockChannelSource - .Setup(c => c.GetChannel(It.IsAny<CancellationToken>())) + .Setup(c => c.GetChannel(It.IsAny<OperationContext>())) .Returns(mockChannelHandle.Object); mockChannelHandle .Setup( c => c.Command( + It.IsAny<OperationContext>(), It.IsAny<ICoreSession>(), It.IsAny<ReadPreference>(), It.IsAny<DatabaseNamespace>(), @@ -606,8 +581,7 @@ private void SetupChannelMocks(Mock<IChannelSource> mockChannelSource, Mock<ICha It.IsAny<Action<IMessageEncoderPostProcessor>>(), It.IsAny<CommandResponseHandling>(), It.IsAny<IBsonSerializer<BsonDocument>>(), - It.IsAny<MessageEncoderSettings>(), - It.IsAny<CancellationToken>())) + It.IsAny<MessageEncoderSettings>())) .Returns(() => { var bsonDocument = commandResultFunc(); @@ -622,6 +596,7 @@ private void VerifyHowManyTimesKillCursorsCommandWasCalled(Mock<IChannelHandle> { mockChannelHandle.Verify( s => s.CommandAsync( + It.IsAny<OperationContext>(), It.IsAny<ICoreSession>(), It.IsAny<ReadPreference>(), It.IsAny<DatabaseNamespace>(), @@ -632,16 +607,14 @@ private void VerifyHowManyTimesKillCursorsCommandWasCalled(Mock<IChannelHandle> It.IsAny<Action<IMessageEncoderPostProcessor>>(), It.IsAny<CommandResponseHandling>(), It.IsAny<IBsonSerializer<BsonDocument>>(), - 
It.IsAny<MessageEncoderSettings>(), - It.IsAny<CancellationToken>()), + It.IsAny<MessageEncoderSettings>()), times); - - } else { mockChannelHandle.Verify( s => s.Command( + It.IsAny<OperationContext>(), It.IsAny<ICoreSession>(), It.IsAny<ReadPreference>(), It.IsAny<DatabaseNamespace>(), @@ -652,8 +625,7 @@ private void VerifyHowManyTimesKillCursorsCommandWasCalled(Mock<IChannelHandle> It.IsAny<Action<IMessageEncoderPostProcessor>>(), It.IsAny<CommandResponseHandling>(), It.IsAny<IBsonSerializer<BsonDocument>>(), - It.IsAny<MessageEncoderSettings>(), - It.IsAny<CancellationToken>()), + It.IsAny<MessageEncoderSettings>()), times); } } @@ -676,19 +648,18 @@ public void Session_reference_count_should_be_decremented_as_soon_as_possible(in Insert(documents); _session.ReferenceCount().Should().Be(1); - var cancellationToken = CancellationToken.None; using (var binding = new ReadPreferenceBinding(CoreTestConfiguration.Cluster, ReadPreference.Primary, _session.Fork())) - using (var channelSource = (ChannelSourceHandle)binding.GetReadChannelSource(cancellationToken)) - using (var channel = channelSource.GetChannel(cancellationToken)) + using (var channelSource = (ChannelSourceHandle)binding.GetReadChannelSource(OperationContext.NoTimeout)) + using (var channel = channelSource.GetChannel(OperationContext.NoTimeout)) { var query = new BsonDocument(); long cursorId; - var firstBatch = GetFirstBatch(channel, query, batchSize, cancellationToken, out cursorId); + var firstBatch = GetFirstBatch(channel, query, batchSize, CancellationToken.None, out cursorId); using (var cursor = new AsyncCursor<BsonDocument>(channelSource, _collectionNamespace, comment: null, firstBatch, cursorId, batchSize, null, BsonDocumentSerializer.Instance, new MessageEncoderSettings())) { AssertExpectedSessionReferenceCount(_session, cursor); - while (cursor.MoveNext(cancellationToken)) + while (cursor.MoveNext(CancellationToken.None)) { AssertExpectedSessionReferenceCount(_session, cursor); } @@ -721,6 
+692,7 @@ private IReadOnlyList<BsonDocument> GetFirstBatchUsingFindCommand(IChannelHandle { "batchSize", batchSize } }; var result = channel.Command<BsonDocument>( + new OperationContext(null, cancellationToken), _session, ReadPreference.Primary, _databaseNamespace, @@ -731,38 +703,12 @@ private IReadOnlyList<BsonDocument> GetFirstBatchUsingFindCommand(IChannelHandle null, // postWriteAction CommandResponseHandling.Return, BsonDocumentSerializer.Instance, - _messageEncoderSettings, - cancellationToken); + _messageEncoderSettings); var cursor = result["cursor"].AsBsonDocument; var firstBatch = cursor["firstBatch"].AsBsonArray.Select(i => i.AsBsonDocument).ToList(); cursorId = cursor["id"].ToInt64(); return firstBatch; } - - private IReadOnlyList<BsonDocument> GetFirstBatchUsingQueryMessage(IChannelHandle channel, BsonDocument query, int batchSize, CancellationToken cancellationToken, out long cursorId) - { -#pragma warning disable 618 - var result = channel.Query( - _collectionNamespace, - query, - null, // fields - NoOpElementNameValidator.Instance, - 0, // skip - batchSize, - false, // secondaryOk - false, // partialOk - false, // noCursorTimeout - false, // oplogReplay - false, // tailableCursor - false, // awaitData - BsonDocumentSerializer.Instance, - _messageEncoderSettings, - cancellationToken); -#pragma warning restore 618 - - cursorId = result.CursorId; - return result.Documents; - } } internal static class AsyncCursorReflector diff --git a/tests/MongoDB.Driver.Tests/Core/Operations/BulkMixedWriteOperationTests.cs b/tests/MongoDB.Driver.Tests/Core/Operations/BulkMixedWriteOperationTests.cs index fc223f8ab02..c14714121a3 100644 --- a/tests/MongoDB.Driver.Tests/Core/Operations/BulkMixedWriteOperationTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Operations/BulkMixedWriteOperationTests.cs @@ -1376,8 +1376,8 @@ public void Execute_unacknowledged_with_an_error_in_the_first_batch_and_ordered_ }; using (var readWriteBinding = 
CreateReadWriteBinding(useImplicitSession: true)) - using (var channelSource = readWriteBinding.GetWriteChannelSource(CancellationToken.None)) - using (var channel = channelSource.GetChannel(CancellationToken.None)) + using (var channelSource = readWriteBinding.GetWriteChannelSource(OperationContext.NoTimeout)) + using (var channel = channelSource.GetChannel(OperationContext.NoTimeout)) using (var channelBinding = new ChannelReadWriteBinding(channelSource.Server, channel, readWriteBinding.Session.Fork())) { var result = ExecuteOperation(subject, channelBinding, async); @@ -1423,8 +1423,8 @@ public void Execute_unacknowledged_with_an_error_in_the_first_batch_and_ordered_ }; using (var readWriteBinding = CreateReadWriteBinding(useImplicitSession: true)) - using (var channelSource = readWriteBinding.GetWriteChannelSource(CancellationToken.None)) - using (var channel = channelSource.GetChannel(CancellationToken.None)) + using (var channelSource = readWriteBinding.GetWriteChannelSource(OperationContext.NoTimeout)) + using (var channel = channelSource.GetChannel(OperationContext.NoTimeout)) using (var channelBinding = new ChannelReadWriteBinding(channelSource.Server, channel, readWriteBinding.Session.Fork())) { var result = ExecuteOperation(subject, channelBinding, async); @@ -1464,8 +1464,8 @@ public void Execute_unacknowledged_with_an_error_in_the_second_batch_and_ordered }; using (var readWriteBinding = CreateReadWriteBinding(useImplicitSession: true)) - using (var channelSource = readWriteBinding.GetWriteChannelSource(CancellationToken.None)) - using (var channel = channelSource.GetChannel(CancellationToken.None)) + using (var channelSource = readWriteBinding.GetWriteChannelSource(OperationContext.NoTimeout)) + using (var channel = channelSource.GetChannel(OperationContext.NoTimeout)) using (var channelBinding = new ChannelReadWriteBinding(channelSource.Server, channel, readWriteBinding.Session.Fork())) { var result = ExecuteOperation(subject, channelBinding, async); 
@@ -1505,8 +1505,8 @@ public void Execute_unacknowledged_with_an_error_in_the_second_batch_and_ordered }; using (var readWriteBinding = CreateReadWriteBinding(useImplicitSession: true)) - using (var channelSource = readWriteBinding.GetWriteChannelSource(CancellationToken.None)) - using (var channel = channelSource.GetChannel(CancellationToken.None)) + using (var channelSource = readWriteBinding.GetWriteChannelSource(OperationContext.NoTimeout)) + using (var channel = channelSource.GetChannel(OperationContext.NoTimeout)) using (var channelBinding = new ChannelReadWriteBinding(channelSource.Server, channel, readWriteBinding.Session.Fork())) { var result = ExecuteOperation(subject, channelBinding, async); diff --git a/tests/MongoDB.Driver.Tests/Core/Operations/ChangeStreamCursorTests.cs b/tests/MongoDB.Driver.Tests/Core/Operations/ChangeStreamCursorTests.cs index f2661693749..4e11447d94c 100644 --- a/tests/MongoDB.Driver.Tests/Core/Operations/ChangeStreamCursorTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Operations/ChangeStreamCursorTests.cs @@ -435,8 +435,6 @@ public void MoveNext_should_call_Resume_after_resumable_exception( var mockBinding = new Mock<IReadBinding>(); var mockOperation = new Mock<IChangeStreamOperation<BsonDocument>>(); var subject = CreateSubject(cursor: mockCursor.Object, binding: mockBinding.Object, changeStreamOperation: mockOperation.Object); - using var cancellationTokenSource = new CancellationTokenSource(); - var cancellationToken = cancellationTokenSource.Token; var resumableException = CoreExceptionHelper.CreateException(resumableExceptionType); var mockResumedCursor = CreateMockCursor(); @@ -444,34 +442,34 @@ public void MoveNext_should_call_Resume_after_resumable_exception( var resumeToken = BsonDocument.Parse("{ resumeToken : 1 }"); var firstDocument = BsonDocument.Parse("{ _id : { resumeToken : 1 }, operationType : \"insert\", ns : { db : \"db\", coll : \"coll\" }, documentKey : { _id : 1 }, fullDocument : { _id : 1 } }"); var 
firstBatch = new[] { ToRawDocument(firstDocument) }; - mockCursor.Setup(c => c.MoveNext(cancellationToken)).Returns(true); + mockCursor.Setup(c => c.MoveNext(It.IsAny<CancellationToken>())).Returns(true); mockCursor.SetupGet(c => c.Current).Returns(firstBatch); - subject.MoveNext(cancellationToken); + subject.MoveNext(CancellationToken.None); bool result; if (async) { - mockCursor.Setup(c => c.MoveNextAsync(cancellationToken)).Returns(CreateFaultedTask<bool>(resumableException)); - mockOperation.Setup(o => o.ResumeAsync(mockBinding.Object, cancellationToken)).Returns(Task.FromResult(mockResumedCursor.Object)); - mockResumedCursor.Setup(c => c.MoveNextAsync(cancellationToken)).Returns(Task.FromResult(expectedResult)); + mockCursor.Setup(c => c.MoveNextAsync(It.IsAny<CancellationToken>())).Returns(CreateFaultedTask<bool>(resumableException)); + mockOperation.Setup(o => o.ResumeAsync(It.IsAny<OperationContext>(), mockBinding.Object)).Returns(Task.FromResult(mockResumedCursor.Object)); + mockResumedCursor.Setup(c => c.MoveNextAsync(It.IsAny<CancellationToken>())).Returns(Task.FromResult(expectedResult)); - result = subject.MoveNextAsync(cancellationToken).GetAwaiter().GetResult(); + result = subject.MoveNextAsync(It.IsAny<CancellationToken>()).GetAwaiter().GetResult(); - mockCursor.Verify(c => c.MoveNextAsync(cancellationToken), Times.Once); - mockOperation.Verify(o => o.ResumeAsync(mockBinding.Object, cancellationToken), Times.Once); - mockResumedCursor.Verify(c => c.MoveNextAsync(cancellationToken), Times.Once); + mockCursor.Verify(c => c.MoveNextAsync(It.IsAny<CancellationToken>()), Times.Once); + mockOperation.Verify(o => o.ResumeAsync(It.IsAny<OperationContext>(), mockBinding.Object), Times.Once); + mockResumedCursor.Verify(c => c.MoveNextAsync(It.IsAny<CancellationToken>()), Times.Once); } else { - mockCursor.Setup(c => c.MoveNext(cancellationToken)).Throws(resumableException); - mockOperation.Setup(o => o.Resume(mockBinding.Object, 
cancellationToken)).Returns(mockResumedCursor.Object); - mockResumedCursor.Setup(c => c.MoveNext(cancellationToken)).Returns(expectedResult); + mockCursor.Setup(c => c.MoveNext(It.IsAny<CancellationToken>())).Throws(resumableException); + mockOperation.Setup(o => o.Resume(It.IsAny<OperationContext>(), mockBinding.Object)).Returns(mockResumedCursor.Object); + mockResumedCursor.Setup(c => c.MoveNext(It.IsAny<CancellationToken>())).Returns(expectedResult); - result = subject.MoveNext(cancellationToken); + result = subject.MoveNext(It.IsAny<CancellationToken>()); - mockCursor.Verify(c => c.MoveNext(cancellationToken), Times.Exactly(2)); - mockOperation.Verify(o => o.Resume(mockBinding.Object, cancellationToken), Times.Once); - mockResumedCursor.Verify(c => c.MoveNext(cancellationToken), Times.Once); + mockCursor.Verify(c => c.MoveNext(It.IsAny<CancellationToken>()), Times.Exactly(2)); + mockOperation.Verify(o => o.Resume(It.IsAny<OperationContext>(), mockBinding.Object), Times.Once); + mockResumedCursor.Verify(c => c.MoveNext(It.IsAny<CancellationToken>()), Times.Once); } result.Should().Be(expectedResult); diff --git a/tests/MongoDB.Driver.Tests/Core/Operations/ChangeStreamOperationTests.cs b/tests/MongoDB.Driver.Tests/Core/Operations/ChangeStreamOperationTests.cs index b368fdea6a2..394685a8807 100644 --- a/tests/MongoDB.Driver.Tests/Core/Operations/ChangeStreamOperationTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Operations/ChangeStreamOperationTests.cs @@ -636,15 +636,7 @@ public void Execute_should_throw_when_binding_does_not_implement_IReadBindingHan var subject = CreateSubject(); var binding = new Mock<IReadBinding>().Object; - Exception exception; - if (async) - { - exception = Record.Exception(() => subject.ExecuteAsync(binding, CancellationToken.None).GetAwaiter().GetResult()); - } - else - { - exception = Record.Exception(() => subject.Execute(binding, CancellationToken.None)); - } + var exception = Record.Exception(() => ExecuteOperation(subject, binding, 
async)); var argumentException = exception.Should().BeOfType<ArgumentException>().Subject; argumentException.ParamName.Should().Be("binding"); diff --git a/tests/MongoDB.Driver.Tests/Core/Operations/CompositeWriteOperationTests.cs b/tests/MongoDB.Driver.Tests/Core/Operations/CompositeWriteOperationTests.cs index 18c67618a5c..3dcc8a36cfc 100644 --- a/tests/MongoDB.Driver.Tests/Core/Operations/CompositeWriteOperationTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Operations/CompositeWriteOperationTests.cs @@ -14,7 +14,6 @@ */ using System; -using System.Threading; using System.Threading.Tasks; using FluentAssertions; using MongoDB.Bson; @@ -70,8 +69,8 @@ public async Task Enumerating_operations_should_be_stopped_when_error([Values(fa var subject = new CompositeWriteOperation<BsonDocument>((healthyOperation1.Object, IsMainOperation: false), (faultyOperation2.Object, IsMainOperation: false), (healthyOperation3.Object, IsMainOperation: true)); var resultedException = async - ? await Record.ExceptionAsync(() => subject.ExecuteAsync(Mock.Of<IWriteBinding>(), CancellationToken.None)) - : Record.Exception(() => subject.Execute(Mock.Of<IWriteBinding>(), CancellationToken.None)); + ? await Record.ExceptionAsync(() => subject.ExecuteAsync(OperationContext.NoTimeout, Mock.Of<IWriteBinding>())) + : Record.Exception(() => subject.Execute(OperationContext.NoTimeout, Mock.Of<IWriteBinding>())); resultedException.Should().Be(testException); @@ -93,8 +92,8 @@ public void Enumerating_operations_should_return_result_of_main_operation([Value var subject = new CompositeWriteOperation<BsonDocument>((operation1.Object, IsMainOperation: false), (operation2.Object, IsMainOperation: true), (operation3.Object, IsMainOperation: false)); var result = async - ? subject.ExecuteAsync(Mock.Of<IWriteBinding>(), CancellationToken.None).GetAwaiter().GetResult() - : subject.Execute(Mock.Of<IWriteBinding>(), CancellationToken.None); + ? 
subject.ExecuteAsync(OperationContext.NoTimeout, Mock.Of<IWriteBinding>()).GetAwaiter().GetResult() + : subject.Execute(OperationContext.NoTimeout, Mock.Of<IWriteBinding>()); result.Should().Be(operation2Result); @@ -108,10 +107,10 @@ private Mock<IWriteOperation<BsonDocument>> CreateFaultyOperation(Exception test { var mockedOperation = new Mock<IWriteOperation<BsonDocument>>(); mockedOperation - .Setup(c => c.Execute(It.IsAny<IWriteBinding>(), It.IsAny<CancellationToken>())) + .Setup(c => c.Execute(It.IsAny<OperationContext>(), It.IsAny<IWriteBinding>())) .Throws(testException); mockedOperation - .Setup(c => c.ExecuteAsync(It.IsAny<IWriteBinding>(), It.IsAny<CancellationToken>())) + .Setup(c => c.ExecuteAsync(It.IsAny<OperationContext>(), It.IsAny<IWriteBinding>())) .Throws(testException); return mockedOperation; } @@ -120,10 +119,10 @@ private Mock<IWriteOperation<BsonDocument>> CreateHealthyOperation(BsonDocument { var mockedOperation = new Mock<IWriteOperation<BsonDocument>>(); mockedOperation - .Setup(c => c.Execute(It.IsAny<IWriteBinding>(), It.IsAny<CancellationToken>())) + .Setup(c => c.Execute(It.IsAny<OperationContext>(), It.IsAny<IWriteBinding>())) .Returns(response); mockedOperation - .Setup(c => c.ExecuteAsync(It.IsAny<IWriteBinding>(), It.IsAny<CancellationToken>())) + .Setup(c => c.ExecuteAsync(It.IsAny<OperationContext>(), It.IsAny<IWriteBinding>())) .ReturnsAsync(response); return mockedOperation; } @@ -132,11 +131,11 @@ private void VeryfyOperation(Mock<IWriteOperation<BsonDocument>> mockedOperation { if (async) { - mockedOperation.Verify(c => c.ExecuteAsync(It.IsAny<IWriteBinding>(), It.IsAny<CancellationToken>()), hasBeenCalled ? Times.Once : Times.Never); + mockedOperation.Verify(c => c.ExecuteAsync(It.IsAny<OperationContext>(), It.IsAny<IWriteBinding>()), hasBeenCalled ? Times.Once : Times.Never); } else { - mockedOperation.Verify(c => c.Execute(It.IsAny<IWriteBinding>(), It.IsAny<CancellationToken>()), hasBeenCalled ? 
Times.Once : Times.Never); + mockedOperation.Verify(c => c.Execute(It.IsAny<OperationContext>(), It.IsAny<IWriteBinding>()), hasBeenCalled ? Times.Once : Times.Never); } } } diff --git a/tests/MongoDB.Driver.Tests/Core/Operations/CountOperationTests.cs b/tests/MongoDB.Driver.Tests/Core/Operations/CountOperationTests.cs index 0d930fbf801..e1a5e6d7622 100644 --- a/tests/MongoDB.Driver.Tests/Core/Operations/CountOperationTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Operations/CountOperationTests.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,6 +14,7 @@ */ using System; +using System.Threading; using System.Threading.Tasks; using FluentAssertions; using MongoDB.Bson; @@ -202,7 +203,7 @@ public void CreateCommand_should_return_expected_result() var connectionDescription = OperationTestHelper.CreateConnectionDescription(); var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -225,7 +226,7 @@ public void CreateCommand_should_return_expected_result_when_Comment_is_set( var connectionDescription = OperationTestHelper.CreateConnectionDescription(); var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -250,7 +251,7 @@ public void CreateCommand_should_return_expected_result_when_Collation_is_set( var connectionDescription = OperationTestHelper.CreateConnectionDescription(); var session = OperationTestHelper.CreateSession(); - var result = 
subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -275,7 +276,7 @@ public void CreateCommand_should_return_expected_result_when_Filter_is_set( var connectionDescription = OperationTestHelper.CreateConnectionDescription(); var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -300,7 +301,7 @@ public void CreateCommand_should_return_expected_result_when_Hint_is_set( var connectionDescription = OperationTestHelper.CreateConnectionDescription(); var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -324,7 +325,7 @@ public void CreateCommand_should_return_expected_result_when_Limit_is_set( var connectionDescription = OperationTestHelper.CreateConnectionDescription(); var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -350,7 +351,7 @@ public void CreateCommand_should_return_expected_result_when_MaxTime_is_set(long var connectionDescription = OperationTestHelper.CreateConnectionDescription(); var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -361,6 +362,24 @@ public void 
CreateCommand_should_return_expected_result_when_MaxTime_is_set(long result["maxTimeMS"].BsonType.Should().Be(BsonType.Int32); } + [Theory] + [InlineData(42)] + [InlineData(-1)] + public void CreateCommand_should_ignore_maxtime_if_timeout_specified(int timeoutMs) + { + var subject = new CountOperation(_collectionNamespace, _messageEncoderSettings) + { + MaxTime = TimeSpan.FromTicks(10) + }; + var connectionDescription = OperationTestHelper.CreateConnectionDescription(); + var session = OperationTestHelper.CreateSession(); + + var operationContext = new OperationContext(TimeSpan.FromMilliseconds(timeoutMs), CancellationToken.None); + var result = subject.CreateCommand(operationContext, session, connectionDescription); + + result.Should().NotContain("maxTimeMS"); + } + [Theory] [ParameterAttributeData] public void CreateCommand_should_return_expected_result_when_ReadConcern_is_set( @@ -376,7 +395,7 @@ public void CreateCommand_should_return_expected_result_when_ReadConcern_is_set( var connectionDescription = OperationTestHelper.CreateConnectionDescription(); var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -401,7 +420,7 @@ public void CreateCommand_should_return_the_expected_result_when_using_causal_co var connectionDescription = OperationTestHelper.CreateConnectionDescription(supportsSessions: true); var session = OperationTestHelper.CreateSession(true, new BsonTimestamp(100)); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedReadConcernDocument = readConcern.ToBsonDocument(); expectedReadConcernDocument["afterClusterTime"] = new BsonTimestamp(100); @@ -428,7 +447,7 @@ public void 
CreateCommand_should_return_expected_result_when_Skip_is_set( var connectionDescription = OperationTestHelper.CreateConnectionDescription(); var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { diff --git a/tests/MongoDB.Driver.Tests/Core/Operations/CreateCollectionOperationTests.cs b/tests/MongoDB.Driver.Tests/Core/Operations/CreateCollectionOperationTests.cs index 3ef1da35313..8fdc19a7467 100644 --- a/tests/MongoDB.Driver.Tests/Core/Operations/CreateCollectionOperationTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Operations/CreateCollectionOperationTests.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -76,9 +76,7 @@ public void constructor_should_initialize_subject() subject.IndexOptionDefaults.Should().BeNull(); subject.MaxDocuments.Should().NotHaveValue(); subject.MaxSize.Should().NotHaveValue(); - subject.NoPadding.Should().NotHaveValue(); subject.StorageEngine.Should().BeNull(); - subject.UsePowerOf2Sizes.Should().NotHaveValue(); subject.ValidationAction.Should().BeNull(); subject.ValidationLevel.Should().BeNull(); subject.Validator.Should().BeNull(); @@ -100,7 +98,7 @@ public void CreateCommand_should_return_expected_result() var subject = new CreateCollectionOperation(_collectionNamespace, _messageEncoderSettings); var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session); var expectedResult = new BsonDocument { @@ -121,7 +119,7 @@ public void CreateCommand_should_return_expected_result_when_Capped_is_set( }; var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session); var expectedResult = new BsonDocument { @@ -144,7 +142,7 @@ public void CreateCommand_should_return_expected_result_when_ChangeStreamsPreAnd var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session); var expectedResult = new BsonDocument { @@ -166,7 +164,7 @@ public void CreateCommand_should_return_expected_result_when_ClusteredIndex_is_s }; var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session); var expectedResult = new BsonDocument { @@ -189,7 +187,7 @@ public void CreateCommand_should_return_expected_result_when_Collation_is_set( }; var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(session); + var 
result = subject.CreateCommand(OperationContext.NoTimeout, session); var expectedResult = new BsonDocument { @@ -212,7 +210,7 @@ public void CreateCommand_should_return_expected_result_when_IndexOptionDefaults }; var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session); var expectedResult = new BsonDocument { @@ -234,7 +232,7 @@ public void CreateCommand_should_return_expected_result_when_MaxDocuments_is_set }; var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session); var expectedResult = new BsonDocument { @@ -256,7 +254,7 @@ public void CreateCommand_should_return_expected_result_when_MaxSize_is_set( }; var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session); var expectedResult = new BsonDocument { @@ -266,28 +264,6 @@ public void CreateCommand_should_return_expected_result_when_MaxSize_is_set( result.Should().Be(expectedResult); } - [Theory] - [ParameterAttributeData] - public void CreateCommand_should_return_expected_result_when_NoPadding_is_set( - [Values(null, false, true)] - bool? noPadding) - { - var subject = new CreateCollectionOperation(_collectionNamespace, _messageEncoderSettings) - { - NoPadding = noPadding - }; - var session = OperationTestHelper.CreateSession(); - - var result = subject.CreateCommand(session); - - var expectedResult = new BsonDocument - { - { "create", _collectionNamespace.CollectionName }, - { "flags", () => noPadding.Value ? 
2 : 0, noPadding != null } - }; - result.Should().Be(expectedResult); - } - [Theory] [ParameterAttributeData] public void CreateCommand_should_return_expected_result_when_StorageEngine_is_set( @@ -301,7 +277,7 @@ public void CreateCommand_should_return_expected_result_when_StorageEngine_is_se }; var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session); var expectedResult = new BsonDocument { @@ -311,28 +287,6 @@ public void CreateCommand_should_return_expected_result_when_StorageEngine_is_se result.Should().Be(expectedResult); } - [Theory] - [ParameterAttributeData] - public void CreateCommand_should_return_expected_result_when_UsePowerOf2Sizes_is_set( - [Values(null, false, true)] - bool? usePowerOf2Sizes) - { - var subject = new CreateCollectionOperation(_collectionNamespace, _messageEncoderSettings) - { - UsePowerOf2Sizes = usePowerOf2Sizes - }; - var session = OperationTestHelper.CreateSession(); - - var result = subject.CreateCommand(session); - - var expectedResult = new BsonDocument - { - { "create", _collectionNamespace.CollectionName }, - { "flags", () => usePowerOf2Sizes.Value ? 
1 : 0, usePowerOf2Sizes != null } - }; - result.Should().Be(expectedResult); - } - [Theory] [ParameterAttributeData] public void CreateCommand_should_return_expected_result_when_ValidationAction_is_set( @@ -345,7 +299,7 @@ public void CreateCommand_should_return_expected_result_when_ValidationAction_is }; var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session); var expectedResult = new BsonDocument { @@ -367,7 +321,7 @@ public void CreateCommand_should_return_expected_result_when_ValidationLevel_is_ }; var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session); var expectedResult = new BsonDocument { @@ -390,7 +344,7 @@ public void CreateCommand_should_return_expected_result_when_Validator_is_set( }; var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session); var expectedResult = new BsonDocument { @@ -403,22 +357,36 @@ public void CreateCommand_should_return_expected_result_when_Validator_is_set( [Theory] [ParameterAttributeData] public void CreateCommand_should_return_expected_result_when_WriteConcern_is_set( - [Values(null, 1, 2)] - int? w) + [Values(null, 1, 2)] int? w, + [Values(null, 100)] int? wtimeout, + [Values(true, false)] bool hasOperationTimeout) { var writeConcern = w.HasValue ? new WriteConcern(w.Value) : null; + if (wtimeout.HasValue) + { + writeConcern ??= WriteConcern.Acknowledged; + writeConcern = writeConcern.With(wTimeout: TimeSpan.FromMilliseconds(wtimeout.Value)); + } + var subject = new CreateCollectionOperation(_collectionNamespace, _messageEncoderSettings) { WriteConcern = writeConcern }; + var operationContext = hasOperationTimeout ? 
new OperationContext(TimeSpan.FromSeconds(42), CancellationToken.None) : OperationContext.NoTimeout; var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(session); + var result = subject.CreateCommand(operationContext, session); + + var expectedConcern = writeConcern?.ToBsonDocument(); + if (hasOperationTimeout) + { + expectedConcern?.Remove("wtimeout"); + } var expectedResult = new BsonDocument { { "create", _collectionNamespace.CollectionName }, - { "writeConcern", () => writeConcern.ToBsonDocument(), writeConcern != null } + { "writeConcern", () => expectedConcern, w.HasValue || (wtimeout.HasValue && !hasOperationTimeout) } }; result.Should().Be(expectedResult); } @@ -431,7 +399,7 @@ public void CreateEncryptedCreateCollectionOperationIfConfigured_should_return_e var s = subject.Should().BeOfType<CreateCollectionOperation>().Subject; - var command = s.CreateCommand(session); + var command = s.CreateCommand(OperationContext.NoTimeout, session); var expectedResult = new BsonDocument { @@ -521,8 +489,8 @@ void AssertCommand((IWriteOperation<BsonDocument> Operation, bool IsMainOperatio var result = operation switch { - CreateCollectionOperation createCollectionOperation => createCollectionOperation.CreateCommand(session), - CreateIndexesOperation createIndexesOperation => createIndexesOperation.CreateCommand(session, OperationTestHelper.CreateConnectionDescription()), + CreateCollectionOperation createCollectionOperation => createCollectionOperation.CreateCommand(OperationContext.NoTimeout, session), + CreateIndexesOperation createIndexesOperation => createIndexesOperation.CreateCommand(OperationContext.NoTimeout, session, OperationTestHelper.CreateConnectionDescription()), _ => throw new Exception($"Unexpected operation {operation}."), }; result.Should().Be(expectedResult); @@ -700,31 +668,6 @@ public void Execute_should_create_collection_when_MaxSize_is_set( info["options"]["size"].ToInt64().Should().BeGreaterOrEqualTo(maxSize); 
// server rounds maxSize up } - [Theory] - [ParameterAttributeData] - public void Execute_should_create_collection_when_NoPadding_is_set( - [Values(false, true)] - bool noPadding, - [Values(false, true)] - bool async) - { - RequireServer.Check().ClusterTypes(ClusterType.Standalone, ClusterType.ReplicaSet).StorageEngine("mmapv1"); - DropCollection(); - var subject = new CreateCollectionOperation(_collectionNamespace, _messageEncoderSettings) - { - NoPadding = noPadding - }; - - BsonDocument info; - using (var binding = CreateReadWriteBinding()) - { - ExecuteOperation(subject, binding, async); - info = GetCollectionInfo(binding); - } - - info["options"]["flags"].Should().Be(noPadding ? 2 : 0); - } - [Theory] [ParameterAttributeData] public void Execute_should_create_collection_when_StorageEngine_is_set( @@ -754,31 +697,6 @@ public void Execute_should_create_collection_when_StorageEngine_is_set( info["options"]["storageEngine"].Should().Be(storageEngine); } - [Theory] - [ParameterAttributeData] - public void Execute_should_create_collection_when_UsePowerOf2Sizes_is_set( - [Values(false, true)] - bool usePowerOf2Sizes, - [Values(false, true)] - bool async) - { - RequireServer.Check().ClusterTypes(ClusterType.Standalone, ClusterType.ReplicaSet).StorageEngine("mmapv1"); - DropCollection(); - var subject = new CreateCollectionOperation(_collectionNamespace, _messageEncoderSettings) - { - UsePowerOf2Sizes = usePowerOf2Sizes - }; - - BsonDocument info; - using (var binding = CreateReadWriteBinding()) - { - ExecuteOperation(subject, binding, async); - info = GetCollectionInfo(binding); - } - - info["options"]["flags"].Should().Be(usePowerOf2Sizes ? 
1 : 0); - } - [Theory] [ParameterAttributeData] public void Execute_should_create_collection_when_Validator_is_set( @@ -925,20 +843,6 @@ public void MaxSize_set_should_throw_when_value_is_invalid( argumentOutOfRangeException.ParamName.Should().Be("value"); } - [Theory] - [ParameterAttributeData] - public void NoPadding_get_and_set_should_work( - [Values(null, false, true)] - bool? value) - { - var subject = new CreateCollectionOperation(_collectionNamespace, _messageEncoderSettings); - - subject.NoPadding = value; - var result = subject.NoPadding; - - result.Should().Be(value); - } - [Theory] [ParameterAttributeData] public void StorageEngine_get_and_set_should_work( @@ -954,20 +858,6 @@ public void StorageEngine_get_and_set_should_work( result.Should().BeSameAs(value); } - [Theory] - [ParameterAttributeData] - public void UsePowerOf2Sizes_get_and_set_should_work( - [Values(null, false, true)] - bool? value) - { - var subject = new CreateCollectionOperation(_collectionNamespace, _messageEncoderSettings); - - subject.UsePowerOf2Sizes = value; - var result = subject.UsePowerOf2Sizes; - - result.Should().Be(value); - } - [Theory] [ParameterAttributeData] public void ValidationAction_get_and_set_should_work( @@ -1031,11 +921,11 @@ private BsonDocument ExecuteOperation(CreateCollectionOperation subject, IWriteB { if (async) { - return subject.ExecuteAsync(binding, CancellationToken.None).GetAwaiter().GetResult(); + return subject.ExecuteAsync(OperationContext.NoTimeout, binding).GetAwaiter().GetResult(); } else { - return subject.Execute(binding, CancellationToken.None); + return subject.Execute(OperationContext.NoTimeout, binding); } } @@ -1045,7 +935,7 @@ private BsonDocument GetCollectionInfo(IReadBinding binding) { Filter = new BsonDocument("name", _collectionNamespace.CollectionName) }; - return listCollectionsOperation.Execute(binding, CancellationToken.None).Single(); + return listCollectionsOperation.Execute(OperationContext.NoTimeout, binding).Single(); } } } 
diff --git a/tests/MongoDB.Driver.Tests/Core/Operations/CreateIndexesOperationTests.cs b/tests/MongoDB.Driver.Tests/Core/Operations/CreateIndexesOperationTests.cs index b6d385e80cb..7ae617e7cc3 100644 --- a/tests/MongoDB.Driver.Tests/Core/Operations/CreateIndexesOperationTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Operations/CreateIndexesOperationTests.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,6 +16,7 @@ using System; using System.Collections.Generic; using System.Linq; +using System.Threading; using System.Threading.Tasks; using FluentAssertions; using MongoDB.Bson; @@ -92,7 +93,7 @@ public void CreateCommand_should_return_expected_result_when_creating_one_index( var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -114,7 +115,7 @@ public void CreateCommand_should_return_expected_result_when_creating_two_indexe var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -138,7 +139,7 @@ public void CreateCommand_should_return_expected_result_when_CommitQuorum_with_m var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(Feature.CreateIndexCommitQuorum.FirstSupportedWireVersion); - var result = subject.CreateCommand(session, 
connectionDescription); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -164,7 +165,7 @@ public void CreateCommand_should_return_expected_result_when_CommitQuorum_with_w var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(Feature.CreateIndexCommitQuorum.FirstSupportedWireVersion); - var result = subject.CreateCommand(session, connectionDescription); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -186,12 +187,14 @@ public void CreateCommand_should_return_expected_result_when_CommitQuorum_with_w public void CreateCommand_should_return_expected_result_when_MaxTime_is_Set(long maxTimeTicks, int expectedMaxTimeMS) { var requests = new[] { new CreateIndexRequest(new BsonDocument("x", 1)) }; - var subject = new CreateIndexesOperation(_collectionNamespace, requests, _messageEncoderSettings); - subject.MaxTime = TimeSpan.FromTicks(maxTimeTicks); + var subject = new CreateIndexesOperation(_collectionNamespace, requests, _messageEncoderSettings) + { + MaxTime = TimeSpan.FromTicks(maxTimeTicks) + }; var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -203,6 +206,25 @@ public void CreateCommand_should_return_expected_result_when_MaxTime_is_Set(long result["maxTimeMS"].BsonType.Should().Be(BsonType.Int32); } + [Theory] + [InlineData(42)] + [InlineData(-1)] + public void CreateCommand_should_ignore_maxtime_if_timeout_specified(int timeoutMs) + { + var requests = new[] { new CreateIndexRequest(new BsonDocument("x", 1)) }; + var subject = 
new CreateIndexesOperation(_collectionNamespace, requests, _messageEncoderSettings) + { + MaxTime = TimeSpan.FromTicks(10) + }; + var session = OperationTestHelper.CreateSession(); + var connectionDescription = OperationTestHelper.CreateConnectionDescription(); + + var operationContext = new OperationContext(TimeSpan.FromMilliseconds(timeoutMs), CancellationToken.None); + var result = subject.CreateCommand(operationContext, session, connectionDescription); + + result.Should().NotContain("maxTimeMS"); + } + [Theory] [ParameterAttributeData] public void CreateCommand_should_return_expected_result_when_WriteConcern_is_set( @@ -218,7 +240,7 @@ public void CreateCommand_should_return_expected_result_when_WriteConcern_is_set var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -241,7 +263,7 @@ public void CreateCommand_should_throw_when_commitQuorum_is_specified_and_not_su var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(Feature.CreateIndexCommitQuorum.LastNotSupportedWireVersion); - var exception = Record.Exception(() => subject.CreateCommand(session, connectionDescription)); + var exception = Record.Exception(() => subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription)); exception.Should().BeOfType<NotSupportedException>(); } diff --git a/tests/MongoDB.Driver.Tests/Core/Operations/CreateViewOperationTests.cs b/tests/MongoDB.Driver.Tests/Core/Operations/CreateViewOperationTests.cs index ef72d20e5fa..0578053e9d5 100644 --- a/tests/MongoDB.Driver.Tests/Core/Operations/CreateViewOperationTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Operations/CreateViewOperationTests.cs @@ -1,4 +1,4 @@ 
-/* Copyright 2016-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,18 +14,13 @@ */ using System; -using System.Collections.Generic; using System.Linq; -using System.Text; using System.Threading; -using System.Threading.Tasks; using FluentAssertions; using MongoDB.Bson; using MongoDB.TestHelpers.XunitExtensions; using MongoDB.Driver.Core.Bindings; using MongoDB.Driver.Core.Clusters; -using MongoDB.Driver.Core.Misc; -using MongoDB.Driver.Core.TestHelpers; using MongoDB.Driver.Core.TestHelpers.XunitExtensions; using Xunit; @@ -256,7 +251,7 @@ public void CreateCommand_should_return_expected_result( var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -281,7 +276,7 @@ public void CreateCommand_should_return_expected_result_when_Collation_is_set( var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -295,24 +290,39 @@ public void CreateCommand_should_return_expected_result_when_Collation_is_set( [Theory] [ParameterAttributeData] public void CreateCommand_should_return_expected_result_when_WriteConcern_is_set( - [Values(null, 1, 2)] - int? w) + [Values(null, 1, 2)] int? w, + [Values(null, 100)] int? wtimeout, + [Values(true, false)] bool hasOperationTimeout) { var writeConcern = w.HasValue ? 
new WriteConcern(w.Value) : null; + if (wtimeout.HasValue) + { + writeConcern ??= WriteConcern.Acknowledged; + writeConcern = writeConcern.With(wTimeout: TimeSpan.FromMilliseconds(wtimeout.Value)); + } + var subject = new CreateViewOperation(_databaseNamespace, _viewName, _collectionNamespace.CollectionName, _pipeline, _messageEncoderSettings) { WriteConcern = writeConcern }; + var operationContext = hasOperationTimeout ? new OperationContext(TimeSpan.FromSeconds(42), CancellationToken.None) : OperationContext.NoTimeout; var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription); + var result = subject.CreateCommand(operationContext, session, connectionDescription); + + var expectedConcern = writeConcern?.ToBsonDocument(); + if (hasOperationTimeout) + { + expectedConcern?.Remove("wtimeout"); + } + var expectedResult = new BsonDocument { { "create", _viewName }, { "viewOn", _collectionNamespace.CollectionName }, { "pipeline", new BsonArray(_pipeline) }, - { "writeConcern", () => writeConcern.ToBsonDocument(), writeConcern != null } + { "writeConcern", () => expectedConcern, w.HasValue || (wtimeout.HasValue && !hasOperationTimeout) } }; result.Should().Be(expectedResult); } @@ -331,7 +341,7 @@ private BsonDocument GetViewInfo(IReadBinding binding, string viewName) { Filter = new BsonDocument("name", viewName) }; - return listCollectionsOperation.Execute(binding, CancellationToken.None).Single(); + return listCollectionsOperation.Execute(OperationContext.NoTimeout, binding).Single(); } } } diff --git a/tests/MongoDB.Driver.Tests/Core/Operations/DistinctOperationTests.cs b/tests/MongoDB.Driver.Tests/Core/Operations/DistinctOperationTests.cs index 1c89d5c9eb4..b18236f2f0d 100644 --- a/tests/MongoDB.Driver.Tests/Core/Operations/DistinctOperationTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Operations/DistinctOperationTests.cs @@ 
-1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,6 +14,7 @@ */ using System; +using System.Threading; using System.Threading.Tasks; using FluentAssertions; using MongoDB.Bson; @@ -152,7 +153,7 @@ public void MaxTime_set_should_throw_when_value_is_invalid( [Theory] [ParameterAttributeData] public void ReadConcern_get_and_set_should_work( - [Values(null, ReadConcernLevel.Local, ReadConcernLevel.Local)] + [Values(null, ReadConcernLevel.Local)] ReadConcernLevel? level) { var subject = new DistinctOperation<int>(_collectionNamespace, _valueSerializer, _fieldName, _messageEncoderSettings); @@ -186,7 +187,7 @@ public void CreateCommand_should_return_expected_result() var connectionDescription = OperationTestHelper.CreateConnectionDescription(); var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -211,7 +212,7 @@ public void CreateCommand_should_return_expected_result_when_Collation_is_set( var connectionDescription = OperationTestHelper.CreateConnectionDescription(); var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -237,7 +238,7 @@ public void CreateCommand_should_return_expected_result_when_Filter_is_set( var connectionDescription = OperationTestHelper.CreateConnectionDescription(); var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, 
connectionDescription); var expectedResult = new BsonDocument { @@ -265,7 +266,7 @@ public void CreateCommand_should_return_expected_result_when_MaxTime_is_set(long var connectionDescription = OperationTestHelper.CreateConnectionDescription(); var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -277,6 +278,24 @@ public void CreateCommand_should_return_expected_result_when_MaxTime_is_set(long result["maxTimeMS"].BsonType.Should().Be(BsonType.Int32); } + [Theory] + [InlineData(42)] + [InlineData(-1)] + public void CreateCommand_should_ignore_maxtime_if_timeout_specified(int timeoutMs) + { + var subject = new DistinctOperation<int>(_collectionNamespace, _valueSerializer, _fieldName, _messageEncoderSettings) + { + MaxTime = TimeSpan.FromTicks(10) + }; + var session = OperationTestHelper.CreateSession(); + var connectionDescription = OperationTestHelper.CreateConnectionDescription(); + + var operationContext = new OperationContext(TimeSpan.FromMilliseconds(timeoutMs), CancellationToken.None); + var result = subject.CreateCommand(operationContext, session, connectionDescription); + + result.Should().NotContain("maxTimeMS"); + } + [Theory] [ParameterAttributeData] public void CreateCommand_should_return_expected_result_when_ReadConcern_is_set( @@ -292,7 +311,7 @@ public void CreateCommand_should_return_expected_result_when_ReadConcern_is_set( var connectionDescription = OperationTestHelper.CreateConnectionDescription(); var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -318,7 +337,7 @@ public void 
CreateCommand_should_return_the_expected_result_when_using_causal_co var connectionDescription = OperationTestHelper.CreateConnectionDescription(supportsSessions: true); var session = OperationTestHelper.CreateSession(true, new BsonTimestamp(100)); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedReadConcernDocument = readConcern.ToBsonDocument(); expectedReadConcernDocument["afterClusterTime"] = new BsonTimestamp(100); diff --git a/tests/MongoDB.Driver.Tests/Core/Operations/DropCollectionOperationTests.cs b/tests/MongoDB.Driver.Tests/Core/Operations/DropCollectionOperationTests.cs index 0270c1e811d..53357a7c9f5 100644 --- a/tests/MongoDB.Driver.Tests/Core/Operations/DropCollectionOperationTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Operations/DropCollectionOperationTests.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -15,6 +15,7 @@ using System; using System.Linq; +using System.Threading; using System.Threading.Tasks; using FluentAssertions; using MongoDB.Bson; @@ -53,9 +54,10 @@ public void constructor_should_initialize_subject() [Fact] public void constructor_should_throw_when_collectionNamespace_is_null() { - Action action = () => { new DropCollectionOperation(null, _messageEncoderSettings); }; + var exception = Record.Exception(() => { new DropCollectionOperation(null, _messageEncoderSettings); }); - action.ShouldThrow<ArgumentNullException>().And.ParamName.Should().Be("collectionNamespace"); + exception.Should().BeOfType<ArgumentNullException>().Subject + .ParamName.Should().Be("collectionNamespace"); } [Fact] @@ -68,7 +70,7 @@ public void CreateCommand_should_return_expected_result() }; var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session); result.Should().Be(expectedResult); } @@ -76,22 +78,36 @@ public void CreateCommand_should_return_expected_result() [Theory] [ParameterAttributeData] public void CreateCommand_should_return_expected_result_when_WriteConcern_is_set( - [Values(null, 1, 2)] - int? w) + [Values(null, 1, 2)] int? w, + [Values(null, 100)] int? wtimeout, + [Values(true, false)] bool hasOperationTimeout) { var writeConcern = w.HasValue ? new WriteConcern(w.Value) : null; + if (wtimeout.HasValue) + { + writeConcern ??= WriteConcern.Acknowledged; + writeConcern = writeConcern.With(wTimeout: TimeSpan.FromMilliseconds(wtimeout.Value)); + } + var subject = new DropCollectionOperation(_collectionNamespace, _messageEncoderSettings) { WriteConcern = writeConcern }; + var operationContext = hasOperationTimeout ? 
new OperationContext(TimeSpan.FromSeconds(42), CancellationToken.None) : OperationContext.NoTimeout; var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(session); + var result = subject.CreateCommand(operationContext, session); + + var expectedConcern = writeConcern?.ToBsonDocument(); + if (hasOperationTimeout) + { + expectedConcern?.Remove("wtimeout"); + } var expectedResult = new BsonDocument { { "drop", _collectionNamespace.CollectionName }, - { "writeConcern", () => writeConcern.ToBsonDocument(), writeConcern != null } + { "writeConcern", () => expectedConcern, w.HasValue || (wtimeout.HasValue && !hasOperationTimeout) } }; result.Should().Be(expectedResult); } @@ -104,7 +120,7 @@ public void CreateEncryptedDropCollectionOperationIfConfigured_should_return_exp var s = subject.Should().BeOfType<DropCollectionOperation>().Subject; - var command = s.CreateCommand(session); + var command = s.CreateCommand(OperationContext.NoTimeout, session); var expectedResult = new BsonDocument { @@ -138,7 +154,7 @@ public void CreateEncryptedDropCollectionOperationIfConfigured_should_return_exp var subject = DropCollectionOperation.CreateEncryptedDropCollectionOperationIfConfigured(_collectionNamespace, encryptedFields, _messageEncoderSettings, null); var session = OperationTestHelper.CreateSession(); - + var operations = ((CompositeWriteOperation<BsonDocument>)subject)._operations<BsonDocument>(); // esc @@ -165,7 +181,7 @@ void AssertDropCollectionCommand((IWriteOperation<BsonDocument> Operation, bool { operationInfo.IsMainOperation.Should().Be(isMainOperation); var operation = operationInfo.Operation.Should().BeOfType<DropCollectionOperation>().Subject; - var result = operation.CreateCommand(session); + var result = operation.CreateCommand(OperationContext.NoTimeout, session); var expectedResult = new BsonDocument { { "drop", collectionNamespace.CollectionName }, diff --git 
a/tests/MongoDB.Driver.Tests/Core/Operations/DropDatabaseOperationTests.cs b/tests/MongoDB.Driver.Tests/Core/Operations/DropDatabaseOperationTests.cs index cd84b8a528a..8537b324b57 100644 --- a/tests/MongoDB.Driver.Tests/Core/Operations/DropDatabaseOperationTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Operations/DropDatabaseOperationTests.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,6 +14,7 @@ */ using System; +using System.Threading; using System.Threading.Tasks; using FluentAssertions; using MongoDB.Bson; @@ -39,9 +40,10 @@ public void constructor_should_initialize_subject() [Fact] public void constructor_should_throw_when_databaseNamespace_is_null() { - Action action = () => { new DropDatabaseOperation(null, _messageEncoderSettings); }; + var exception = Record.Exception(() => { new DropDatabaseOperation(null, _messageEncoderSettings); }); - action.ShouldThrow<ArgumentNullException>().And.ParamName.Should().Be("databaseNamespace"); + exception.Should().BeOfType<ArgumentNullException>().Subject + .ParamName.Should().Be("databaseNamespace"); } [Fact] @@ -54,7 +56,7 @@ public void CreateCommand_should_return_expected_result() }; var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session); result.Should().Be(expectedResult); } @@ -62,22 +64,36 @@ public void CreateCommand_should_return_expected_result() [Theory] [ParameterAttributeData] public void CreateCommand_should_return_expected_result_when_WriteConcern_is_set( - [Values(null, 1, 2)] - int? w) + [Values(null, 1, 2)] int? w, + [Values(null, 100)] int? wtimeout, + [Values(true, false)] bool hasOperationTimeout) { var writeConcern = w.HasValue ? 
new WriteConcern(w.Value) : null; + if (wtimeout.HasValue) + { + writeConcern ??= WriteConcern.Acknowledged; + writeConcern = writeConcern.With(wTimeout: TimeSpan.FromMilliseconds(wtimeout.Value)); + } + var subject = new DropDatabaseOperation(_databaseNamespace, _messageEncoderSettings) { WriteConcern = writeConcern }; + var operationContext = hasOperationTimeout ? new OperationContext(TimeSpan.FromSeconds(42), CancellationToken.None) : OperationContext.NoTimeout; var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(session); + var result = subject.CreateCommand(operationContext, session); + + var expectedWriteConcern = writeConcern?.ToBsonDocument(); + if (hasOperationTimeout) + { + expectedWriteConcern?.Remove("wtimeout"); + } var expectedResult = new BsonDocument { { "dropDatabase", 1 }, - { "writeConcern", () => writeConcern.ToBsonDocument(), writeConcern != null } + { "writeConcern", () => expectedWriteConcern, w.HasValue || (wtimeout.HasValue && !hasOperationTimeout) } }; result.Should().Be(expectedResult); } diff --git a/tests/MongoDB.Driver.Tests/Core/Operations/DropIndexOperationTests.cs b/tests/MongoDB.Driver.Tests/Core/Operations/DropIndexOperationTests.cs index be3f8dc584a..c750caca3ca 100644 --- a/tests/MongoDB.Driver.Tests/Core/Operations/DropIndexOperationTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Operations/DropIndexOperationTests.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -15,6 +15,7 @@ using System; using System.Linq; +using System.Threading; using System.Threading.Tasks; using FluentAssertions; using MongoDB.Bson; @@ -56,29 +57,30 @@ public void constructor_with_collectionNamespace_indexName_messageEncoderSetting [Fact] public void constructor_with_collectionNamespace_indexName_messageEncoderSettings_should_throw_when_collectionNamespace_is_null() { - var indexName = "x_1"; - - Action action = () => { new DropIndexOperation(null, indexName, _messageEncoderSettings); }; + var exception = Record.Exception(() => { new DropIndexOperation(null, "x_1", _messageEncoderSettings); }); - action.ShouldThrow<ArgumentNullException>().And.ParamName.Should().Be("collectionNamespace"); + exception.Should().BeOfType<ArgumentNullException>().Subject + .ParamName.Should().Be("collectionNamespace"); } - [Fact] - public void constructor_with_collectionNamespace_indexName_messageEncoderSettings_should_throw_when_indexName_is_null() + [Theory] + [InlineData(null)] + [InlineData("")] + public void constructor_with_collectionNamespace_indexName_messageEncoderSettings_should_throw_when_indexName_is_empty(string indexName) { - Action action = () => { new DropIndexOperation(_collectionNamespace, (string)null, _messageEncoderSettings); }; + var exception = Record.Exception(() => { new DropIndexOperation(_collectionNamespace, indexName, _messageEncoderSettings); }); - action.ShouldThrow<ArgumentNullException>().And.ParamName.Should().Be("indexName"); + exception.Should().BeAssignableTo<ArgumentException>().Subject + .ParamName.Should().Be("indexName"); } [Fact] public void constructor_with_collectionNamespace_indexName_messageEncoderSettings_should_throw_when_messageEncoderSettings_is_null() { - var indexName = "x_1"; - - Action action = () => { new DropIndexOperation(_collectionNamespace, indexName, null); }; + var exception = Record.Exception(() => { new DropIndexOperation(_collectionNamespace, "x_1", null); }); - 
action.ShouldThrow<ArgumentNullException>().And.ParamName.Should().Be("messageEncoderSettings"); + exception.Should().BeOfType<ArgumentNullException>().Subject + .ParamName.Should().Be("messageEncoderSettings"); } [Fact] @@ -100,17 +102,19 @@ public void constructor_with_collectionNamespace_keys_messageEncoderSettings_sho { var keys = new BsonDocument { { "x", 1 } }; - Action action = () => { new DropIndexOperation(null, keys, _messageEncoderSettings); }; + var exception = Record.Exception(() => { new DropIndexOperation(null, keys, _messageEncoderSettings); }); - action.ShouldThrow<ArgumentNullException>().And.ParamName.Should().Be("collectionNamespace"); + exception.Should().BeOfType<ArgumentNullException>().Subject + .ParamName.Should().Be("collectionNamespace"); } [Fact] public void constructor_with_collectionNamespace_keys_messageEncoderSettings_should_throw_when_indexName_is_null() { - Action action = () => { new DropIndexOperation(_collectionNamespace, (BsonDocument)null, _messageEncoderSettings); }; + var exception = Record.Exception(() => { new DropIndexOperation(_collectionNamespace, (BsonDocument)null, _messageEncoderSettings); }); - action.ShouldThrow<ArgumentNullException>().And.ParamName.Should().Be("keys"); + exception.Should().BeOfType<ArgumentNullException>().Subject + .ParamName.Should().Be("keys"); } [Fact] @@ -118,9 +122,10 @@ public void constructor_with_collectionNamespace_keys_messageEncoderSettings_sho { var keys = new BsonDocument { { "x", 1 } }; - Action action = () => { new DropIndexOperation(_collectionNamespace, keys, null); }; + var exception = Record.Exception(() => { new DropIndexOperation(_collectionNamespace, keys, null); }); - action.ShouldThrow<ArgumentNullException>().And.ParamName.Should().Be("messageEncoderSettings"); + exception.Should().BeOfType<ArgumentNullException>().Subject + .ParamName.Should().Be("messageEncoderSettings"); } [Fact] @@ -135,7 +140,7 @@ public void CreateCommand_should_return_expectedResult() }; var 
session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session); result.Should().Be(expectedResult); } @@ -151,45 +156,76 @@ public void CreateCommand_should_return_expected_result_when_MaxTime_is_Set(long { var indexName = "x_1"; var maxTime = TimeSpan.FromTicks(maxTimeTicks); - var subject = new DropIndexOperation(_collectionNamespace, indexName, _messageEncoderSettings); - subject.MaxTime = maxTime; + var subject = new DropIndexOperation(_collectionNamespace, indexName, _messageEncoderSettings) + { + MaxTime = maxTime + }; var expectedResult = new BsonDocument { { "dropIndexes", _collectionNamespace.CollectionName }, { "index", indexName }, - {"maxTimeMS", expectedMaxTimeMS } + { "maxTimeMS", expectedMaxTimeMS } }; var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session); result.Should().Be(expectedResult); result["maxTimeMS"].BsonType.Should().Be(BsonType.Int32); + } + [Theory] + [InlineData(42)] + [InlineData(-1)] + public void CreateCommand_should_ignore_maxtime_if_timeout_specified(int timeoutMs) + { + var indexName = "x_1"; + var subject = new DropIndexOperation(_collectionNamespace, indexName, _messageEncoderSettings) + { + MaxTime = TimeSpan.FromSeconds(10) + }; + var session = OperationTestHelper.CreateSession(); + + var operationContext = new OperationContext(TimeSpan.FromMilliseconds(timeoutMs), CancellationToken.None); + var result = subject.CreateCommand(operationContext, session); + + result.Should().NotContain("maxTimeMS"); } [Theory] [ParameterAttributeData] public void CreateCommand_should_return_expectedResult_when_WriteConcern_is_set( - [Values(null, 1, 2)] - int? w) + [Values(null, 1, 2)] int? w, + [Values(null, 100)] int? 
wtimeout, + [Values(true, false)] bool hasOperationTimeout) { var indexName = "x_1"; var writeConcern = w.HasValue ? new WriteConcern(w.Value) : null; + if (wtimeout.HasValue) + { + writeConcern ??= WriteConcern.Acknowledged; + writeConcern = writeConcern.With(wTimeout: TimeSpan.FromMilliseconds(wtimeout.Value)); + } + var subject = new DropIndexOperation(_collectionNamespace, indexName, _messageEncoderSettings) { WriteConcern = writeConcern }; + var operationContext = hasOperationTimeout ? new OperationContext(TimeSpan.FromSeconds(42), CancellationToken.None) : OperationContext.NoTimeout; var session = OperationTestHelper.CreateSession(); - var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session); + var result = subject.CreateCommand(operationContext, session); + var expectedWriteConcern = writeConcern?.ToBsonDocument(); + if (hasOperationTimeout) + { + expectedWriteConcern?.Remove("wtimeout"); + } var expectedResult = new BsonDocument { { "dropIndexes", _collectionNamespace.CollectionName }, { "index", indexName }, - { "writeConcern", () => writeConcern.ToBsonDocument(), writeConcern != null } + { "writeConcern", () => expectedWriteConcern, w.HasValue || (wtimeout.HasValue && !hasOperationTimeout) } }; result.Should().Be(expectedResult); } diff --git a/tests/MongoDB.Driver.Tests/Core/Operations/EndTransactionOperationTests.cs b/tests/MongoDB.Driver.Tests/Core/Operations/EndTransactionOperationTests.cs index ba929d55b27..067c5c45588 100644 --- a/tests/MongoDB.Driver.Tests/Core/Operations/EndTransactionOperationTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Operations/EndTransactionOperationTests.cs @@ -223,7 +223,7 @@ internal static class EndTransactionOperationReflector public static BsonDocument CreateCommand(this EndTransactionOperation obj) { var methodInfo = typeof(EndTransactionOperation).GetMethod("CreateCommand", BindingFlags.NonPublic | BindingFlags.Instance); - return 
(BsonDocument)methodInfo.Invoke(obj, null); + return (BsonDocument)methodInfo.Invoke(obj, [OperationContext.NoTimeout]); } } } diff --git a/tests/MongoDB.Driver.Tests/Core/Operations/EstimatedDocumentCountOperationTests.cs b/tests/MongoDB.Driver.Tests/Core/Operations/EstimatedDocumentCountOperationTests.cs index 14e9ce31484..bc947aabc96 100644 --- a/tests/MongoDB.Driver.Tests/Core/Operations/EstimatedDocumentCountOperationTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Operations/EstimatedDocumentCountOperationTests.cs @@ -1,4 +1,4 @@ -/* Copyright 2021-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -328,7 +328,7 @@ private void AssertCommandDocument(BsonDocument actualResult, int? expectedMaxTi private BsonDocument CreateCommand(EstimatedDocumentCountOperation subject, ConnectionDescription connectionDescription, ICoreSession session) { var countOperation = (CountOperation)subject.CreateCountOperation(); - return countOperation.CreateCommand(connectionDescription, session); + return countOperation.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); } private void EnsureTestData() diff --git a/tests/MongoDB.Driver.Tests/Core/Operations/EvalOperationTests.cs b/tests/MongoDB.Driver.Tests/Core/Operations/EvalOperationTests.cs deleted file mode 100644 index 64080a96efa..00000000000 --- a/tests/MongoDB.Driver.Tests/Core/Operations/EvalOperationTests.cs +++ /dev/null @@ -1,315 +0,0 @@ -/* Copyright 2013-present MongoDB Inc. -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. 
-* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ - -using System; -using System.Threading; -using System.Threading.Tasks; -using FluentAssertions; -using MongoDB.Bson; -using MongoDB.TestHelpers.XunitExtensions; -using MongoDB.Driver.Core.Bindings; -using MongoDB.Driver.Core.Clusters; -using MongoDB.Driver.Core.Misc; -using MongoDB.Driver.Core.TestHelpers; -using MongoDB.Driver.Core.TestHelpers.XunitExtensions; -using MongoDB.Driver.Core.WireProtocol.Messages.Encoders; -using Xunit; - -namespace MongoDB.Driver.Core.Operations -{ - public class EvalOperationTests : OperationTestBase - { - private DatabaseNamespace _adminDatabaseNamespace; - - public EvalOperationTests() - { - _adminDatabaseNamespace = DatabaseNamespace.Admin; - } - - [Fact] - public void Args_should_work() - { - var function = new BsonJavaScript("return 1"); - var subject = new EvalOperation(_adminDatabaseNamespace, function, _messageEncoderSettings); - var args = new BsonValue[] { 1, 2, 3 }; - - subject.Args = args; - - subject.Args.Should().Equal(args); - } - - [Fact] - public void constructor_should_initialize_subject() - { - var function = new BsonJavaScript("return 1"); - - var subject = new EvalOperation(_adminDatabaseNamespace, function, _messageEncoderSettings); - - subject.Args.Should().BeNull(); - subject.DatabaseNamespace.Should().Be(_adminDatabaseNamespace); - subject.Function.Should().Be(function); - subject.MaxTime.Should().NotHaveValue(); - // subject.MessageEncoderSettings.Should().Be(_messageEncoderSettings); - Assert.Equal(_messageEncoderSettings, subject.MessageEncoderSettings); 
- subject.NoLock.Should().NotHaveValue(); - } - - [Fact] - public void constructor_should_throw_when_databaseNamespace_is_null() - { - var function = new BsonJavaScript("return 1"); - - Action action = () => new EvalOperation(null, function, _messageEncoderSettings); - - action.ShouldThrow<ArgumentNullException>(); - } - - [Fact] - public void constructor_should_throw_when_function_is_null() - { - Action action = () => new EvalOperation(_adminDatabaseNamespace, null, _messageEncoderSettings); - - action.ShouldThrow<ArgumentNullException>(); - } - - [Fact] - public void CreateCommand_should_return_expected_result() - { - var function = new BsonJavaScript("return 1"); - var subject = new EvalOperation(_adminDatabaseNamespace, function, _messageEncoderSettings); - var expectedResult = new BsonDocument - { - { "$eval", function } - }; - - var result = subject.CreateCommand(); - - result.Should().Be(expectedResult); - } - - [Fact] - public void CreateCommand_should_return_expected_result_when_args_are_provided() - { - var function = new BsonJavaScript("return 1"); - var subject = new EvalOperation(_adminDatabaseNamespace, function, _messageEncoderSettings); - var args = new BsonValue[] { 1, 2, 3 }; - subject.Args = args; - var expectedResult = new BsonDocument - { - { "$eval", function }, - { "args", new BsonArray(args) } - }; - - var result = subject.CreateCommand(); - - result.Should().Be(expectedResult); - } - - [Theory] - [InlineData(-10000, 0)] - [InlineData(0, 0)] - [InlineData(1, 1)] - [InlineData(9999, 1)] - [InlineData(10000, 1)] - [InlineData(10001, 2)] - public void CreateCommand_should_return_expected_result_when_MaxTime_is_set(long maxTimeTicks, int expectedMaxTimeMS) - { - var function = new BsonJavaScript("return 1"); - var subject = new EvalOperation(_adminDatabaseNamespace, function, _messageEncoderSettings) - { - MaxTime = TimeSpan.FromTicks(maxTimeTicks) - }; - var expectedResult = new BsonDocument - { - { "$eval", function }, - { "maxTimeMS", 
expectedMaxTimeMS } - }; - - var result = subject.CreateCommand(); - - result.Should().Be(expectedResult); - result["maxTimeMS"].BsonType.Should().Be(BsonType.Int32); - } - - [Fact] - public void CreateCommand_should_return_expected_result_when_noLock_is_provided() - { - var function = new BsonJavaScript("return 1"); - var subject = new EvalOperation(_adminDatabaseNamespace, function, _messageEncoderSettings); - subject.NoLock = true; - var expectedResult = new BsonDocument - { - { "$eval", function }, - { "nolock", true } - }; - - var result = subject.CreateCommand(); - - result.Should().Be(expectedResult); - } - - [Theory] - [ParameterAttributeData] - public void Execute_should_return_expected_result( - [Values(false, true)] - bool async) - { - RequireServer.Check().Supports(Feature.Eval).Authentication(false); - var function = "return 1"; - var subject = new EvalOperation(_adminDatabaseNamespace, function, _messageEncoderSettings); - - var result = ExecuteOperation(subject, async); - - result.Should().Be(1.0); - } - - [Theory] - [ParameterAttributeData] - public void Execute_should_return_expected_result_when_args_are_provided( - [Values(false, true)] - bool async) - { - RequireServer.Check().Supports(Feature.Eval).Authentication(false); - var function = "function(x) { return x; }"; - var subject = new EvalOperation(_adminDatabaseNamespace, function, _messageEncoderSettings); - subject.Args = new BsonValue[] { 1 }; - - var result = ExecuteOperation(subject, async); - - result.Should().Be(1.0); - } - - [Theory] - [ParameterAttributeData] - public void Execute_should_return_expected_result_when_maxTime_is_provided( - [Values(false, true)] - bool async) - { - // TODO: implement EvalOperation MaxTime test - } - - [Theory] - [ParameterAttributeData] - public void Execute_should_return_expected_result_when_noLock_is_provided( - [Values(false, true)] - bool async) - { - RequireServer.Check().Supports(Feature.Eval).Authentication(false); - var function = "return 1"; - 
var subject = new EvalOperation(_adminDatabaseNamespace, function, _messageEncoderSettings); - subject.NoLock = true; - - var result = ExecuteOperation(subject, async); - - result.Should().Be(1.0); - } - - [Theory] - [ParameterAttributeData] - public void Execute_should_throw_when_binding_isNull( - [Values(false, true)] - bool async) - { - var function = "return 1"; - var subject = new EvalOperation(_adminDatabaseNamespace, function, _messageEncoderSettings); - - Action action = () => ExecuteOperation(subject, null, async); - - action.ShouldThrow<ArgumentNullException>(); - } - - [Theory] - [ParameterAttributeData] - public void Execute_should_throw_when_maxTime_is_exceeded( - [Values(false, true)] bool async) - { - RequireServer.Check().Supports(Feature.Eval).Authentication(false).ClusterTypes(ClusterType.Standalone, ClusterType.ReplicaSet); - var function = "return 1"; - var subject = new EvalOperation(_adminDatabaseNamespace, function, _messageEncoderSettings) { MaxTime = TimeSpan.FromSeconds(9001) }; - - using (var failPoint = FailPoint.ConfigureAlwaysOn(_cluster, _session, FailPointName.MaxTimeAlwaysTimeout)) - { - var exception = Record.Exception(() => ExecuteOperation(subject, failPoint.Binding, async)); - - exception.Should().BeOfType<MongoExecutionTimeoutException>(); - } - } - - [Theory] - [ParameterAttributeData] - public void MaxTime_get_and_set_should_work( - [Values(-10000, 0, 1, 10000, 99999)] long maxTimeTicks) - { - var function = new BsonJavaScript("return 1"); - var subject = new EvalOperation(_adminDatabaseNamespace, function, _messageEncoderSettings); - var value = TimeSpan.FromTicks(maxTimeTicks); - - subject.MaxTime = value; - var result = subject.MaxTime; - - result.Should().Be(value); - } - - [Theory] - [ParameterAttributeData] - public void MaxTime_set_should_throw_when_value_is_invalid( - [Values(-10001, -9999, -1)] long maxTimeTicks) - { - var function = new BsonJavaScript("return 1"); - var subject = new 
EvalOperation(_adminDatabaseNamespace, function, _messageEncoderSettings); - var value = TimeSpan.FromTicks(maxTimeTicks); - - var exception = Record.Exception(() => subject.MaxTime = value); - - var e = exception.Should().BeOfType<ArgumentOutOfRangeException>().Subject; - e.ParamName.Should().Be("value"); - } - - [Theory] - [ParameterAttributeData] - public void NoLock_should_work( - [Values(null, false, true)] - bool? value) - { - var function = new BsonJavaScript("return 1"); - var subject = new EvalOperation(_adminDatabaseNamespace, function, _messageEncoderSettings); - - subject.NoLock = value; - - subject.NoLock.Should().Be(value); - } - - // private methods - private BsonValue ExecuteOperation(EvalOperation operation, bool async) - { - using (var binding = CreateReadWriteBinding()) - { - return ExecuteOperation(operation, binding, async); - } - } - - private BsonValue ExecuteOperation(EvalOperation operation, IWriteBinding binding, bool async) - { - if (async) - { - return operation.ExecuteAsync(binding, CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - return operation.Execute(binding, CancellationToken.None); - } - } - } -} diff --git a/tests/MongoDB.Driver.Tests/Core/Operations/FindOneAndDeleteOperationTests.cs b/tests/MongoDB.Driver.Tests/Core/Operations/FindOneAndDeleteOperationTests.cs index ff475667efe..068a63996ca 100644 --- a/tests/MongoDB.Driver.Tests/Core/Operations/FindOneAndDeleteOperationTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Operations/FindOneAndDeleteOperationTests.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -14,6 +14,7 @@ */ using System; +using System.Threading; using System.Threading.Tasks; using FluentAssertions; using MongoDB.Bson; @@ -121,7 +122,7 @@ public void CreateCommand_should_return_expected_result_when_Hint_is_set( var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(Feature.HintForFindAndModifyFeature.FirstSupportedWireVersion); - var result = subject.CreateCommand(session, connectionDescription, null); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription, null); var expectedResult = new BsonDocument { @@ -293,7 +294,7 @@ public void CreateCommand_should_return_the_expected_result( var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription, transactionNumber); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription, transactionNumber); var expectedResult = new BsonDocument { @@ -320,7 +321,7 @@ public void CreateCommand_should_return_the_expected_result_when_Collation_is_se var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription, null); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription, null); var expectedResult = new BsonDocument { @@ -348,7 +349,7 @@ public void CreateCommand_should_return_expected_result_when_MaxTime_is_set(long var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription, null); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription, null); var expectedResult = new BsonDocument { 
@@ -361,6 +362,24 @@ public void CreateCommand_should_return_expected_result_when_MaxTime_is_set(long result["maxTimeMS"].BsonType.Should().Be(BsonType.Int32); } + [Theory] + [InlineData(42)] + [InlineData(-1)] + public void CreateCommand_should_ignore_maxtime_if_timeout_specified(int timeoutMs) + { + var subject = new FindOneAndDeleteOperation<BsonDocument>(_collectionNamespace, _filter, BsonDocumentSerializer.Instance, _messageEncoderSettings) + { + MaxTime = TimeSpan.FromTicks(10) + }; + var session = OperationTestHelper.CreateSession(); + var connectionDescription = OperationTestHelper.CreateConnectionDescription(); + + var operationContext = new OperationContext(TimeSpan.FromMilliseconds(timeoutMs), CancellationToken.None); + var result = subject.CreateCommand(operationContext, session, connectionDescription, null); + + result.Should().NotContain("maxTimeMS"); + } + [Theory] [ParameterAttributeData] public void CreateCommand_should_return_the_expected_result_when_Projection_is_set( @@ -375,7 +394,7 @@ public void CreateCommand_should_return_the_expected_result_when_Projection_is_s var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription, null); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription, null); var expectedResult = new BsonDocument { @@ -401,7 +420,7 @@ public void CreateCommand_should_return_the_expected_result_when_Sort_is_set( var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription, null); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription, null); var expectedResult = new BsonDocument { @@ -416,25 +435,38 @@ public void 
CreateCommand_should_return_the_expected_result_when_Sort_is_set( [Theory] [ParameterAttributeData] public void CreateCommand_should_return_the_expected_result_when_WriteConcern_is_set( - [Values(null, 1, 2)] - int? w) + [Values(null, 1, 2)] int? w, + [Values(null, 100)] int? wtimeout, + [Values(true, false)] bool hasOperationTimeout) { var writeConcern = w.HasValue ? new WriteConcern(w.Value) : null; + if (wtimeout.HasValue) + { + writeConcern ??= WriteConcern.Acknowledged; + writeConcern = writeConcern.With(wTimeout: TimeSpan.FromMilliseconds(wtimeout.Value)); + } + var subject = new FindOneAndDeleteOperation<BsonDocument>(_collectionNamespace, _filter, BsonDocumentSerializer.Instance, _messageEncoderSettings) { WriteConcern = writeConcern }; + var operationContext = hasOperationTimeout ? new OperationContext(TimeSpan.FromSeconds(42), CancellationToken.None) : OperationContext.NoTimeout; var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription, null); + var result = subject.CreateCommand(operationContext, session, connectionDescription, null); + var expectedWriteConcern = writeConcern?.ToBsonDocument(); + if (hasOperationTimeout) + { + expectedWriteConcern?.Remove("wtimeout"); + } var expectedResult = new BsonDocument { { "findAndModify", _collectionNamespace.CollectionName }, { "query", _filter }, { "remove", true }, - { "writeConcern", () => writeConcern.ToBsonDocument(), writeConcern != null } + { "writeConcern", () => expectedWriteConcern, w.HasValue || (wtimeout.HasValue && !hasOperationTimeout) } }; result.Should().Be(expectedResult); } diff --git a/tests/MongoDB.Driver.Tests/Core/Operations/FindOneAndReplaceOperationTests.cs b/tests/MongoDB.Driver.Tests/Core/Operations/FindOneAndReplaceOperationTests.cs index f201c3664ba..039a212d2cb 100644 --- 
a/tests/MongoDB.Driver.Tests/Core/Operations/FindOneAndReplaceOperationTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Operations/FindOneAndReplaceOperationTests.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,6 +14,7 @@ */ using System; +using System.Threading; using System.Threading.Tasks; using FluentAssertions; using MongoDB.Bson; @@ -280,7 +281,7 @@ public void CreateCommand_should_return_expected_result( var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription, transactionNumber); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription, transactionNumber); var expectedResult = new BsonDocument { @@ -306,7 +307,7 @@ public void CreateCommand_should_return_expected_result_when_BypassDocumentValid var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription, null); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription, null); var expectedResult = new BsonDocument { @@ -332,7 +333,7 @@ public void CreateCommand_should_return_expected_result_when_Collation_is_set( var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription, null); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription, null); var expectedResult = new BsonDocument { @@ -357,7 +358,7 @@ public void CreateCommand_should_return_expected_result_when_Hint_is_set( var session = 
OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(Feature.HintForFindAndModifyFeature.FirstSupportedWireVersion); - var result = subject.CreateCommand(session, connectionDescription, null); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription, null); var expectedResult = new BsonDocument { @@ -382,7 +383,7 @@ public void CreateCommand_should_return_expected_result_when_IsUpsert_is_set( var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription, null); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription, null); var expectedResult = new BsonDocument { @@ -410,7 +411,7 @@ public void CreateCommand_should_return_expected_result_when_MaxTime_is_set(long var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription, null); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription, null); var expectedResult = new BsonDocument { @@ -423,6 +424,24 @@ public void CreateCommand_should_return_expected_result_when_MaxTime_is_set(long result["maxTimeMS"].BsonType.Should().Be(BsonType.Int32); } + [Theory] + [InlineData(42)] + [InlineData(-1)] + public void CreateCommand_should_ignore_maxtime_if_timeout_specified(int timeoutMs) + { + var subject = new FindOneAndReplaceOperation<BsonDocument>(_collectionNamespace, _filter, _replacement, BsonDocumentSerializer.Instance, _messageEncoderSettings) + { + MaxTime = TimeSpan.FromTicks(10) + }; + var session = OperationTestHelper.CreateSession(); + var connectionDescription = OperationTestHelper.CreateConnectionDescription(); + + var operationContext = new 
OperationContext(TimeSpan.FromMilliseconds(timeoutMs), CancellationToken.None); + var result = subject.CreateCommand(operationContext, session, connectionDescription, null); + + result.Should().NotContain("maxTimeMS"); + } + [Theory] [ParameterAttributeData] public void CreateCommand_should_return_expected_result_when_Projection_is_set( @@ -437,7 +456,7 @@ public void CreateCommand_should_return_expected_result_when_Projection_is_set( var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription, null); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription, null); var expectedResult = new BsonDocument { @@ -462,7 +481,7 @@ public void CreateCommand_should_return_expected_result_when_ReturnDocument_is_s var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription, null); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription, null); var expectedResult = new BsonDocument { @@ -488,7 +507,7 @@ public void CreateCommand_should_return_expected_result_when_Sort_is_set( var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription, null); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription, null); var expectedResult = new BsonDocument { @@ -503,25 +522,38 @@ public void CreateCommand_should_return_expected_result_when_Sort_is_set( [Theory] [ParameterAttributeData] public void CreateCommand_should_return_expected_result_when_WriteConcern_is_set( - [Values(null, 1, 2)] - int? w) + [Values(null, 1, 2)] int? w, + [Values(null, 100)] int? 
wtimeout, + [Values(true, false)] bool hasOperationTimeout) { var writeConcern = w.HasValue ? new WriteConcern(w.Value) : null; + if (wtimeout.HasValue) + { + writeConcern ??= WriteConcern.Acknowledged; + writeConcern = writeConcern.With(wTimeout: TimeSpan.FromMilliseconds(wtimeout.Value)); + } + var subject = new FindOneAndReplaceOperation<BsonDocument>(_collectionNamespace, _filter, _replacement, BsonDocumentSerializer.Instance, _messageEncoderSettings) { WriteConcern = writeConcern }; + var operationContext = hasOperationTimeout ? new OperationContext(TimeSpan.FromSeconds(42), CancellationToken.None) : OperationContext.NoTimeout; var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription, null); + var result = subject.CreateCommand(operationContext, session, connectionDescription, null); + var expectedWriteConcern = writeConcern?.ToBsonDocument(); + if (hasOperationTimeout) + { + expectedWriteConcern?.Remove("wtimeout"); + } var expectedResult = new BsonDocument { { "findAndModify", _collectionNamespace.CollectionName }, { "query", _filter }, { "update", _replacement }, - { "writeConcern", () => writeConcern.ToBsonDocument(), writeConcern != null } + { "writeConcern", () => expectedWriteConcern, w.HasValue || (wtimeout.HasValue && !hasOperationTimeout) } }; result.Should().Be(expectedResult); } diff --git a/tests/MongoDB.Driver.Tests/Core/Operations/FindOneAndUpdateOperationTests.cs b/tests/MongoDB.Driver.Tests/Core/Operations/FindOneAndUpdateOperationTests.cs index 3a621869207..39741ec5f05 100644 --- a/tests/MongoDB.Driver.Tests/Core/Operations/FindOneAndUpdateOperationTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Operations/FindOneAndUpdateOperationTests.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,6 +14,7 @@ */ using System; +using System.Threading; using System.Threading.Tasks; using FluentAssertions; using MongoDB.Bson; @@ -274,7 +275,7 @@ public void CreateCommand_should_return_expected_result( var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription, transactionNumber); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription, transactionNumber); var expectedResult = new BsonDocument { @@ -300,7 +301,7 @@ public void CreateCommand_should_return_expected_result_when_BypassDocumentValid var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription, null); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription, null); var expectedResult = new BsonDocument { @@ -326,7 +327,7 @@ public void CreateCommand_should_return_expected_result_when_Collation_is_set( var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription, null); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription, null); var expectedResult = new BsonDocument { @@ -351,7 +352,7 @@ public void CreateCommand_should_return_expected_result_when_Hint_is_set( var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(Feature.HintForFindAndModifyFeature.FirstSupportedWireVersion); - var result = subject.CreateCommand(session, connectionDescription, null); + var result = 
subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription, null); var expectedResult = new BsonDocument { @@ -376,7 +377,7 @@ public void CreateCommand_should_return_expected_result_when_IsUpsert_is_set( var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription, null); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription, null); var expectedResult = new BsonDocument { @@ -404,7 +405,7 @@ public void CreateCommand_should_return_expected_result_when_MaxTime_is_set(long var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription, null); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription, null); var expectedResult = new BsonDocument { @@ -417,6 +418,25 @@ public void CreateCommand_should_return_expected_result_when_MaxTime_is_set(long result["maxTimeMS"].BsonType.Should().Be(BsonType.Int32); } + [Theory] + [InlineData(42)] + [InlineData(-1)] + public void CreateCommand_should_ignore_maxtime_if_timeout_specified(int timeoutMs) + { + var subject = new FindOneAndUpdateOperation<BsonDocument>(_collectionNamespace, _filter, _update, BsonDocumentSerializer.Instance, _messageEncoderSettings) + { + MaxTime = TimeSpan.FromTicks(10) + }; + var session = OperationTestHelper.CreateSession(); + var connectionDescription = OperationTestHelper.CreateConnectionDescription(); + + + var operationContext = new OperationContext(TimeSpan.FromMilliseconds(timeoutMs), CancellationToken.None); + var result = subject.CreateCommand(operationContext, session, connectionDescription, null); + + result.Should().NotContain("maxTimeMS"); + } + [Theory] [ParameterAttributeData] public void 
CreateCommand_should_return_expected_result_when_Projection_is_set( @@ -431,7 +451,7 @@ public void CreateCommand_should_return_expected_result_when_Projection_is_set( var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription, null); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription, null); var expectedResult = new BsonDocument { @@ -456,7 +476,7 @@ public void CreateCommand_should_return_expected_result_when_ReturnDocument_is_s var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription, null); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription, null); var expectedResult = new BsonDocument { @@ -482,7 +502,7 @@ public void CreateCommand_should_return_expected_result_when_Sort_is_set( var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription, null); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription, null); var expectedResult = new BsonDocument { @@ -497,25 +517,38 @@ public void CreateCommand_should_return_expected_result_when_Sort_is_set( [Theory] [ParameterAttributeData] public void CreateCommand_should_return_expected_result_when_WriteConcern_is_set( - [Values(null, 1, 2)] - int? w) + [Values(null, 1, 2)] int? w, + [Values(null, 100)] int? wtimeout, + [Values(true, false)] bool hasOperationTimeout) { var writeConcern = w.HasValue ? 
new WriteConcern(w.Value) : null; + if (wtimeout.HasValue) + { + writeConcern ??= WriteConcern.Acknowledged; + writeConcern = writeConcern.With(wTimeout: TimeSpan.FromMilliseconds(wtimeout.Value)); + } + var subject = new FindOneAndUpdateOperation<BsonDocument>(_collectionNamespace, _filter, _update, BsonDocumentSerializer.Instance, _messageEncoderSettings) { WriteConcern = writeConcern }; + var operationContext = hasOperationTimeout ? new OperationContext(TimeSpan.FromSeconds(42), CancellationToken.None) : OperationContext.NoTimeout; var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription, null); + var result = subject.CreateCommand(operationContext, session, connectionDescription, null); + var expectedWriteConcern = writeConcern?.ToBsonDocument(); + if (hasOperationTimeout) + { + expectedWriteConcern?.Remove("wtimeout"); + } var expectedResult = new BsonDocument { { "findAndModify", _collectionNamespace.CollectionName }, { "query", _filter }, { "update", _update }, - { "writeConcern", () => writeConcern.ToBsonDocument(), writeConcern != null } + { "writeConcern", () => expectedWriteConcern, w.HasValue || (wtimeout.HasValue && !hasOperationTimeout) } }; result.Should().Be(expectedResult); } diff --git a/tests/MongoDB.Driver.Tests/Core/Operations/FindOperationTests.cs b/tests/MongoDB.Driver.Tests/Core/Operations/FindOperationTests.cs index e468f7bca46..e540885a514 100644 --- a/tests/MongoDB.Driver.Tests/Core/Operations/FindOperationTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Operations/FindOperationTests.cs @@ -1,4 +1,4 @@ -/* Copyright 2015-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -14,6 +14,7 @@ */ using System; +using System.Threading; using System.Threading.Tasks; using FluentAssertions; using MongoDB.Bson; @@ -136,7 +137,6 @@ public void constructor_should_initialize_instance() subject.Comment.Should().BeNull(); subject.CursorType.Should().Be(CursorType.NonTailable); subject.Filter.Should().BeNull(); - subject.FirstBatchSize.Should().NotHaveValue(); subject.Hint.Should().BeNull(); subject.Limit.Should().NotHaveValue(); subject.Max.Should().BeNull(); @@ -192,7 +192,7 @@ public void CreateCommand_should_return_expected_result() var connectionDescription = OperationTestHelper.CreateConnectionDescription(); var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -217,7 +217,7 @@ public void CreateCommand_should_return_expected_result_when_AllowDiskUse_is_set var connectionDescription = OperationTestHelper.CreateConnectionDescription(serverType: serverType); var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -243,7 +243,7 @@ public void CreateCommand_should_return_expected_result_when_AllowPartialResults var connectionDescription = OperationTestHelper.CreateConnectionDescription(serverType: serverType); var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -268,7 +268,7 @@ public void CreateCommand_should_return_expected_result_when_Collation_is_set( var connectionDescription = OperationTestHelper.CreateConnectionDescription(); 
var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -292,7 +292,7 @@ public void CreateCommand_should_return_expected_result_when_Comment_is_set( var connectionDescription = OperationTestHelper.CreateConnectionDescription(); var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -316,7 +316,7 @@ public void CreateCommand_should_return_expected_result_when_CursorType_is_Set( var connectionDescription = OperationTestHelper.CreateConnectionDescription(); var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -342,7 +342,7 @@ public void CreateCommand_should_return_expected_result_when_Filter_is_set( var connectionDescription = OperationTestHelper.CreateConnectionDescription(); var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -352,30 +352,6 @@ public void CreateCommand_should_return_expected_result_when_Filter_is_set( result.Should().Be(expectedResult); } - [Theory] - [ParameterAttributeData] - public void CreateCommand_should_return_expected_result_when_FirstBatchSize_is_set( - [Values(null, 0, 1)] - int? 
firstBatchSize) - { - var subject = new FindOperation<BsonDocument>(_collectionNamespace, BsonDocumentSerializer.Instance, _messageEncoderSettings) - { - FirstBatchSize = firstBatchSize - }; - - var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var session = OperationTestHelper.CreateSession(); - - var result = subject.CreateCommand(connectionDescription, session); - - var expectedResult = new BsonDocument - { - { "find", _collectionNamespace.CollectionName }, - { "batchSize", () => firstBatchSize.Value, firstBatchSize.HasValue } - }; - result.Should().Be(expectedResult); - } - [Theory] [ParameterAttributeData] public void CreateCommand_should_return_expected_result_when_Hint_is_set( @@ -391,7 +367,7 @@ public void CreateCommand_should_return_expected_result_when_Hint_is_set( var connectionDescription = OperationTestHelper.CreateConnectionDescription(); var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -418,7 +394,7 @@ public void CreateCommand_should_return_expected_result_when_Limit_is_set( var connectionDescription = OperationTestHelper.CreateConnectionDescription(); var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -444,7 +420,7 @@ public void CreateCommand_should_return_expected_result_when_Max_is_set( var connectionDescription = OperationTestHelper.CreateConnectionDescription(); var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult 
= new BsonDocument { @@ -470,7 +446,7 @@ public void CreateCommand_should_return_expected_result_when_MaxTime_is_set(long var connectionDescription = OperationTestHelper.CreateConnectionDescription(); var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -481,6 +457,24 @@ public void CreateCommand_should_return_expected_result_when_MaxTime_is_set(long result["maxTimeMS"].BsonType.Should().Be(BsonType.Int32); } + [Theory] + [InlineData(42)] + [InlineData(-1)] + public void CreateCommand_should_ignore_maxtime_if_timeout_specified(int timeoutMs) + { + var subject = new FindOperation<BsonDocument>(_collectionNamespace, BsonDocumentSerializer.Instance, _messageEncoderSettings) + { + MaxTime = TimeSpan.FromTicks(10) + }; + var connectionDescription = OperationTestHelper.CreateConnectionDescription(); + var session = OperationTestHelper.CreateSession(); + + var operationContext = new OperationContext(TimeSpan.FromMilliseconds(timeoutMs), CancellationToken.None); + var result = subject.CreateCommand(operationContext, session, connectionDescription); + + result.Should().NotContain("maxTimeMS"); + } + [Theory] [ParameterAttributeData] public void CreateCommand_should_return_expected_result_when_Min_is_set( @@ -496,7 +490,7 @@ public void CreateCommand_should_return_expected_result_when_Min_is_set( var connectionDescription = OperationTestHelper.CreateConnectionDescription(); var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -520,7 +514,7 @@ public void CreateCommand_should_return_expected_result_when_NoCursorTimeout_is_ var connectionDescription = 
OperationTestHelper.CreateConnectionDescription(); var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -546,7 +540,7 @@ public void CreateCommand_should_return_expected_result_when_OplogReplay_is_set( var connectionDescription = OperationTestHelper.CreateConnectionDescription(); var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -571,7 +565,7 @@ public void CreateCommand_should_return_expected_result_when_Projection_is_set( var connectionDescription = OperationTestHelper.CreateConnectionDescription(); var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -596,7 +590,7 @@ public void CreateCommand_should_return_expected_result_when_ReadConcern_is_set( var connectionDescription = OperationTestHelper.CreateConnectionDescription(); var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -620,7 +614,7 @@ public void CreateCommand_should_return_expected_result_when_ReturnKey_is_set( var connectionDescription = OperationTestHelper.CreateConnectionDescription(); var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, 
connectionDescription); var expectedResult = new BsonDocument { @@ -644,7 +638,7 @@ public void CreateCommand_should_return_expected_result_when_ShowRecordId_is_set var connectionDescription = OperationTestHelper.CreateConnectionDescription(); var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -668,7 +662,7 @@ public void CreateCommand_should_return_expected_result_when_Skip_is_set( var connectionDescription = OperationTestHelper.CreateConnectionDescription(); var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -693,7 +687,7 @@ public void CreateCommand_should_return_expected_result_when_Sort_is_set( var connectionDescription = OperationTestHelper.CreateConnectionDescription(); var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -718,7 +712,7 @@ public void CreateCommand_should_return_the_expected_result_when_using_causal_co var connectionDescription = OperationTestHelper.CreateConnectionDescription(supportsSessions: true); var session = OperationTestHelper.CreateSession(true, new BsonTimestamp(100)); - var result = subject.CreateCommand(connectionDescription, session); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedReadConcernDocument = readConcern.ToBsonDocument(); expectedReadConcernDocument["afterClusterTime"] = new BsonTimestamp(100); @@ -937,34 +931,6 @@ public void 
Filter_get_and_set_should_work( result.Should().BeSameAs(value); } - [Theory] - [ParameterAttributeData] - public void FirstBatchSize_get_and_set_should_work( - [Values(null, 0, 1, 2)] - int? value) - { - var subject = new FindOperation<BsonDocument>(_collectionNamespace, BsonDocumentSerializer.Instance, _messageEncoderSettings); - - subject.FirstBatchSize = value; - var result = subject.FirstBatchSize; - - result.Should().Be(value); - } - - [Theory] - [ParameterAttributeData] - public void FirstBatchSize_set_should_throw_when_value_is_invalid( - [Values(-2, -1)] - int value) - { - var subject = new FindOperation<BsonDocument>(_collectionNamespace, BsonDocumentSerializer.Instance, _messageEncoderSettings); - - var exception = Record.Exception(() => { subject.FirstBatchSize = value; }); - - var argumentOutOfRangeException = exception.Should().BeOfType<ArgumentOutOfRangeException>().Subject; - argumentOutOfRangeException.ParamName.Should().Be("value"); - } - [Theory] [ParameterAttributeData] public void Hint_get_and_set_should_work( diff --git a/tests/MongoDB.Driver.Tests/Core/Operations/GroupOperationTests.cs b/tests/MongoDB.Driver.Tests/Core/Operations/GroupOperationTests.cs deleted file mode 100644 index 4a7d7d4de0a..00000000000 --- a/tests/MongoDB.Driver.Tests/Core/Operations/GroupOperationTests.cs +++ /dev/null @@ -1,576 +0,0 @@ -/* Copyright 2013-present MongoDB Inc. -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. 
-*/ - -using System; -using System.Threading; -using FluentAssertions; -using MongoDB.Bson; -using MongoDB.Bson.Serialization.Serializers; -using MongoDB.TestHelpers.XunitExtensions; -using MongoDB.Driver.Core.Clusters; -using MongoDB.Driver.Core.Misc; -using MongoDB.Driver.Core.TestHelpers; -using MongoDB.Driver.Core.TestHelpers.XunitExtensions; -using Xunit; - -namespace MongoDB.Driver.Core.Operations -{ - public class GroupOperationTests : OperationTestBase - { - private readonly BsonDocument _filter = BsonDocument.Parse("{ y : 'a' }"); - private readonly BsonJavaScript _finalizeFunction = new BsonJavaScript("function(result) { result.count = -result.count; }"); - private readonly BsonDocument _initial = BsonDocument.Parse("{ count : 0.0 }"); - private readonly BsonDocument _key = BsonDocument.Parse("{ x : 1 }"); - private readonly BsonJavaScript _keyFunction = new BsonJavaScript("function(doc) { return { x : doc.x }; }"); - private readonly BsonJavaScript _reduceFunction = new BsonJavaScript("function(doc, result) { result.count += 1; }"); - - [Fact] - public void constructor_with_key_should_initialize_subject() - { - var subject = new GroupOperation<BsonDocument>(_collectionNamespace, _key, _initial, _reduceFunction, _filter, _messageEncoderSettings); - - subject.CollectionNamespace.Should().BeSameAs(_collectionNamespace); - subject.Key.Should().BeSameAs(_key); - subject.Initial.Should().BeSameAs(_initial); - subject.ReduceFunction.Should().BeSameAs(_reduceFunction); - subject.Filter.Should().BeSameAs(_filter); - subject.MessageEncoderSettings.Should().BeSameAs(_messageEncoderSettings); - - subject.Collation.Should().BeNull(); - subject.FinalizeFunction.Should().BeNull(); - subject.KeyFunction.Should().BeNull(); - subject.MaxTime.Should().NotHaveValue(); - subject.ResultSerializer.Should().BeNull(); - } - - [Fact] - public void constructor_with_key_should_throw_when_collectionNamespace_is_null() - { - var exception = Record.Exception(() => new 
GroupOperation<BsonDocument>(null, _key, _initial, _reduceFunction, _filter, _messageEncoderSettings)); - - var argumentNullException = exception.Should().BeOfType<ArgumentNullException>().Subject; - argumentNullException.ParamName.Should().Be("collectionNamespace"); - } - - [Fact] - public void constructor_with_key_should_throw_when_key_is_null() - { - var exception = Record.Exception(() => new GroupOperation<BsonDocument>(_collectionNamespace, (BsonDocument)null, _initial, _reduceFunction, _filter, _messageEncoderSettings)); - - var argumentNullException = exception.Should().BeOfType<ArgumentNullException>().Subject; - argumentNullException.ParamName.Should().Be("key"); - } - - [Fact] - public void constructor_with_key_should_throw_when_initial_is_null() - { - var exception = Record.Exception(() => new GroupOperation<BsonDocument>(_collectionNamespace, _key, null, _reduceFunction, _filter, _messageEncoderSettings)); - - var argumentNullException = exception.Should().BeOfType<ArgumentNullException>().Subject; - argumentNullException.ParamName.Should().Be("initial"); - } - - [Fact] - public void constructor_with_key_should_throw_when_reduceFunction_is_null() - { - var exception = Record.Exception(() => new GroupOperation<BsonDocument>(_collectionNamespace, _key, _initial, null, _filter, _messageEncoderSettings)); - - var argumentNullException = exception.Should().BeOfType<ArgumentNullException>().Subject; - argumentNullException.ParamName.Should().Be("reduceFunction"); - } - - [Fact] - public void constructor_with_keyFunction_should_initialize_subject() - { - var subject = new GroupOperation<BsonDocument>(_collectionNamespace, _keyFunction, _initial, _reduceFunction, _filter, _messageEncoderSettings); - - subject.CollectionNamespace.Should().Be(_collectionNamespace); - subject.KeyFunction.Should().Be(_keyFunction); - subject.Initial.Should().Be(_initial); - subject.ReduceFunction.Should().Be(_reduceFunction); - subject.Filter.Should().Be(_filter); - 
subject.MessageEncoderSettings.Should().BeSameAs(_messageEncoderSettings); - - subject.Collation.Should().BeNull(); - subject.FinalizeFunction.Should().BeNull(); - subject.Key.Should().BeNull(); - subject.MaxTime.Should().Be(default(TimeSpan?)); - subject.ResultSerializer.Should().BeNull(); - } - - [Fact] - public void constructor_with_keyFunction_should_throw_when_collectionNamespace_is_null() - { - var exception = Record.Exception(() => new GroupOperation<BsonDocument>(null, _keyFunction, _initial, _reduceFunction, _filter, _messageEncoderSettings)); - - var argumentNullException = exception.Should().BeOfType<ArgumentNullException>().Subject; - argumentNullException.ParamName.Should().Be("collectionNamespace"); - } - - [Fact] - public void constructor_with_keyFunction_should_throw_when_keyFunction_is_null() - { - var exception = Record.Exception(() => new GroupOperation<BsonDocument>(_collectionNamespace, (BsonJavaScript)null, _initial, _reduceFunction, _filter, _messageEncoderSettings)); - - var argumentNullException = exception.Should().BeOfType<ArgumentNullException>().Subject; - argumentNullException.ParamName.Should().Be("keyFunction"); - } - - [Fact] - public void constructor_with_keyFunction_should_throw_when_initial_is_null() - { - var exception = Record.Exception(() => new GroupOperation<BsonDocument>(_collectionNamespace, _keyFunction, null, _reduceFunction, _filter, _messageEncoderSettings)); - - var argumentNullException = exception.Should().BeOfType<ArgumentNullException>().Subject; - argumentNullException.ParamName.Should().Be("initial"); - } - - [Fact] - public void constructor_with_keyFunction_should_throw_when_reduceFunction_is_null() - { - var exception = Record.Exception(() => new GroupOperation<BsonDocument>(_collectionNamespace, _keyFunction, _initial, null, _filter, _messageEncoderSettings)); - - var argumentNullException = exception.Should().BeOfType<ArgumentNullException>().Subject; - 
argumentNullException.ParamName.Should().Be("reduceFunction"); - } - - [Theory] - [ParameterAttributeData] - public void Collation_get_and_set_should_work( - [Values(null, "en_US", "fr_CA")] - string locale) - { - var subject = new GroupOperation<BsonDocument>(_collectionNamespace, _key, _initial, _reduceFunction, _filter, _messageEncoderSettings); - var value = locale == null ? null : new Collation(locale); - - subject.Collation = value; - var result = subject.Collation; - - result.Should().BeSameAs(value); - } - - [Theory] - [ParameterAttributeData] - public void FinalizeFunction_get_and_set_should_work( - [Values(null, "x", "y")] - string code) - { - var subject = new GroupOperation<BsonDocument>(_collectionNamespace, _key, _initial, _reduceFunction, _filter, _messageEncoderSettings); - var value = code == null ? null : new BsonJavaScript(code); - - subject.FinalizeFunction = value; - var result = subject.FinalizeFunction; - - result.Should().BeSameAs(value); - } - - [Theory] - [ParameterAttributeData] - public void MaxTime_get_and_set_should_work( - [Values(-10000, 0, 1, 10000, 99999)] long maxTimeTicks) - { - var subject = new GroupOperation<BsonDocument>(_collectionNamespace, _key, _initial, _reduceFunction, _filter, _messageEncoderSettings); - var value = TimeSpan.FromTicks(maxTimeTicks); - - subject.MaxTime = value; - var result = subject.MaxTime; - - result.Should().Be(value); - } - - [Theory] - [ParameterAttributeData] - public void MaxTime_set_should_throw_when_value_is_invalid( - [Values(-10001, -9999, -1)] long maxTimeTicks) - { - var subject = new GroupOperation<BsonDocument>(_collectionNamespace, _key, _initial, _reduceFunction, _filter, _messageEncoderSettings); - var value = TimeSpan.FromTicks(maxTimeTicks); - - var exception = Record.Exception(() => subject.MaxTime = value); - - var e = exception.Should().BeOfType<ArgumentOutOfRangeException>().Subject; - e.ParamName.Should().Be("value"); - } - - [Theory] - [ParameterAttributeData] - public void 
ResultSerializer_get_and_set_should_work( - [Values(false, true)] - bool isNull) - { - var subject = new GroupOperation<BsonDocument>(_collectionNamespace, _key, _initial, _reduceFunction, _filter, _messageEncoderSettings); - var value = isNull ? null : new BsonDocumentSerializer(); - - subject.ResultSerializer = value; - var result = subject.ResultSerializer; - - result.Should().Be(value); - } - - [Theory] - [ParameterAttributeData] - public void CreateCommand_should_return_expected_result_when_key_is_used( - [Values(false, true)] - bool useFilter) - { - var filter = useFilter ? _filter : null; - var subject = new GroupOperation<BsonDocument>(_collectionNamespace, _key, _initial, _reduceFunction, filter, _messageEncoderSettings); - - var result = subject.CreateCommand(); - - var expectedResult = new BsonDocument - { - { "group", new BsonDocument - { - { "ns", _collectionNamespace.CollectionName }, - { "key", _key }, - { "$reduce", _reduceFunction }, - { "initial", _initial }, - { "cond", filter, filter != null } - } - } - }; - result.Should().Be(expectedResult); - } - - [Theory] - [ParameterAttributeData] - public void CreateCommand_should_return_expected_result_when_keyFunction_is_used( - [Values(false, true)] - bool isFilterNull) - { - var filter = isFilterNull ? 
_filter : null; - var subject = new GroupOperation<BsonDocument>(_collectionNamespace, _keyFunction, _initial, _reduceFunction, filter, _messageEncoderSettings); - - var result = subject.CreateCommand(); - - var expectedResult = new BsonDocument - { - { "group", new BsonDocument - { - { "ns", _collectionNamespace.CollectionName }, - { "$keyf", _keyFunction }, - { "$reduce", _reduceFunction }, - { "initial", _initial }, - { "cond", filter, filter != null } - } - } - }; - result.Should().Be(expectedResult); - } - - [Theory] - [ParameterAttributeData] - public void CreateCommand_should_return_expected_result_when_finalizeFunction_is_set( - [Values(false, true)] - bool isFinalizeFunctionNull) - { - var finalizeFunction = isFinalizeFunctionNull ? null : _finalizeFunction; - var subject = new GroupOperation<BsonDocument>(_collectionNamespace, _key, _initial, _reduceFunction, null, _messageEncoderSettings) - { - FinalizeFunction = finalizeFunction - }; - - var result = subject.CreateCommand(); - - var expectedResult = new BsonDocument - { - { "group", new BsonDocument - { - { "ns", _collectionNamespace.CollectionName }, - { "key", _key }, - { "$reduce", _reduceFunction }, - { "initial", _initial }, - { "finalize", finalizeFunction, finalizeFunction != null } - } - } - }; - result.Should().Be(expectedResult); - } - - [Theory] - [ParameterAttributeData] - public void CreateCommand_should_return_expected_result_when_Collation_is_set( - [Values(null, "en_US", "fr_CA")] - string locale) - { - var collation = locale == null ? 
null : new Collation(locale); - var subject = new GroupOperation<BsonDocument>(_collectionNamespace, _key, _initial, _reduceFunction, null, _messageEncoderSettings) - { - Collation = collation - }; - - var result = subject.CreateCommand(); - - var expectedResult = new BsonDocument - { - { "group", new BsonDocument - { - { "ns", _collectionNamespace.CollectionName }, - { "key", _key }, - { "$reduce", _reduceFunction }, - { "initial", _initial }, - { "collation", () => collation.ToBsonDocument(), collation != null } - } - } - }; - result.Should().Be(expectedResult); - } - - [Theory] - [InlineData(-10000, 0)] - [InlineData(0, 0)] - [InlineData(1, 1)] - [InlineData(9999, 1)] - [InlineData(10000, 1)] - [InlineData(10001, 2)] - public void CreateCommand_should_return_expected_result_when_MaxTime_is_set(long maxTimeTicks, int expectedMaxTimeMS) - { - var subject = new GroupOperation<BsonDocument>(_collectionNamespace, _key, _initial, _reduceFunction, null, _messageEncoderSettings) - { - MaxTime = TimeSpan.FromTicks(maxTimeTicks) - }; - - var result = subject.CreateCommand(); - - var expectedResult = new BsonDocument - { - { "group", new BsonDocument - { - { "ns", _collectionNamespace.CollectionName }, - { "key", _key }, - { "$reduce", _reduceFunction }, - { "initial", _initial } - } - }, - { "maxTimeMS", expectedMaxTimeMS } - }; - result.Should().Be(expectedResult); - result["maxTimeMS"].BsonType.Should().Be(BsonType.Int32); - } - - [Theory] - [ParameterAttributeData] - public void Execute_should_return_expected_result_when_key_is_used( - [Values(false, true)] - bool async) - { - RequireServer.Check().Supports(Feature.GroupCommand); - EnsureTestData(); - var subject = new GroupOperation<BsonDocument>(_collectionNamespace, _key, _initial, _reduceFunction, null, _messageEncoderSettings); - - var result = ExecuteOperation(subject, async); - - result.Should().Equal( - BsonDocument.Parse("{ x : 1, count : 2 }"), - BsonDocument.Parse("{ x : 2, count : 1 }"), - 
BsonDocument.Parse("{ x : 3, count : 3 }")); - } - - [Theory] - [ParameterAttributeData] - public void Execute_should_return_expected_result_when_keyFunction_is_used( - [Values(false, true)] - bool async) - { - RequireServer.Check().Supports(Feature.GroupCommand); - EnsureTestData(); - var subject = new GroupOperation<BsonDocument>(_collectionNamespace, _keyFunction, _initial, _reduceFunction, null, _messageEncoderSettings); - - var result = ExecuteOperation(subject, async); - - result.Should().Equal( - BsonDocument.Parse("{ x : 1, count : 2 }"), - BsonDocument.Parse("{ x : 2, count : 1 }"), - BsonDocument.Parse("{ x : 3, count : 3 }")); - } - - [Theory] - [ParameterAttributeData] - public void Execute_should_return_expected_result_when_Collation_is_set( - [Values(false, true)] - bool caseSensitive, - [Values(false, true)] - bool async) - { - RequireServer.Check().Supports(Feature.GroupCommand); - EnsureTestData(); - var collation = new Collation("en_US", caseLevel: caseSensitive, strength: CollationStrength.Primary); - var filter = BsonDocument.Parse("{ y : 'a' }"); - var subject = new GroupOperation<BsonDocument>(_collectionNamespace, _key, _initial, _reduceFunction, filter, _messageEncoderSettings) - { - Collation = collation - }; - - var result = ExecuteOperation(subject, async); - - BsonDocument[] expectedResult; - if (caseSensitive) - { - expectedResult = new[] - { - BsonDocument.Parse("{ x : 1, count : 2 }"), - BsonDocument.Parse("{ x : 3, count : 2 }") - }; - } - else - { - expectedResult = new[] - { - BsonDocument.Parse("{ x : 1, count : 2 }"), - BsonDocument.Parse("{ x : 2, count : 1 }"), - BsonDocument.Parse("{ x : 3, count : 3 }") - }; - } - result.Should().Equal(expectedResult); - } - - [Theory] - [ParameterAttributeData] - public void Execute_should_return_expected_result_when_FinalizeFunction_is_set( - [Values(false, true)] - bool async) - { - RequireServer.Check().Supports(Feature.GroupCommand); - EnsureTestData(); - var subject = new 
GroupOperation<BsonDocument>(_collectionNamespace, _key, _initial, _reduceFunction, null, _messageEncoderSettings) - { - FinalizeFunction = _finalizeFunction - }; - - var result = ExecuteOperation(subject, async); - - result.Should().Equal( - BsonDocument.Parse("{ x : 1, count : -2 }"), - BsonDocument.Parse("{ x : 2, count : -1 }"), - BsonDocument.Parse("{ x : 3, count : -3 }")); - } - - [Theory] - [ParameterAttributeData] - public void Execute_should_return_expected_result_when_MaxTime_is_used( - [Values(null, 1000)] - int? seconds, - [Values(false, true)] - bool async) - { - RequireServer.Check().Supports(Feature.GroupCommand); - EnsureTestData(); - var maxTime = seconds.HasValue ? TimeSpan.FromSeconds(seconds.Value) : (TimeSpan?)null; - var subject = new GroupOperation<BsonDocument>(_collectionNamespace, _key, _initial, _reduceFunction, null, _messageEncoderSettings) - { - MaxTime = maxTime - }; - - // TODO: force a timeout on the server? for now we're just smoke testing - var result = ExecuteOperation(subject, async); - - result.Should().Equal( - BsonDocument.Parse("{ x : 1, count : 2 }"), - BsonDocument.Parse("{ x : 2, count : 1 }"), - BsonDocument.Parse("{ x : 3, count : 3 }")); - } - - [Theory] - [ParameterAttributeData] - public void Execute_should_return_expected_result_when_ResultSerializer_is_used( - [Values(false, true)] - bool async) - { - RequireServer.Check().Supports(Feature.GroupCommand); - EnsureTestData(); - var resultSerializer = new ElementDeserializer<int>("x", new Int32Serializer()); - var subject = new GroupOperation<int>(_collectionNamespace, _key, _initial, _reduceFunction, null, _messageEncoderSettings) - { - ResultSerializer = resultSerializer - }; - - var result = ExecuteOperation(subject, async); - - result.Should().Equal(1, 2, 3); - } - - [Theory] - [ParameterAttributeData] - public void Execute_should_throw_when_binding_is_null( - [Values(false, true)] - bool async) - { - var subject = new 
GroupOperation<BsonDocument>(_collectionNamespace, _key, _initial, _reduceFunction, _filter, _messageEncoderSettings); - - var exception = Record.Exception(() => - { - if (async) - { - subject.ExecuteAsync(null, CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - subject.Execute(null, CancellationToken.None); - } - }); - - var argumentNullException = exception.Should().BeOfType<ArgumentNullException>().Subject; - argumentNullException.ParamName.Should().Be("binding"); - } - - [Theory] - [ParameterAttributeData] - public void Execute_should_send_session_id_when_supported( - [Values(false, true)] bool async) - { - RequireServer.Check().Supports(Feature.GroupCommand); - EnsureTestData(); - var subject = new GroupOperation<BsonDocument>(_collectionNamespace, _key, _initial, _reduceFunction, null, _messageEncoderSettings); - - VerifySessionIdWasSentWhenSupported(subject, "group", async); - } - - [Theory] - [ParameterAttributeData] - public void Execute_should_throw_when_maxTime_is_exceeded( - [Values(false, true)] bool async) - { - RequireServer.Check().Supports(Feature.GroupCommand).ClusterTypes(ClusterType.Standalone, ClusterType.ReplicaSet); - var subject = new GroupOperation<BsonDocument>(_collectionNamespace, _key, _initial, _reduceFunction, null, _messageEncoderSettings); - subject.MaxTime = TimeSpan.FromSeconds(9001); - - using (var failPoint = FailPoint.ConfigureAlwaysOn(_cluster, _session, FailPointName.MaxTimeAlwaysTimeout)) - { - var exception = Record.Exception(() => ExecuteOperation(subject, failPoint.Binding, async)); - - exception.Should().BeOfType<MongoExecutionTimeoutException>(); - } - } - - // helper methods - private void EnsureTestData() - { - RunOncePerFixture(() => - { - DropCollection(); - Insert( - BsonDocument.Parse("{ _id : 1, x : 1, y : 'a' }"), - BsonDocument.Parse("{ _id : 2, x : 1, y : 'a' }"), - BsonDocument.Parse("{ _id : 3, x : 2, y : 'A' }"), - BsonDocument.Parse("{ _id : 4, x : 3, y : 'a' }"), - BsonDocument.Parse("{ 
_id : 5, x : 3, y : 'a' }"), - BsonDocument.Parse("{ _id : 6, x : 3, y : 'A' }") - ); - CreateIndexes(new CreateIndexRequest(new BsonDocument("Location", "2d"))); - }); - } - } -} diff --git a/tests/MongoDB.Driver.Tests/Core/Operations/InsertOpcodeOperationTests.cs b/tests/MongoDB.Driver.Tests/Core/Operations/InsertOpcodeOperationTests.cs deleted file mode 100644 index bd8ff43b2f5..00000000000 --- a/tests/MongoDB.Driver.Tests/Core/Operations/InsertOpcodeOperationTests.cs +++ /dev/null @@ -1,176 +0,0 @@ -/* Copyright 2013-present MongoDB Inc. -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. 
-*/ - -using System; -using System.Collections.Generic; -using FluentAssertions; -using MongoDB.Bson; -using MongoDB.Bson.Serialization.Serializers; -using MongoDB.TestHelpers.XunitExtensions; -using MongoDB.Driver.Core.Misc; -using MongoDB.Driver.Core.TestHelpers.XunitExtensions; -using Xunit; - -namespace MongoDB.Driver.Core.Operations -{ - public class InsertOpcodeOperationTests : OperationTestBase - { - private BsonDocument[] _documents; - - public InsertOpcodeOperationTests() - { - _documents = new[] - { - BsonDocument.Parse("{_id: 1, x: 1}") - }; - } - - [Fact] - public void Constructor_should_throw_when_collection_namespace_is_null() - { - Action act = () => new InsertOpcodeOperation<BsonDocument>(null, _documents, BsonDocumentSerializer.Instance, _messageEncoderSettings); - - act.ShouldThrow<ArgumentNullException>(); - } - - [Fact] - public void Constructor_should_throw_when_serializer_is_null() - { - Action act = () => new InsertOpcodeOperation<BsonDocument>(_collectionNamespace, _documents, null, _messageEncoderSettings); - - act.ShouldThrow<ArgumentNullException>(); - } - - [Fact] - public void Constructor_should_throw_when_message_encoder_settings_is_null() - { - Action act = () => new InsertOpcodeOperation<BsonDocument>(_collectionNamespace, _documents, BsonDocumentSerializer.Instance, null); - - act.ShouldThrow<ArgumentNullException>(); - } - - [Fact] - public void Constructor_should_initialize_object() - { - var subject = new InsertOpcodeOperation<BsonDocument>(_collectionNamespace, _documents, BsonDocumentSerializer.Instance, _messageEncoderSettings); - - subject.CollectionNamespace.FullName.Should().Be(_collectionNamespace.FullName); - subject.Documents.Count.Should().Be(_documents.Length); - subject.Serializer.Should().BeSameAs(BsonDocumentSerializer.Instance); - subject.MessageEncoderSettings.Should().BeEquivalentTo(_messageEncoderSettings); - } - - [Fact] - public void ContinueOnError_should_work() - { - var subject = new 
InsertOpcodeOperation<BsonDocument>(_collectionNamespace, _documents, BsonDocumentSerializer.Instance, _messageEncoderSettings); - - subject.ContinueOnError.Should().Be(false); - - subject.ContinueOnError = true; - - subject.ContinueOnError.Should().Be(true); - } - - [Fact] - public void MaxBatchCount_should_work() - { - var subject = new InsertOpcodeOperation<BsonDocument>(_collectionNamespace, _documents, BsonDocumentSerializer.Instance, _messageEncoderSettings); - - subject.MaxBatchCount.Should().Be(null); - - subject.MaxBatchCount = 20; - - subject.MaxBatchCount.Should().Be(20); - } - - [Fact] - public void MaxDocumentSize_should_work() - { - var subject = new InsertOpcodeOperation<BsonDocument>(_collectionNamespace, _documents, BsonDocumentSerializer.Instance, _messageEncoderSettings); - - subject.MaxDocumentSize.Should().Be(null); - - subject.MaxDocumentSize = 20; - - subject.MaxDocumentSize.Should().Be(20); - } - - [Fact] - public void MaxMessageSize_should_work() - { - var subject = new InsertOpcodeOperation<BsonDocument>(_collectionNamespace, _documents, BsonDocumentSerializer.Instance, _messageEncoderSettings); - - subject.MaxMessageSize.Should().Be(null); - - subject.MaxMessageSize = 20; - - subject.MaxMessageSize.Should().Be(20); - } - - [Fact] - public void WriteConcern_should_work() - { - var subject = new InsertOpcodeOperation<BsonDocument>(_collectionNamespace, _documents, BsonDocumentSerializer.Instance, _messageEncoderSettings); - - subject.WriteConcern.Should().Be(WriteConcern.Acknowledged); - - subject.WriteConcern = WriteConcern.W2; - - subject.WriteConcern.Should().Be(WriteConcern.W2); - } - - [Theory] - [ParameterAttributeData] - public void Execute_should_insert_a_single_document( - [Values(false, true)] - bool async) - { - RequireServer.Check(); - DropCollection(); - var subject = new InsertOpcodeOperation<BsonDocument>(_collectionNamespace, _documents, BsonDocumentSerializer.Instance, _messageEncoderSettings); - - var result = 
ExecuteOperation(subject, async); - result.Should().HaveCount(1); - - var list = ReadAllFromCollection(async); - list.Should().HaveCount(1); - } - - [Theory] - [ParameterAttributeData] - public void Execute_should_insert_multiple_documents( - [Values(false, true)] - bool async) - { - RequireServer.Check(); - DropCollection(); - var documents = new[] - { - BsonDocument.Parse("{_id: 1, x: 1}"), - BsonDocument.Parse("{_id: 2, x: 2}"), - BsonDocument.Parse("{_id: 3, x: 3}"), - BsonDocument.Parse("{_id: 4, x: 4}"), - }; - var subject = new InsertOpcodeOperation<BsonDocument>(_collectionNamespace, documents, BsonDocumentSerializer.Instance, _messageEncoderSettings); - - var result = ExecuteOperation(subject, async); - result.Should().HaveCount(1); - - var list = ReadAllFromCollection(async); - list.Should().HaveCount(4); - } - } -} diff --git a/tests/MongoDB.Driver.Tests/Core/Operations/MapReduceOperationBaseTests.cs b/tests/MongoDB.Driver.Tests/Core/Operations/MapReduceOperationBaseTests.cs index 4213fdeffb4..8840542784f 100644 --- a/tests/MongoDB.Driver.Tests/Core/Operations/MapReduceOperationBaseTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Operations/MapReduceOperationBaseTests.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -14,10 +14,10 @@ */ using System; +using System.Threading; using FluentAssertions; using MongoDB.Bson; using MongoDB.TestHelpers.XunitExtensions; -using MongoDB.Driver.Core.Misc; using MongoDB.Driver.Core.WireProtocol.Messages.Encoders; using Xunit; @@ -125,7 +125,7 @@ public void CreateCommand_should_return_the_expected_result() var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -152,7 +152,7 @@ public void CreateCommand_should_return_the_expected_result_when_Collation_is_pr var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -179,7 +179,7 @@ public void CreateCommand_should_return_the_expected_result_when_Filter_is_provi var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -206,7 +206,7 @@ public void CreateCommand_should_return_the_expected_result_when_FinalizeFunctio var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -234,7 +234,7 @@ public void 
CreateCommand_should_return_the_expected_result_when_JavaScriptMode_ var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -260,7 +260,7 @@ public void CreateCommand_should_return_the_expected_result_when_Limit_is_provid var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -289,7 +289,7 @@ public void CreateCommand_should_return_expected_result_when_MaxTime_is_set(long var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -303,6 +303,24 @@ public void CreateCommand_should_return_expected_result_when_MaxTime_is_set(long result["maxTimeMS"].BsonType.Should().Be(BsonType.Int32); } + [Theory] + [InlineData(42)] + [InlineData(-1)] + public void CreateCommand_should_ignore_maxtime_if_timeout_specified(int timeoutMs) + { + var subject = new FakeMapReduceOperation(_collectionNamespace, _mapFunction, _reduceFunction, _messageEncoderSettings) + { + MaxTime = TimeSpan.FromTicks(10) + }; + var session = OperationTestHelper.CreateSession(); + var connectionDescription = OperationTestHelper.CreateConnectionDescription(); + + var operationContext = new OperationContext(TimeSpan.FromMilliseconds(timeoutMs), CancellationToken.None); + 
var result = subject.CreateCommand(operationContext, session, connectionDescription); + + result.Should().NotContain("maxTimeMS"); + } + [Theory] [ParameterAttributeData] public void CreateCommand_should_return_the_expected_result_when_Scope_is_provided( @@ -317,7 +335,7 @@ public void CreateCommand_should_return_the_expected_result_when_Scope_is_provid var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -344,7 +362,7 @@ public void CreateCommand_should_return_the_expected_result_when_Sort_is_provide var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -370,7 +388,7 @@ public void CreateCommand_should_return_the_expected_result_when_Verbose_is_prov var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { diff --git a/tests/MongoDB.Driver.Tests/Core/Operations/MapReduceOperationTests.cs b/tests/MongoDB.Driver.Tests/Core/Operations/MapReduceOperationTests.cs index 27caafdb348..dfc5d489c68 100644 --- a/tests/MongoDB.Driver.Tests/Core/Operations/MapReduceOperationTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Operations/MapReduceOperationTests.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. 
+/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -507,7 +507,7 @@ public void CreateCommand_should_return_expected_result_when_ReadConcern_is_set( var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -536,7 +536,7 @@ public void CreateCommand_should_return_the_expected_result_when_using_causal_co var session = OperationTestHelper.CreateSession(isCausallyConsistent: true, operationTime: new BsonTimestamp(100)); var connectionDescription = OperationTestHelper.CreateConnectionDescription(supportsSessions: true); - var result = subject.CreateCommand(session, connectionDescription); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedReadConcernDocument = readConcern.ToBsonDocument(); expectedReadConcernDocument["afterClusterTime"] = new BsonTimestamp(100); diff --git a/tests/MongoDB.Driver.Tests/Core/Operations/MapReduceOutputToCollectionOperationTests.cs b/tests/MongoDB.Driver.Tests/Core/Operations/MapReduceOutputToCollectionOperationTests.cs index 5255ae0d989..beeef0c685d 100644 --- a/tests/MongoDB.Driver.Tests/Core/Operations/MapReduceOutputToCollectionOperationTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Operations/MapReduceOutputToCollectionOperationTests.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -18,10 +18,7 @@ using FluentAssertions; using MongoDB.Bson; using MongoDB.TestHelpers.XunitExtensions; -using MongoDB.Driver.Core.Bindings; using MongoDB.Driver.Core.Clusters; -using MongoDB.Driver.Core.Connections; -using MongoDB.Driver.Core.Misc; using MongoDB.Driver.Core.TestHelpers.XunitExtensions; using Xunit; @@ -217,7 +214,7 @@ public void CreateCommand_should_return_expected_result_when_BypassDocumentValid var connectionDescription = OperationTestHelper.CreateConnectionDescription(); var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(session, connectionDescription); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { @@ -246,7 +243,7 @@ public void CreateCommand_should_return_expected_result_when_WriteConcern_is_set var connectionDescription = OperationTestHelper.CreateConnectionDescription(); var session = OperationTestHelper.CreateSession(); - var result = subject.CreateCommand(session, connectionDescription); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); var expectedResult = new BsonDocument { diff --git a/tests/MongoDB.Driver.Tests/Core/Operations/OperationTestBase.cs b/tests/MongoDB.Driver.Tests/Core/Operations/OperationTestBase.cs index b323f4e7e1a..e42b9a5a05a 100644 --- a/tests/MongoDB.Driver.Tests/Core/Operations/OperationTestBase.cs +++ b/tests/MongoDB.Driver.Tests/Core/Operations/OperationTestBase.cs @@ -30,6 +30,7 @@ namespace MongoDB.Driver.Core.Operations { + [Trait("Category", "Integration")] public abstract class OperationTestBase : IDisposable { private protected IClusterInternal _cluster; @@ -141,7 +142,7 @@ private protected TResult ExecuteOperation<TResult>(IReadOperation<TResult> oper using (var binding = CreateReadBinding()) using (var bindingHandle = new ReadBindingHandle(binding)) { - return operation.Execute(bindingHandle, CancellationToken.None); + return 
operation.Execute(OperationContext.NoTimeout, bindingHandle); } } @@ -162,14 +163,9 @@ private protected async Task<TResult> ExecuteOperationAsync<TResult>(IReadOperat using (var binding = CreateReadBinding(cluster)) using (var bindingHandle = new ReadBindingHandle(binding)) { - if (async) - { - return operation.Execute(bindingHandle, CancellationToken.None); - } - else - { - return await operation.ExecuteAsync(bindingHandle, CancellationToken.None); - } + return async ? + await operation.ExecuteAsync(OperationContext.NoTimeout, bindingHandle) : + operation.Execute(OperationContext.NoTimeout, bindingHandle); } } @@ -177,11 +173,11 @@ private protected TResult ExecuteOperation<TResult>(IReadOperation<TResult> oper { if (async) { - return operation.ExecuteAsync(binding, CancellationToken.None).GetAwaiter().GetResult(); + return operation.ExecuteAsync(OperationContext.NoTimeout, binding).GetAwaiter().GetResult(); } else { - return operation.Execute(binding, CancellationToken.None); + return operation.Execute(OperationContext.NoTimeout, binding); } } @@ -199,7 +195,7 @@ private protected TResult ExecuteOperationSync<TResult>(IWriteOperation<TResult> using (var binding = CreateReadWriteBinding(useImplicitSession)) using (var bindingHandle = new ReadWriteBindingHandle(binding)) { - return operation.Execute(bindingHandle, CancellationToken.None); + return operation.Execute(OperationContext.NoTimeout, bindingHandle); } } @@ -219,11 +215,11 @@ private protected TResult ExecuteOperation<TResult>(IWriteOperation<TResult> ope { if (async) { - return operation.ExecuteAsync(binding, CancellationToken.None).GetAwaiter().GetResult(); + return operation.ExecuteAsync(OperationContext.NoTimeout, binding).GetAwaiter().GetResult(); } else { - return operation.Execute(binding, CancellationToken.None); + return operation.Execute(OperationContext.NoTimeout, binding); } } @@ -238,7 +234,7 @@ private protected async Task<TResult> ExecuteOperationAsync<TResult>(IReadOperat private protected 
async Task<TResult> ExecuteOperationAsync<TResult>(IReadOperation<TResult> operation, IReadBinding binding) { - return await operation.ExecuteAsync(binding, CancellationToken.None); + return await operation.ExecuteAsync(OperationContext.NoTimeout, binding); } private protected async Task<TResult> ExecuteOperationAsync<TResult>(IWriteOperation<TResult> operation, bool useImplicitSession = false) @@ -246,7 +242,7 @@ private protected async Task<TResult> ExecuteOperationAsync<TResult>(IWriteOpera using (var binding = CreateReadWriteBinding(useImplicitSession)) using (var bindingHandle = new ReadWriteBindingHandle(binding)) { - return await operation.ExecuteAsync(bindingHandle, CancellationToken.None); + return await operation.ExecuteAsync(OperationContext.NoTimeout, bindingHandle); } } @@ -257,18 +253,23 @@ private protected async Task<TResult> ExecuteOperationAsync<TResult>(IWriteOpera { if (async) { - return await operation.ExecuteAsync(bindingHandle, CancellationToken.None); + return await operation.ExecuteAsync(OperationContext.NoTimeout, bindingHandle); } else { - return operation.Execute(bindingHandle, CancellationToken.None); + return operation.Execute(OperationContext.NoTimeout, bindingHandle); } } } + private protected TResult ExecuteOperation<TResult>(IWriteOperation<TResult> operation, IWriteBinding binding, bool async) + => async ? 
+ operation.ExecuteAsync(OperationContext.NoTimeout, binding).GetAwaiter().GetResult() : + operation.Execute(OperationContext.NoTimeout, binding); + private protected async Task<TResult> ExecuteOperationAsync<TResult>(IWriteOperation<TResult> operation, IWriteBinding binding) { - return await operation.ExecuteAsync(binding, CancellationToken.None); + return await operation.ExecuteAsync(OperationContext.NoTimeout, binding); } private protected void CreateIndexes(params CreateIndexRequest[] requests) @@ -512,8 +513,8 @@ private protected void VerifySessionIdWasNotSentIfUnacknowledgedWrite<TResult>( bool useImplicitSession) { VerifySessionIdSending( - (binding, cancellationToken) => operation.ExecuteAsync(binding, cancellationToken), - (binding, cancellationToken) => operation.Execute(binding, cancellationToken), + (binding, cancellationToken) => operation.ExecuteAsync(cancellationToken, binding), + (binding, cancellationToken) => operation.Execute(cancellationToken, binding), AssertSessionIdWasNotSentIfUnacknowledgedWrite, commandName, async, @@ -523,8 +524,8 @@ private protected void VerifySessionIdWasNotSentIfUnacknowledgedWrite<TResult>( private protected void VerifySessionIdWasSentWhenSupported<TResult>(IReadOperation<TResult> operation, string commandName, bool async) { VerifySessionIdSending( - (binding, cancellationToken) => operation.ExecuteAsync(binding, cancellationToken), - (binding, cancellationToken) => operation.Execute(binding, cancellationToken), + (binding, cancellationToken) => operation.ExecuteAsync(cancellationToken, binding), + (binding, cancellationToken) => operation.Execute(cancellationToken, binding), AssertSessionIdWasSentWhenSupported, commandName, async); @@ -533,16 +534,16 @@ private protected void VerifySessionIdWasSentWhenSupported<TResult>(IReadOperati private protected void VerifySessionIdWasSentWhenSupported<TResult>(IWriteOperation<TResult> operation, string commandName, bool async) { VerifySessionIdSending( - (binding, 
cancellationToken) => operation.ExecuteAsync(binding, cancellationToken), - (binding, cancellationToken) => operation.Execute(binding, cancellationToken), + (binding, cancellationToken) => operation.ExecuteAsync(cancellationToken, binding), + (binding, cancellationToken) => operation.Execute(cancellationToken, binding), AssertSessionIdWasSentWhenSupported, commandName, async); } private protected void VerifySessionIdSending<TResult>( - Func<WritableServerBinding, CancellationToken, Task<TResult>> executeAsync, - Func<WritableServerBinding, CancellationToken, TResult> execute, + Func<WritableServerBinding, OperationContext, Task<TResult>> executeAsync, + Func<WritableServerBinding, OperationContext, TResult> execute, Action<EventCapturer, ICoreSessionHandle, Exception> assertResults, string commandName, bool async, @@ -554,16 +555,14 @@ private protected void VerifySessionIdSending<TResult>( using (var session = CreateSession(cluster, useImplicitSession)) using (var binding = new WritableServerBinding(cluster, session.Fork())) { - using var cancellationTokenSource = new CancellationTokenSource(); - var cancellationToken = cancellationTokenSource.Token; Exception exception; if (async) { - exception = Record.Exception(() => executeAsync(binding, cancellationToken).GetAwaiter().GetResult()); + exception = Record.Exception(() => executeAsync(binding, OperationContext.NoTimeout).GetAwaiter().GetResult()); } else { - exception = Record.Exception(() => execute(binding, cancellationToken)); + exception = Record.Exception(() => execute(binding, OperationContext.NoTimeout)); } assertResults(eventCapturer, session, exception); diff --git a/tests/MongoDB.Driver.Tests/Core/Operations/ReadCommandOperationTests.cs b/tests/MongoDB.Driver.Tests/Core/Operations/ReadCommandOperationTests.cs index e66a2fa67d1..48b4bed12bf 100644 --- a/tests/MongoDB.Driver.Tests/Core/Operations/ReadCommandOperationTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Operations/ReadCommandOperationTests.cs @@ 
-1,4 +1,4 @@ -/* Copyright 2016-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,9 +13,8 @@ * limitations under the License. */ -using System; +using System.Collections.Generic; using System.Net; -using System.Threading; using System.Threading.Tasks; using FluentAssertions; using MongoDB.Bson; @@ -33,7 +32,7 @@ namespace MongoDB.Driver.Core.Operations { - public class ReadCommandOperationTests + public class ReadCommandOperationTests : OperationTestBase { // public methods [Fact] @@ -91,16 +90,13 @@ public void Execute_should_call_channel_Command_with_unwrapped_command_when_wrap var mockChannel = CreateMockChannel(); var channelSource = CreateMockChannelSource(serverDescription, mockChannel.Object).Object; var binding = CreateMockReadBinding(readPreference, channelSource).Object; - using var cancellationTokenSource = new CancellationTokenSource(); - var cancellationToken = cancellationTokenSource.Token; - BsonDocument result; + ExecuteOperation(subject, binding, async); if (async) { - result = subject.ExecuteAsync(binding, cancellationToken).GetAwaiter().GetResult(); - mockChannel.Verify( c => c.CommandAsync( + It.IsAny<OperationContext>(), binding.Session, readPreference, subject.DatabaseNamespace, @@ -111,16 +107,14 @@ public void Execute_should_call_channel_Command_with_unwrapped_command_when_wrap null, // postWriteAction CommandResponseHandling.Return, subject.ResultSerializer, - subject.MessageEncoderSettings, - cancellationToken), + subject.MessageEncoderSettings), Times.Once); } else { - result = subject.Execute(binding, cancellationToken); - mockChannel.Verify( c => c.Command( + It.IsAny<OperationContext>(), binding.Session, readPreference, subject.DatabaseNamespace, @@ -131,8 +125,7 @@ public void Execute_should_call_channel_Command_with_unwrapped_command_when_wrap null, // postWriteAction 
CommandResponseHandling.Return, subject.ResultSerializer, - subject.MessageEncoderSettings, - cancellationToken), + subject.MessageEncoderSettings), Times.Once); } } @@ -150,17 +143,14 @@ public void Execute_should_call_channel_Command_with_wrapped_command_when_additi var mockChannel = CreateMockChannel(); var channelSource = CreateMockChannelSource(serverDescription, mockChannel.Object).Object; var binding = CreateMockReadBinding(readPreference, channelSource).Object; - using var cancellationTokenSource = new CancellationTokenSource(); - var cancellationToken = cancellationTokenSource.Token; var additionalOptions = BsonDocument.Parse("{ $comment : \"comment\", additional : 1 }"); - BsonDocument result; + ExecuteOperation(subject, binding, async); if (async) { - result = subject.ExecuteAsync(binding, cancellationToken).GetAwaiter().GetResult(); - mockChannel.Verify( c => c.CommandAsync( + It.IsAny<OperationContext>(), binding.Session, readPreference, subject.DatabaseNamespace, @@ -171,16 +161,14 @@ public void Execute_should_call_channel_Command_with_wrapped_command_when_additi null, // postWriteAction CommandResponseHandling.Return, subject.ResultSerializer, - subject.MessageEncoderSettings, - cancellationToken), + subject.MessageEncoderSettings), Times.Once); } else { - result = subject.Execute(binding, cancellationToken); - mockChannel.Verify( c => c.Command( + It.IsAny<OperationContext>(), binding.Session, readPreference, subject.DatabaseNamespace, @@ -191,8 +179,7 @@ public void Execute_should_call_channel_Command_with_wrapped_command_when_additi null, // postWriteAction CommandResponseHandling.Return, subject.ResultSerializer, - subject.MessageEncoderSettings, - cancellationToken), + subject.MessageEncoderSettings), Times.Once); } } @@ -209,17 +196,14 @@ public void Execute_should_call_channel_Command_with_wrapped_command_when_commen var mockChannel = CreateMockChannel(); var channelSource = CreateMockChannelSource(serverDescription, 
mockChannel.Object).Object; var binding = CreateMockReadBinding(readPreference, channelSource).Object; - using var cancellationTokenSource = new CancellationTokenSource(); - var cancellationToken = cancellationTokenSource.Token; var additionalOptions = BsonDocument.Parse("{ $comment : \"comment\" }"); - BsonDocument result; + ExecuteOperation(subject, binding, async); if (async) { - result = subject.ExecuteAsync(binding, cancellationToken).GetAwaiter().GetResult(); - mockChannel.Verify( c => c.CommandAsync( + It.IsAny<OperationContext>(), binding.Session, readPreference, subject.DatabaseNamespace, @@ -230,16 +214,14 @@ public void Execute_should_call_channel_Command_with_wrapped_command_when_commen null, // postWriteAction CommandResponseHandling.Return, subject.ResultSerializer, - subject.MessageEncoderSettings, - cancellationToken), + subject.MessageEncoderSettings), Times.Once); } else { - result = subject.Execute(binding, cancellationToken); - mockChannel.Verify( c => c.Command( + It.IsAny<OperationContext>(), binding.Session, readPreference, subject.DatabaseNamespace, @@ -250,8 +232,7 @@ public void Execute_should_call_channel_Command_with_wrapped_command_when_commen null, // postWriteAction CommandResponseHandling.Return, subject.ResultSerializer, - subject.MessageEncoderSettings, - cancellationToken), + subject.MessageEncoderSettings), Times.Once); } } @@ -269,17 +250,14 @@ public void Execute_should_call_channel_Command_with_wrapped_command_when_readPr var mockChannel = CreateMockChannel(); var channelSource = CreateMockChannelSource(serverDescription, mockChannel.Object).Object; var binding = CreateMockReadBinding(readPreference, channelSource).Object; - using var cancellationTokenSource = new CancellationTokenSource(); - var cancellationToken = cancellationTokenSource.Token; var additionalOptions = BsonDocument.Parse("{ $comment : \"comment\", additional : 1 }"); - BsonDocument result; + ExecuteOperation(subject, binding, async); if (async) { - result = 
subject.ExecuteAsync(binding, cancellationToken).GetAwaiter().GetResult(); - mockChannel.Verify( c => c.CommandAsync( + It.IsAny<OperationContext>(), binding.Session, readPreference, subject.DatabaseNamespace, @@ -290,16 +268,14 @@ public void Execute_should_call_channel_Command_with_wrapped_command_when_readPr null, // postWriteAction CommandResponseHandling.Return, subject.ResultSerializer, - subject.MessageEncoderSettings, - cancellationToken), + subject.MessageEncoderSettings), Times.Once); } else { - result = subject.Execute(binding, cancellationToken); - mockChannel.Verify( c => c.Command( + It.IsAny<OperationContext>(), binding.Session, readPreference, subject.DatabaseNamespace, @@ -310,8 +286,7 @@ public void Execute_should_call_channel_Command_with_wrapped_command_when_readPr null, // postWriteAction CommandResponseHandling.Return, subject.ResultSerializer, - subject.MessageEncoderSettings, - cancellationToken), + subject.MessageEncoderSettings), Times.Once); } } @@ -326,18 +301,15 @@ public void Execute_should_call_GetChannel_only_once([Values(false, true)] bool var mockChannel = CreateMockChannel(); var mockChannelSource = CreateMockChannelSource(serverDescription, mockChannel.Object); var binding = CreateMockReadBinding(readPreference, mockChannelSource.Object).Object; - using var cancellationTokenSource = new CancellationTokenSource(); - var cancellationToken = cancellationTokenSource.Token; + ExecuteOperation(subject, binding, async); if (async) { - subject.ExecuteAsync(binding, cancellationToken).GetAwaiter().GetResult(); - mockChannelSource.Verify(c => c.GetChannelAsync(cancellationToken), Times.Once); + mockChannelSource.Verify(c => c.GetChannelAsync(It.IsAny<OperationContext>()), Times.Once); } else { - subject.Execute(binding, cancellationToken); - mockChannelSource.Verify(c => c.GetChannel(cancellationToken), Times.Once); + mockChannelSource.Verify(c => c.GetChannel(It.IsAny<OperationContext>()), Times.Once); } } @@ -348,8 +320,8 @@ private 
Mock<IReadBinding> CreateMockReadBinding(ReadPreference readPreference, var mockSession = new Mock<ICoreSessionHandle>(); mockBinding.SetupGet(b => b.ReadPreference).Returns(readPreference); mockBinding.SetupGet(b => b.Session).Returns(mockSession.Object); - mockBinding.Setup(b => b.GetReadChannelSource(It.IsAny<CancellationToken>())).Returns(channelSource); - mockBinding.Setup(b => b.GetReadChannelSourceAsync(It.IsAny<CancellationToken>())).Returns(Task.FromResult(channelSource)); + mockBinding.Setup(b => b.GetReadChannelSource(It.IsAny<OperationContext>(), It.IsAny<IReadOnlyCollection<ServerDescription>>())).Returns(channelSource); + mockBinding.Setup(b => b.GetReadChannelSourceAsync(It.IsAny<OperationContext>(), It.IsAny<IReadOnlyCollection<ServerDescription>>())).Returns(Task.FromResult(channelSource)); return mockBinding; } @@ -363,8 +335,8 @@ private Mock<IChannelSourceHandle> CreateMockChannelSource(ServerDescription ser { var mockChannelSource = new Mock<IChannelSourceHandle>(); mockChannelSource.SetupGet(s => s.ServerDescription).Returns(serverDescription); - mockChannelSource.Setup(s => s.GetChannel(It.IsAny<CancellationToken>())).Returns(channel); - mockChannelSource.Setup(s => s.GetChannelAsync(It.IsAny<CancellationToken>())).Returns(Task.FromResult(channel)); + mockChannelSource.Setup(s => s.GetChannel(It.IsAny<OperationContext>())).Returns(channel); + mockChannelSource.Setup(s => s.GetChannelAsync(It.IsAny<OperationContext>())).Returns(Task.FromResult(channel)); return mockChannelSource; } diff --git a/tests/MongoDB.Driver.Tests/Core/Operations/RenameCollectionOperationTests.cs b/tests/MongoDB.Driver.Tests/Core/Operations/RenameCollectionOperationTests.cs index a8ac7ffc284..c95030ff9ea 100644 --- a/tests/MongoDB.Driver.Tests/Core/Operations/RenameCollectionOperationTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Operations/RenameCollectionOperationTests.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,6 +14,7 @@ */ using System; +using System.Threading; using System.Threading.Tasks; using FluentAssertions; using MongoDB.Bson; @@ -62,17 +63,19 @@ public void constructor_should_initialize_subject() [Fact] public void constructor_should_throw_when_collectionNamespace_is_null() { - Action action = () => new RenameCollectionOperation(null, _newCollectionNamespace, _messageEncoderSettings); + var exception = Record.Exception(() => new RenameCollectionOperation(null, _newCollectionNamespace, _messageEncoderSettings)); - action.ShouldThrow<ArgumentNullException>().And.ParamName.Should().Be("collectionNamespace"); + exception.Should().BeOfType<ArgumentNullException>().Subject + .ParamName.Should().Be("collectionNamespace"); } [Fact] public void constructor_should_throw_when_newCollectionNamespace_is_null() { - Action action = () => new RenameCollectionOperation(_collectionNamespace, null, _messageEncoderSettings); + var exception = Record.Exception(() => new RenameCollectionOperation(_collectionNamespace, null, _messageEncoderSettings)); - action.ShouldThrow<ArgumentNullException>().And.ParamName.Should().Be("newCollectionNamespace"); + exception.Should().BeOfType<ArgumentNullException>().Subject + .ParamName.Should().Be("newCollectionNamespace"); } [Fact] @@ -87,7 +90,7 @@ public void CreateCommand_should_return_expected_result() var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); result.Should().Be(expectedResult); } @@ -111,7 +114,7 @@ public void CreateCommand_should_return_expected_result_when_dropTarget_is_provi var session = OperationTestHelper.CreateSession(); var 
connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription); + var result = subject.CreateCommand(OperationContext.NoTimeout, session, connectionDescription); result.Should().Be(expectedResult); } @@ -119,24 +122,39 @@ public void CreateCommand_should_return_expected_result_when_dropTarget_is_provi [Theory] [ParameterAttributeData] public void CreateCommand_should_return_expected_result_when_WriteConcern_is_set( - [Values(null, 1, 2)] - int? w) + [Values(null, 1, 2)] int? w, + [Values(null, 100)] int? wtimeout, + [Values(true, false)] bool hasOperationTimeout + ) { var writeConcern = w.HasValue ? new WriteConcern(w.Value) : null; + if (wtimeout.HasValue) + { + writeConcern ??= WriteConcern.Acknowledged; + writeConcern = writeConcern.With(wTimeout: TimeSpan.FromMilliseconds(wtimeout.Value)); + } + var subject = new RenameCollectionOperation(_collectionNamespace, _newCollectionNamespace, _messageEncoderSettings) { WriteConcern = writeConcern }; + var operationContext = hasOperationTimeout ? 
new OperationContext(TimeSpan.FromSeconds(42), CancellationToken.None) : OperationContext.NoTimeout; var session = OperationTestHelper.CreateSession(); var connectionDescription = OperationTestHelper.CreateConnectionDescription(); - var result = subject.CreateCommand(session, connectionDescription); + var result = subject.CreateCommand(operationContext, session, connectionDescription); + + var expectedWriteConcern = writeConcern?.ToBsonDocument(); + if (hasOperationTimeout) + { + expectedWriteConcern?.Remove("wtimeout"); + } var expectedResult = new BsonDocument { { "renameCollection", _collectionNamespace.FullName }, { "to", _newCollectionNamespace.FullName }, - { "writeConcern", () => writeConcern.ToBsonDocument(), writeConcern != null } + { "writeConcern", () => expectedWriteConcern, w.HasValue || (wtimeout.HasValue && !hasOperationTimeout) } }; result.Should().Be(expectedResult); } diff --git a/tests/MongoDB.Driver.Tests/Core/Operations/RetryableReadOperationExecutorTests.cs b/tests/MongoDB.Driver.Tests/Core/Operations/RetryableReadOperationExecutorTests.cs new file mode 100644 index 00000000000..6b537fee55a --- /dev/null +++ b/tests/MongoDB.Driver.Tests/Core/Operations/RetryableReadOperationExecutorTests.cs @@ -0,0 +1,80 @@ +/* Copyright 2010-present MongoDB Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+using System;
+using System.IO;
+using System.Threading;
+using MongoDB.Bson.TestHelpers;
+using MongoDB.Driver.Core.Bindings;
+using MongoDB.Driver.Core.Operations;
+using MongoDB.Driver.Core.TestHelpers;
+using Moq;
+using Xunit;
+
+namespace MongoDB.Driver.Core.Tests.Core.Operations
+{
+    public class RetryableReadOperationExecutorTests
+    {
+        [Theory]
+        // No retries if retryRequested == false
+        [InlineData(false, false, false, true, false, 1)]
+        [InlineData(false, false, false, true, true, 1)]
+        // No retries if in transaction
+        [InlineData(false, true, true, true, false, 1)]
+        [InlineData(false, true, true, true, true, 1)]
+        // No retries on non-retriable exception
+        [InlineData(false, true, false, false, false, 1)]
+        [InlineData(false, true, false, false, true, 1)]
+        // No timeout configured - should retry once
+        [InlineData(true, true, false, true, false, 1)]
+        [InlineData(false, true, false, true, false, 2)]
+        // Timeout configured - should retry as many times as possible
+        [InlineData(true, true, false, true, true, 1)]
+        [InlineData(true, true, false, true, true, 2)]
+        [InlineData(true, true, false, true, true, 10)]
+        public void ShouldRetryOperation_should_return_expected_result(
+            bool expected,
+            bool isRetryRequested,
+            bool isInTransaction,
+            bool isRetriableException,
+            bool hasTimeout,
+            int attempt)
+        {
+            var retryableReadContext = CreateSubject(isRetryRequested, isInTransaction);
+            var exception = CoreExceptionHelper.CreateException(isRetriableException ? nameof(MongoNodeIsRecoveringException) : nameof(IOException));
+            var operationContext = new OperationContext(hasTimeout ?
TimeSpan.FromSeconds(42) : null, CancellationToken.None); + + var result = RetryableReadOperationExecutorReflector.ShouldRetryOperation(operationContext, retryableReadContext, exception, attempt); + + Assert.Equal(expected, result); + } + + private static RetryableReadContext CreateSubject(bool retryRequested, bool isInTransaction) + { + var sessionMock = new Mock<ICoreSessionHandle>(); + sessionMock.SetupGet(m => m.IsInTransaction).Returns(isInTransaction); + var bindingMock = new Mock<IReadBinding>(); + bindingMock.SetupGet(m => m.Session).Returns(sessionMock.Object); + return new RetryableReadContext(bindingMock.Object, retryRequested); + } + + private static class RetryableReadOperationExecutorReflector + { + public static bool ShouldRetryOperation(OperationContext operationContext, RetryableReadContext context, Exception exception, int attempt) + => (bool)Reflector.InvokeStatic(typeof(RetryableReadOperationExecutor), nameof(ShouldRetryOperation), operationContext, context, exception, attempt); + } + } +} + diff --git a/tests/MongoDB.Driver.Tests/Core/Operations/RetryableWriteOperationExecutorTests.cs b/tests/MongoDB.Driver.Tests/Core/Operations/RetryableWriteOperationExecutorTests.cs index 9dd51a1ec42..d01706d6128 100644 --- a/tests/MongoDB.Driver.Tests/Core/Operations/RetryableWriteOperationExecutorTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Operations/RetryableWriteOperationExecutorTests.cs @@ -1,4 +1,4 @@ -/* Copyright 2020-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,17 +13,14 @@ * limitations under the License. 
*/ -using System.Linq; +using System; +using System.Collections.Generic; using System.Net; -using System.Reflection; -using System.Threading; using FluentAssertions; using MongoDB.Bson; using MongoDB.Bson.TestHelpers; using MongoDB.Driver.Core.Bindings; using MongoDB.Driver.Core.Clusters; -using MongoDB.Driver.Core.Connections; -using MongoDB.Driver.Core.Misc; using MongoDB.Driver.Core.Operations; using MongoDB.Driver.Core.Servers; using Moq; @@ -36,9 +33,9 @@ public class RetryableWriteOperationExecutorTests [Fact] public void AreRetryableWritesSupportedTest() { - var connectionDescription = CreateConnectionDescription(withLogicalSessionTimeout: false, serviceId: true); + var serverDescription = CreateServerDescription(withLogicalSessionTimeout: false, isLoadBalanced: true); - var result = RetryableWriteOperationExecutorReflector.AreRetryableWritesSupported(connectionDescription); + var result = RetryableWriteOperationExecutorReflector.AreRetryableWritesSupported(serverDescription); result.Should().BeTrue(); } @@ -69,24 +66,20 @@ public void DoesContextAllowRetries_should_return_expected_result( { var context = CreateContext(retryRequested, areRetryableWritesSupported, hasSessionId, isInTransaction); - var result = RetryableWriteOperationExecutorReflector.DoesContextAllowRetries(context); + var result = RetryableWriteOperationExecutorReflector.DoesContextAllowRetries(context, context.ChannelSource.ServerDescription); result.Should().Be(expectedResult); } [Theory] - [InlineData(false, false, true)] - [InlineData(false, true, true)] - [InlineData(true, false, false)] - [InlineData(true, true, true)] - public void IsOperationAcknowledged_should_return_expected_result( - bool withWriteConcern, - bool isAcknowledged, - bool expectedResult) + [InlineData(null, true)] + [InlineData(false, false)] + [InlineData(true, true)] + public void IsOperationAcknowledged_should_return_expected_result(bool? 
isAcknowledged, bool expectedResult) { - var operation = CreateOperation(withWriteConcern, isAcknowledged); + var writeConcern = isAcknowledged.HasValue ? (isAcknowledged.Value ? WriteConcern.Acknowledged : WriteConcern.Unacknowledged) : null; - var result = RetryableWriteOperationExecutorReflector.IsOperationAcknowledged(operation); + var result = RetryableWriteOperationExecutorReflector.IsOperationAcknowledged(writeConcern); result.Should().Be(expectedResult); } @@ -98,59 +91,35 @@ private IWriteBinding CreateBinding(bool areRetryableWritesSupported, bool hasSe var session = CreateSession(hasSessionId, isInTransaction); var channelSource = CreateChannelSource(areRetryableWritesSupported); mockBinding.SetupGet(m => m.Session).Returns(session); - mockBinding.Setup(m => m.GetWriteChannelSource(CancellationToken.None)).Returns(channelSource); + mockBinding.Setup(m => m.GetWriteChannelSource(It.IsAny<OperationContext>(), It.IsAny<IReadOnlyCollection<ServerDescription>>())).Returns(channelSource); return mockBinding.Object; } - private IChannelHandle CreateChannel(bool areRetryableWritesSupported) - { - var mockChannel = new Mock<IChannelHandle>(); - var connectionDescription = CreateConnectionDescription(withLogicalSessionTimeout: areRetryableWritesSupported); - mockChannel.SetupGet(m => m.ConnectionDescription).Returns(connectionDescription); - return mockChannel.Object; - } - private IChannelSourceHandle CreateChannelSource(bool areRetryableWritesSupported) { var mockChannelSource = new Mock<IChannelSourceHandle>(); - var channel = CreateChannel(areRetryableWritesSupported); - mockChannelSource.Setup(m => m.GetChannel(CancellationToken.None)).Returns(channel); + var channel = Mock.Of<IChannelHandle>(); + mockChannelSource.Setup(m => m.GetChannel(It.IsAny<OperationContext>())).Returns(channel); + mockChannelSource.Setup(m => m.ServerDescription).Returns(CreateServerDescription(withLogicalSessionTimeout: areRetryableWritesSupported)); return mockChannelSource.Object; 
} - private ConnectionDescription CreateConnectionDescription(bool withLogicalSessionTimeout, bool? serviceId = null) + private ServerDescription CreateServerDescription(bool withLogicalSessionTimeout, bool isLoadBalanced = false) { var clusterId = new ClusterId(1); var endPoint = new DnsEndPoint("localhost", 27017); var serverId = new ServerId(clusterId, endPoint); - var connectionId = new ConnectionId(serverId, 1); - var helloResultDocument = BsonDocument.Parse($"{{ ok : 1, maxWireVersion : {WireVersion.Server42} }}"); - if (withLogicalSessionTimeout) - { - helloResultDocument["logicalSessionTimeoutMinutes"] = 1; - helloResultDocument["msg"] = "isdbgrid"; // mongos - } - if (serviceId.HasValue) - { - helloResultDocument["serviceId"] = ObjectId.Empty; // load balancing mode - } - var helloResult = new HelloResult(helloResultDocument); - var connectionDescription = new ConnectionDescription(connectionId, helloResult); - return connectionDescription; + TimeSpan? logicalSessionTimeout = withLogicalSessionTimeout ? TimeSpan.FromMinutes(1) : null; + var serverType = isLoadBalanced ? ServerType.LoadBalanced : ServerType.ShardRouter; + + return new ServerDescription(serverId, endPoint, logicalSessionTimeout: logicalSessionTimeout, type: serverType); } private RetryableWriteContext CreateContext(bool retryRequested, bool areRetryableWritesSupported, bool hasSessionId, bool isInTransaction) { var binding = CreateBinding(areRetryableWritesSupported, hasSessionId, isInTransaction); - return RetryableWriteContext.Create(binding, retryRequested, CancellationToken.None); - } - - private IRetryableWriteOperation<BsonDocument> CreateOperation(bool withWriteConcern, bool isAcknowledged) - { - var mockOperation = new Mock<IRetryableWriteOperation<BsonDocument>>(); - var writeConcern = withWriteConcern ? (isAcknowledged ? 
WriteConcern.Acknowledged : WriteConcern.Unacknowledged) : null; - mockOperation.SetupGet(m => m.WriteConcern).Returns(writeConcern); - return mockOperation.Object; + var context = RetryableWriteContext.Create(OperationContext.NoTimeout, binding, retryRequested); + return context; } private ICoreSessionHandle CreateSession(bool hasSessionId, bool isInTransaction) @@ -165,28 +134,13 @@ private ICoreSessionHandle CreateSession(bool hasSessionId, bool isInTransaction // nested types internal static class RetryableWriteOperationExecutorReflector { - public static bool AreRetryableWritesSupported(ConnectionDescription connectionDescription) - { - return (bool)Reflector.InvokeStatic(typeof(RetryableWriteOperationExecutor), nameof(AreRetryableWritesSupported), connectionDescription); - } + public static bool AreRetryableWritesSupported(ServerDescription serverDescription) + => (bool)Reflector.InvokeStatic(typeof(RetryableWriteOperationExecutor), nameof(AreRetryableWritesSupported), serverDescription); - public static bool DoesContextAllowRetries(RetryableWriteContext context) => - (bool)Reflector.InvokeStatic(typeof(RetryableWriteOperationExecutor), nameof(DoesContextAllowRetries), context); + public static bool DoesContextAllowRetries(RetryableWriteContext context, ServerDescription server) + => (bool)Reflector.InvokeStatic(typeof(RetryableWriteOperationExecutor), nameof(DoesContextAllowRetries), context, server); - public static bool IsOperationAcknowledged(IRetryableWriteOperation<BsonDocument> operation) - { - var methodInfoDefinition = typeof(RetryableWriteOperationExecutor).GetMethods(BindingFlags.NonPublic | BindingFlags.Static) - .Where(m => m.Name == nameof(IsOperationAcknowledged)) - .Single(); - var methodInfo = methodInfoDefinition.MakeGenericMethod(typeof(BsonDocument)); - try - { - return (bool)methodInfo.Invoke(null, new object[] { operation }); - } - catch (TargetInvocationException exception) - { - throw exception.InnerException; - } - } + public static 
bool IsOperationAcknowledged(WriteConcern writeConcern) + => (bool)Reflector.InvokeStatic(typeof(RetryableWriteOperationExecutor), nameof(IsOperationAcknowledged), writeConcern); } } diff --git a/tests/MongoDB.Driver.Tests/Core/Operations/UpdateOpcodeOperationTests.cs b/tests/MongoDB.Driver.Tests/Core/Operations/UpdateOpcodeOperationTests.cs deleted file mode 100644 index 150eaa56d90..00000000000 --- a/tests/MongoDB.Driver.Tests/Core/Operations/UpdateOpcodeOperationTests.cs +++ /dev/null @@ -1,64 +0,0 @@ -/* Copyright 2020-present MongoDB Inc. -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. 
-*/ - -using System; -using FluentAssertions; -using MongoDB.Bson; -using MongoDB.TestHelpers.XunitExtensions; -using MongoDB.Driver.Core.Misc; -using Xunit; - -namespace MongoDB.Driver.Core.Operations -{ - public class UpdateOpcodeOperationTests : OperationTestBase - { - [Theory] - [ParameterAttributeData] - public void Execute_with_hint_should_throw_when_hint_is_not_supported( - [Values(0, 1)] int w, - [Values(false, true)] bool async) - { - var writeConcern = new WriteConcern(w); - var request = new UpdateRequest( - UpdateType.Update, - new BsonDocument("x", 1), - new BsonDocument("$set", new BsonDocument("x", 2))) - { - Hint = new BsonDocument("_id", 1) - }; - var subject = new UpdateOpcodeOperation(_collectionNamespace, request, _messageEncoderSettings) - { - WriteConcern = writeConcern - }; - - var exception = Record.Exception(() => ExecuteOperation(subject, async)); - - if (!writeConcern.IsAcknowledged) - { - exception.Should().BeOfType<NotSupportedException>(); - } -#pragma warning disable CS0618 // Type or member is obsolete - else if (Feature.HintForUpdateAndReplaceOperations.IsSupported(CoreTestConfiguration.MaxWireVersion)) -#pragma warning restore CS0618 // Type or member is obsolete - { - exception.Should().BeNull(); - } - else - { - exception.Should().BeOfType<MongoCommandException>(); - } - } - } -} diff --git a/tests/MongoDB.Driver.Tests/Core/Operations/WriteCommandOperationTests.cs b/tests/MongoDB.Driver.Tests/Core/Operations/WriteCommandOperationTests.cs index 6da9926706f..71c26aebdba 100644 --- a/tests/MongoDB.Driver.Tests/Core/Operations/WriteCommandOperationTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Operations/WriteCommandOperationTests.cs @@ -1,4 +1,4 @@ -/* Copyright 2016-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,9 +13,8 @@ * limitations under the License. 
*/ -using System; +using System.Collections.Generic; using System.Net; -using System.Threading; using System.Threading.Tasks; using FluentAssertions; using MongoDB.Bson; @@ -33,7 +32,7 @@ namespace MongoDB.Driver.Core.Operations { - public class WriteCommandOperationTests + public class WriteCommandOperationTests : OperationTestBase { // public methods [Fact] @@ -65,16 +64,13 @@ public void Execute_should_call_channel_Command_with_unwrapped_command_when_wrap var mockChannel = CreateMockChannel(); var channelSource = CreateMockChannelSource(serverDescription, mockChannel.Object).Object; var binding = CreateMockWriteBinding(channelSource).Object; - using var cancellationTokenSource = new CancellationTokenSource(); - var cancellationToken = cancellationTokenSource.Token; - BsonDocument result; + ExecuteOperation(subject, binding, async); if (async) { - result = subject.ExecuteAsync(binding, cancellationToken).GetAwaiter().GetResult(); - mockChannel.Verify( c => c.CommandAsync( + It.IsAny<OperationContext>(), binding.Session, ReadPreference.Primary, subject.DatabaseNamespace, @@ -85,16 +81,14 @@ public void Execute_should_call_channel_Command_with_unwrapped_command_when_wrap null, // postWriteAction CommandResponseHandling.Return, subject.ResultSerializer, - subject.MessageEncoderSettings, - cancellationToken), + subject.MessageEncoderSettings), Times.Once); } else { - result = subject.Execute(binding, cancellationToken); - mockChannel.Verify( c => c.Command( + It.IsAny<OperationContext>(), binding.Session, ReadPreference.Primary, subject.DatabaseNamespace, @@ -105,8 +99,7 @@ public void Execute_should_call_channel_Command_with_unwrapped_command_when_wrap null, // postWriteAction CommandResponseHandling.Return, subject.ResultSerializer, - subject.MessageEncoderSettings, - cancellationToken), + subject.MessageEncoderSettings), Times.Once); } } @@ -122,17 +115,14 @@ public void Execute_should_call_channel_Command_with_wrapped_command_when_additi var mockChannel = 
CreateMockChannel(); var channelSource = CreateMockChannelSource(serverDescription, mockChannel.Object).Object; var binding = CreateMockWriteBinding(channelSource).Object; - using var cancellationTokenSource = new CancellationTokenSource(); - var cancellationToken = cancellationTokenSource.Token; var command = BsonDocument.Parse("{ command : 1 }"); - BsonDocument result; + ExecuteOperation(subject, binding, async); if (async) { - result = subject.ExecuteAsync(binding, cancellationToken).GetAwaiter().GetResult(); - mockChannel.Verify( c => c.CommandAsync( + It.IsAny<OperationContext>(), It.IsAny<ICoreSessionHandle>(), It.IsAny<ReadPreference>(), subject.DatabaseNamespace, @@ -143,16 +133,14 @@ public void Execute_should_call_channel_Command_with_wrapped_command_when_additi null, // postWriteAction CommandResponseHandling.Return, subject.ResultSerializer, - subject.MessageEncoderSettings, - cancellationToken), + subject.MessageEncoderSettings), Times.Once); } else { - result = subject.Execute(binding, cancellationToken); - mockChannel.Verify( c => c.Command( + It.IsAny<OperationContext>(), It.IsAny<ICoreSessionHandle>(), It.IsAny<ReadPreference>(), subject.DatabaseNamespace, @@ -163,8 +151,7 @@ public void Execute_should_call_channel_Command_with_wrapped_command_when_additi null, // postWriteAction CommandResponseHandling.Return, subject.ResultSerializer, - subject.MessageEncoderSettings, - cancellationToken), + subject.MessageEncoderSettings), Times.Once); } } @@ -180,17 +167,14 @@ public void Execute_should_call_channel_Command_with_wrapped_command_when_commen var mockChannel = CreateMockChannel(); var channelSource = CreateMockChannelSource(serverDescription, mockChannel.Object).Object; var binding = CreateMockWriteBinding(channelSource).Object; - using var cancellationTokenSource = new CancellationTokenSource(); - var cancellationToken = cancellationTokenSource.Token; var additionalOptions = BsonDocument.Parse("{ $comment : \"comment\" }"); - BsonDocument result; 
+ ExecuteOperation(subject, binding, async); if (async) { - result = subject.ExecuteAsync(binding, cancellationToken).GetAwaiter().GetResult(); - mockChannel.Verify( c => c.CommandAsync( + It.IsAny<OperationContext>(), binding.Session, ReadPreference.Primary, subject.DatabaseNamespace, @@ -201,16 +185,14 @@ public void Execute_should_call_channel_Command_with_wrapped_command_when_commen null, // postWriteAction CommandResponseHandling.Return, subject.ResultSerializer, - subject.MessageEncoderSettings, - cancellationToken), + subject.MessageEncoderSettings), Times.Once); } else { - result = subject.Execute(binding, cancellationToken); - mockChannel.Verify( c => c.Command( + It.IsAny<OperationContext>(), binding.Session, ReadPreference.Primary, subject.DatabaseNamespace, @@ -221,8 +203,7 @@ public void Execute_should_call_channel_Command_with_wrapped_command_when_commen null, // postWriteAction CommandResponseHandling.Return, subject.ResultSerializer, - subject.MessageEncoderSettings, - cancellationToken), + subject.MessageEncoderSettings), Times.Once); } } @@ -233,8 +214,10 @@ private Mock<IWriteBinding> CreateMockWriteBinding(IChannelSourceHandle channelS var mockBinding = new Mock<IWriteBinding>(); var mockSession = new Mock<ICoreSessionHandle>(); mockBinding.SetupGet(b => b.Session).Returns(mockSession.Object); - mockBinding.Setup(b => b.GetWriteChannelSource(It.IsAny<CancellationToken>())).Returns(channelSource); - mockBinding.Setup(b => b.GetWriteChannelSourceAsync(It.IsAny<CancellationToken>())).Returns(Task.FromResult(channelSource)); + mockBinding.Setup(b => b.GetWriteChannelSource(It.IsAny<OperationContext>())).Returns(channelSource); + mockBinding.Setup(b => b.GetWriteChannelSourceAsync(It.IsAny<OperationContext>())).Returns(Task.FromResult(channelSource)); + mockBinding.Setup(b => b.GetWriteChannelSource(It.IsAny<OperationContext>(), It.IsAny<IReadOnlyCollection<ServerDescription>>())).Returns(channelSource); + mockBinding.Setup(b => 
b.GetWriteChannelSourceAsync(It.IsAny<OperationContext>(), It.IsAny<IReadOnlyCollection<ServerDescription>>())).Returns(Task.FromResult(channelSource)); return mockBinding; } @@ -248,8 +231,8 @@ private Mock<IChannelSourceHandle> CreateMockChannelSource(ServerDescription ser { var mockChannelSource = new Mock<IChannelSourceHandle>(); mockChannelSource.SetupGet(s => s.ServerDescription).Returns(serverDescription); - mockChannelSource.Setup(s => s.GetChannel(It.IsAny<CancellationToken>())).Returns(channel); - mockChannelSource.Setup(s => s.GetChannelAsync(It.IsAny<CancellationToken>())).Returns(Task.FromResult(channel)); + mockChannelSource.Setup(s => s.GetChannel(It.IsAny<OperationContext>())).Returns(channel); + mockChannelSource.Setup(s => s.GetChannelAsync(It.IsAny<OperationContext>())).Returns(Task.FromResult(channel)); return mockChannelSource; } diff --git a/tests/MongoDB.Driver.Tests/Core/Operations/WriteConcernHelperTests.cs b/tests/MongoDB.Driver.Tests/Core/Operations/WriteConcernHelperTests.cs index a90f64cf45f..8f85439ccb3 100644 --- a/tests/MongoDB.Driver.Tests/Core/Operations/WriteConcernHelperTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Operations/WriteConcernHelperTests.cs @@ -1,4 +1,4 @@ -/* Copyright 2018-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -14,14 +14,10 @@ */ using System; -using System.Collections.Generic; -using System.Linq; -using System.Text; -using System.Threading.Tasks; +using System.Threading; using FluentAssertions; using MongoDB.Bson; using MongoDB.Driver.Core.Bindings; -using MongoDB.Driver.Core.Misc; using Moq; using Xunit; @@ -30,55 +26,38 @@ namespace MongoDB.Driver.Core.Operations public class WriteConcernHelperTests { [Theory] - [InlineData(false, "{ }", null)] - [InlineData(false, "{ w : 1 }", "{ w : 1 }")] - [InlineData(true, "{ }", null)] - [InlineData(true, "{ w : 1 }", null)] - public void GetWriteConcernForCommand_should_return_expected_result( - bool isInTransaction, - string writeConcernJson, - string expectedResult) - { - var session = CreateSession(isInTransaction: isInTransaction); - var writeConcern = writeConcernJson == null ? null : WriteConcern.FromBsonDocument(BsonDocument.Parse(writeConcernJson)); + [InlineData(false, false, null, null)] + [InlineData(false, false, "{ }", null)] + [InlineData(false, false, "{ w : 1 }", "{ w : 1 }")] + [InlineData(false, false, "{ wtimeout : 100 }", "{ wtimeout : 100 }")] + [InlineData(false, false, "{ w : 1, wtimeout : 100 }", "{ w : 1, wtimeout : 100 }")] + [InlineData(false, true, null, null)] + [InlineData(false, true, "{ }", null)] + [InlineData(false, true, "{ w : 1 }", "{ w : 1 }")] + [InlineData(false, true, "{ wtimeout : 100 }", null)] + [InlineData(false, true, "{ w : 1, wtimeout : 100 }", "{ w : 1 }")] - var result = WriteConcernHelper.GetEffectiveWriteConcern(session, writeConcern); - - result.Should().Be(expectedResult); - } - - [Theory] - [InlineData(false, "{ }", null)] - [InlineData(false, "{ w : 1 }", "{ w : 1 }")] - [InlineData(true, "{ }", null)] - [InlineData(true, "{ w : 1 }", null)] - public void GetWriteConcernForCommandThatWrites_should_return_expected_result( - bool isInTransaction, - string writeConcernJson, - string expectedResult) - { - var session = CreateSession(isInTransaction: isInTransaction); - var 
writeConcern = writeConcernJson == null ? null : WriteConcern.FromBsonDocument(BsonDocument.Parse(writeConcernJson)); - - var result = WriteConcernHelper.GetEffectiveWriteConcern(session, writeConcern); - - result.Should().Be(expectedResult); - } - - [Theory] - [InlineData(false, "{ }", null)] - [InlineData(false, "{ w : 1 }", "{ w : 1 }")] - [InlineData(true, "{ }", null)] - [InlineData(true, "{ w : 1 }", null)] - public void GetWriteConcernForWriteCommand_should_return_expected_result( + [InlineData(true, false, null, null)] + [InlineData(true, false, "{ }", null)] + [InlineData(true, false, "{ w : 1 }", null)] + [InlineData(true, false, "{ wtimeout : 100 }", null)] + [InlineData(true, false, "{ w : 1, wtimeout : 100 }", null)] + [InlineData(true, true, null, null)] + [InlineData(true, true, "{ }", null)] + [InlineData(true, true, "{ w : 1 }", null)] + [InlineData(true, true, "{ wtimeout : 100 }", null)] + [InlineData(true, true, "{ w : 1, wtimeout : 100 }", null)] + public void GetEffectiveWriteConcern_should_return_expected_result( bool isInTransaction, + bool hasOperationTimeout, string writeConcernJson, string expectedResult) { var session = CreateSession(isInTransaction: isInTransaction); + var operationContext = hasOperationTimeout ? new OperationContext(TimeSpan.FromMilliseconds(42), CancellationToken.None) : OperationContext.NoTimeout; var writeConcern = writeConcernJson == null ? 
null : WriteConcern.FromBsonDocument(BsonDocument.Parse(writeConcernJson)); - var result = WriteConcernHelper.GetEffectiveWriteConcern(session, writeConcern); + var result = WriteConcernHelper.GetEffectiveWriteConcern(operationContext, session, writeConcern); result.Should().Be(expectedResult); } diff --git a/tests/MongoDB.Driver.Tests/Core/Servers/LoadBalancedServerTests.cs b/tests/MongoDB.Driver.Tests/Core/Servers/LoadBalancedServerTests.cs index 6a663956ebe..26c7a3b7eac 100644 --- a/tests/MongoDB.Driver.Tests/Core/Servers/LoadBalancedServerTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Servers/LoadBalancedServerTests.cs @@ -57,12 +57,10 @@ public LoadBalancedTests(ITestOutputHelper output) : base(output) _clusterId = new ClusterId(); _endPoint = new DnsEndPoint("localhost", 27017); - var mockConnectionExceptionHandler = new Mock<IConnectionExceptionHandler>(); - _clusterClock = new Mock<IClusterClock>().Object; _mockConnectionPool = new Mock<IConnectionPool>(); - _mockConnectionPool.Setup(p => p.AcquireConnection(It.IsAny<CancellationToken>())).Returns(new Mock<IConnectionHandle>().Object); - _mockConnectionPool.Setup(p => p.AcquireConnectionAsync(It.IsAny<CancellationToken>())).Returns(Task.FromResult(new Mock<IConnectionHandle>().Object)); + _mockConnectionPool.Setup(p => p.AcquireConnection(It.IsAny<OperationContext>())).Returns(new Mock<IConnectionHandle>().Object); + _mockConnectionPool.Setup(p => p.AcquireConnectionAsync(It.IsAny<OperationContext>())).Returns(Task.FromResult(new Mock<IConnectionHandle>().Object)); _mockConnectionPoolFactory = new Mock<IConnectionPoolFactory>(); _mockConnectionPoolFactory .Setup(f => f.CreateConnectionPool(It.IsAny<ServerId>(), _endPoint, It.IsAny<IConnectionExceptionHandler>())) @@ -77,28 +75,6 @@ public LoadBalancedTests(ITestOutputHelper output) : base(output) _connectionId = new ConnectionId(_subject.ServerId); } - [Theory] - [ParameterAttributeData] - public void 
ChannelFork_should_not_affect_operations_count([Values(false, true)] bool async) - { - IClusterableServer server = SetupServer(false, false); - - var channel = async ? - server.GetChannelAsync(CancellationToken.None).GetAwaiter().GetResult() : - server.GetChannel(CancellationToken.None); - - server.OutstandingOperationsCount.Should().Be(1); - - var forkedChannel = channel.Fork(); - server.OutstandingOperationsCount.Should().Be(1); - - forkedChannel.Dispose(); - server.OutstandingOperationsCount.Should().Be(1); - - channel.Dispose(); - server.OutstandingOperationsCount.Should().Be(0); - } - [Fact] public void Constructor_should_not_throw_when_serverApi_is_null() { @@ -169,7 +145,7 @@ public void Dispose_should_dispose_the_server() [Theory] [ParameterAttributeData] - public void GetChannel_should_clear_connection_pool_when_opening_connection_throws_MongoAuthenticationException( + public async Task GetChannel_should_clear_connection_pool_when_opening_connection_throws_MongoAuthenticationException( [Values(false, true)] bool async) { var connectionId = new ConnectionId(new ServerId(_clusterId, _endPoint)); @@ -181,11 +157,11 @@ public void GetChannel_should_clear_connection_pool_when_opening_connection_thro var mockConnectionPool = new Mock<IConnectionPool>(); var authenticationException = new MongoAuthenticationException(connectionId, "Invalid login.") { ServiceId = ObjectId.GenerateNewId() }; mockConnectionPool - .Setup(p => p.AcquireConnection(It.IsAny<CancellationToken>())) + .Setup(p => p.AcquireConnection(It.IsAny<OperationContext>())) .Callback(() => server.HandleExceptionOnOpen(authenticationException)) .Throws(authenticationException); mockConnectionPool - .Setup(p => p.AcquireConnectionAsync(It.IsAny<CancellationToken>())) + .Setup(p => p.AcquireConnectionAsync(It.IsAny<OperationContext>())) .Callback(() => server.HandleExceptionOnOpen(authenticationException)) .Throws(authenticationException); mockConnectionPool.Setup(p => p.Clear(It.IsAny<ObjectId>())); @@ 
-205,17 +181,9 @@ public void GetChannel_should_clear_connection_pool_when_opening_connection_thro _eventLogger); server.Initialize(); - var exception = Record.Exception(() => - { - if (async) - { - server.GetChannelAsync(CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - server.GetChannel(CancellationToken.None); - } - }); + var exception = async ? + await Record.ExceptionAsync(() => server.GetChannelAsync(OperationContext.NoTimeout)) : + Record.Exception(() => server.GetChannel(OperationContext.NoTimeout)); exception.Should().BeOfType<MongoAuthenticationException>(); mockConnectionPool.Verify(p => p.Clear(It.IsAny<ObjectId>()), Times.Once()); @@ -223,42 +191,28 @@ public void GetChannel_should_clear_connection_pool_when_opening_connection_thro [Theory] [ParameterAttributeData] - public void GetChannel_should_get_a_connection([Values(false, true)] bool async) + public async Task GetChannel_should_get_a_connection([Values(false, true)] bool async) { _subject.Initialize(); - IChannelHandle channel; - if (async) - { - channel = _subject.GetChannelAsync(CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - channel = _subject.GetChannel(CancellationToken.None); - } + var channel = async ? + await _subject.GetChannelAsync(OperationContext.NoTimeout) : + _subject.GetChannel(OperationContext.NoTimeout); channel.Should().NotBeNull(); } [Theory] [ParameterAttributeData] - public void GetChannel_should_not_increase_operations_count_on_exception( + public async Task GetChannel_should_not_increase_operations_count_on_exception( [Values(false, true)] bool async, [Values(false, true)] bool connectionOpenException) { IClusterableServer server = SetupServer(connectionOpenException, !connectionOpenException); - var exception = Record.Exception(() => - { - if (async) - { - server.GetChannelAsync(CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - server.GetChannel(CancellationToken.None); - } - }); + var exception = async ? 
+ await Record.ExceptionAsync(() => _subject.GetChannelAsync(OperationContext.NoTimeout)) : + Record.Exception(() => _subject.GetChannel(OperationContext.NoTimeout)); exception.Should().NotBeNull(); server.OutstandingOperationsCount.Should().Be(0); @@ -266,74 +220,58 @@ public void GetChannel_should_not_increase_operations_count_on_exception( [Theory] [ParameterAttributeData] - public void GetChannel_should_set_operations_count_correctly( + public async Task GetChannel_should_set_operations_count_correctly( [Values(false, true)] bool async, [Values(0, 1, 2, 10)] int operationsCount) { IClusterableServer server = SetupServer(false, false); - var channels = new List<IChannel>(); + var channels = new List<IChannelHandle>(); for (int i = 0; i < operationsCount; i++) { - if (async) - { - channels.Add(server.GetChannelAsync(CancellationToken.None).GetAwaiter().GetResult()); - } - else - { - channels.Add(server.GetChannel(CancellationToken.None)); - } + var connection = async ? + await server.GetChannelAsync(OperationContext.NoTimeout) : + server.GetChannel(OperationContext.NoTimeout); + channels.Add(connection); } server.OutstandingOperationsCount.Should().Be(operationsCount); foreach (var channel in channels) { - channel.Dispose(); + server.DecrementOutstandingOperationsCount(); server.OutstandingOperationsCount.Should().Be(--operationsCount); } } [Theory] [ParameterAttributeData] - public void GetChannel_should_throw_when_not_initialized( + public async Task GetChannel_should_throw_when_not_initialized( [Values(false, true)] bool async) { - Exception exception; - if (async) - { - exception = Record.Exception(() => _subject.GetChannelAsync(CancellationToken.None).GetAwaiter().GetResult()); - } - else - { - exception = Record.Exception(() => _subject.GetChannel(CancellationToken.None)); - } + var exception = async ? 
+ await Record.ExceptionAsync(() => _subject.GetChannelAsync(OperationContext.NoTimeout)) : + Record.Exception(() => _subject.GetChannel(OperationContext.NoTimeout)); exception.Should().BeOfType<InvalidOperationException>(); } [Theory] [ParameterAttributeData] - public void GetChannel_should_throw_when_disposed([Values(false, true)] bool async) + public async Task GetChannel_should_throw_when_disposed([Values(false, true)] bool async) { _subject.Dispose(); - Exception exception; - if (async) - { - exception = Record.Exception(() => _subject.GetChannelAsync(CancellationToken.None).GetAwaiter().GetResult()); - } - else - { - exception = Record.Exception(() => _subject.GetChannel(CancellationToken.None)); - } + var exception = async ? + await Record.ExceptionAsync(() => _subject.GetChannelAsync(OperationContext.NoTimeout)) : + Record.Exception(() => _subject.GetChannel(OperationContext.NoTimeout)); exception.Should().BeOfType<ObjectDisposedException>(); } [Theory] [ParameterAttributeData] - public void GetChannel_should_not_update_topology_and_clear_connection_pool_on_MongoConnectionException( + public async Task GetChannel_should_not_update_topology_and_clear_connection_pool_on_MongoConnectionException( [Values("TimedOutSocketException", "NetworkUnreachableSocketException")] string errorType, [Values(false, true)] bool async) { @@ -345,8 +283,8 @@ public void GetChannel_should_not_update_topology_and_clear_connection_pool_on_M var openConnectionException = new MongoConnectionException(connectionId, "Oops", new IOException("Cry", innerMostException)); var mockConnection = new Mock<IConnectionHandle>(); mockConnection.Setup(c => c.ConnectionId).Returns(connectionId); - mockConnection.Setup(c => c.Open(It.IsAny<CancellationToken>())).Throws(openConnectionException); - mockConnection.Setup(c => c.OpenAsync(It.IsAny<CancellationToken>())).ThrowsAsync(openConnectionException); + mockConnection.Setup(c => 
c.Open(It.IsAny<OperationContext>())).Throws(openConnectionException); + mockConnection.Setup(c => c.OpenAsync(It.IsAny<OperationContext>())).ThrowsAsync(openConnectionException); var connectionFactory = new Mock<IConnectionFactory>(); connectionFactory.Setup(cf => cf.CreateConnection(serverId, _endPoint)).Returns(mockConnection.Object); @@ -363,18 +301,10 @@ public void GetChannel_should_not_update_topology_and_clear_connection_pool_on_M var subject = new LoadBalancedServer(_clusterId, _clusterClock, _settings, _endPoint, mockConnectionPoolFactory.Object, _serverApi, _eventLogger); subject.Initialize(); - IChannelHandle channel = null; - Exception exception; - if (async) - { - exception = Record.Exception(() => channel = subject.GetChannelAsync(CancellationToken.None).GetAwaiter().GetResult()); - } - else - { - exception = Record.Exception(() => channel = subject.GetChannel(CancellationToken.None)); - } + var exception = async ? + await Record.ExceptionAsync(() => subject.GetChannelAsync(OperationContext.NoTimeout)) : + Record.Exception(() => subject.GetChannel(OperationContext.NoTimeout)); - channel.Should().BeNull(); exception.Should().Be(openConnectionException); subject.Description.Type.Should().Be(ServerType.LoadBalanced); subject.Description.ReasonChanged.Should().Be("Initialized"); @@ -434,30 +364,30 @@ private Server SetupServer(bool exceptionOnConnectionOpen, bool exceptionOnConne if (exceptionOnConnectionAcquire) { mockConnectionPool - .Setup(p => p.AcquireConnection(It.IsAny<CancellationToken>())) + .Setup(p => p.AcquireConnection(It.IsAny<OperationContext>())) .Throws(new TimeoutException("Timeout")); mockConnectionPool - .Setup(p => p.AcquireConnectionAsync(It.IsAny<CancellationToken>())) + .Setup(p => p.AcquireConnectionAsync(It.IsAny<OperationContext>())) .Throws(new TimeoutException("Timeout")); mockConnectionPool.Setup(p => p.Clear(It.IsAny<bool>())); } else if (exceptionOnConnectionOpen) { mockConnectionPool - .Setup(p => 
p.AcquireConnection(It.IsAny<CancellationToken>())) + .Setup(p => p.AcquireConnection(It.IsAny<OperationContext>())) .Throws(new MongoAuthenticationException(connectionId, "Invalid login.")); mockConnectionPool - .Setup(p => p.AcquireConnectionAsync(It.IsAny<CancellationToken>())) + .Setup(p => p.AcquireConnectionAsync(It.IsAny<OperationContext>())) .Throws(new MongoAuthenticationException(connectionId, "Invalid login.")); mockConnectionPool.Setup(p => p.Clear(It.IsAny<bool>())); } else { mockConnectionPool - .Setup(p => p.AcquireConnection(It.IsAny<CancellationToken>())) + .Setup(p => p.AcquireConnection(It.IsAny<OperationContext>())) .Returns(mockConnectionHandle.Object); mockConnectionPool - .Setup(p => p.AcquireConnectionAsync(It.IsAny<CancellationToken>())) + .Setup(p => p.AcquireConnectionAsync(It.IsAny<OperationContext>())) .Returns(Task.FromResult(mockConnectionHandle.Object)); mockConnectionPool.Setup(p => p.Clear(It.IsAny<bool>())); } diff --git a/tests/MongoDB.Driver.Tests/Core/Servers/RoundTripTimeMonitorTests.cs b/tests/MongoDB.Driver.Tests/Core/Servers/RoundTripTimeMonitorTests.cs index 701fe502cdd..c1a9080b360 100644 --- a/tests/MongoDB.Driver.Tests/Core/Servers/RoundTripTimeMonitorTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Servers/RoundTripTimeMonitorTests.cs @@ -43,7 +43,7 @@ public class RoundTripTimeMonitorTests [Fact] public void Constructor_should_throw_connection_endpoint_is_null() { - var exception = Record.Exception(() => new RoundTripTimeMonitor(Mock.Of<IConnectionFactory>(), __serverId, endpoint: null, TimeSpan.Zero, serverApi: null, logger: null)); + var exception = Record.Exception(() => new RoundTripTimeMonitor(Mock.Of<IConnectionFactory>(), __serverId, endpoint: null, TimeSpan.Zero, TimeSpan.Zero, serverApi: null, logger: null)); var e = exception.Should().BeOfType<ArgumentNullException>().Subject; e.ParamName.Should().Be("endpoint"); } @@ -51,7 +51,7 @@ public void Constructor_should_throw_connection_endpoint_is_null() [Fact] 
public void Constructor_should_throw_connection_factory_is_null() { - var exception = Record.Exception(() => new RoundTripTimeMonitor(connectionFactory: null, __serverId, __endPoint, TimeSpan.Zero, serverApi: null, logger: null)); + var exception = Record.Exception(() => new RoundTripTimeMonitor(connectionFactory: null, __serverId, __endPoint, TimeSpan.Zero, TimeSpan.Zero, serverApi: null, logger: null)); var e = exception.Should().BeOfType<ArgumentNullException>().Subject; e.ParamName.Should().Be("connectionFactory"); } @@ -59,7 +59,7 @@ public void Constructor_should_throw_connection_factory_is_null() [Fact] public void Constructor_should_throw_connection_serverId_is_null() { - var exception = Record.Exception(() => new RoundTripTimeMonitor(Mock.Of<IConnectionFactory>(), serverId: null, __endPoint, TimeSpan.Zero, serverApi: null, logger: null)); + var exception = Record.Exception(() => new RoundTripTimeMonitor(Mock.Of<IConnectionFactory>(), serverId: null, __endPoint, TimeSpan.Zero, TimeSpan.Zero, serverApi: null, logger: null)); var e = exception.Should().BeOfType<ArgumentNullException>().Subject; e.ParamName.Should().Be("serverId"); } @@ -107,7 +107,7 @@ public void Round_trip_time_monitor_should_work_as_expected() }); mockConnection - .SetupSequence(c => c.ReceiveMessage(It.IsAny<int>(), It.IsAny<IMessageEncoderSelector>(), It.IsAny<MessageEncoderSettings>(), It.IsAny<CancellationToken>())) + .SetupSequence(c => c.ReceiveMessage(It.IsAny<OperationContext>(), It.IsAny<int>(), It.IsAny<IMessageEncoderSelector>(), It.IsAny<MessageEncoderSettings>())) .Returns( () => { @@ -176,6 +176,7 @@ public void Start_should_use_serverApi() __serverId, __endPoint, TimeSpan.FromMilliseconds(10), + TimeSpan.FromMilliseconds(10), serverApi, logger: null)) { @@ -217,6 +218,7 @@ public void RoundTripTimeMonitor_without_serverApi_but_with_loadBalancedConnecti __serverId, __endPoint, TimeSpan.FromMilliseconds(10), + TimeSpan.FromMilliseconds(10), null, logger: null)) { @@ -262,6 
+264,7 @@ private RoundTripTimeMonitor CreateSubject( __serverId, __endPoint, frequency, + TimeSpan.FromMilliseconds(10), serverApi: null, logger: null); } @@ -281,7 +284,7 @@ private ConnectionDescription CreateConnectionDescription() private RoundTripTimeMonitor CreateSubject(TimeSpan frequency, Mock<IConnection> mockConnection) { mockConnection - .Setup(c => c.ReceiveMessage(It.IsAny<int>(), It.IsAny<IMessageEncoderSelector>(), It.IsAny<MessageEncoderSettings>(), It.IsAny<CancellationToken>())) + .Setup(c => c.ReceiveMessage(It.IsAny<OperationContext>(), It.IsAny<int>(), It.IsAny<IMessageEncoderSelector>(), It.IsAny<MessageEncoderSettings>())) .Returns(() => CreateResponseMessage()); var mockConnectionFactory = new Mock<IConnectionFactory>(); diff --git a/tests/MongoDB.Driver.Tests/Core/Servers/ServerDescriptionTests.cs b/tests/MongoDB.Driver.Tests/Core/Servers/ServerDescriptionTests.cs index 4f629ff7d9b..16e928d8d11 100644 --- a/tests/MongoDB.Driver.Tests/Core/Servers/ServerDescriptionTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Servers/ServerDescriptionTests.cs @@ -261,34 +261,15 @@ public void Equals_should_return_true_when_all_fields_are_equal() [Theory] [InlineData(null, true)] [InlineData(new[] { 0, 0 }, false)] - [InlineData(new[] { 0, 1 }, false)] - [InlineData(new[] { 0, 2 }, false)] - [InlineData(new[] { 0, 6 }, false)] - [InlineData(new[] { 0, 7 }, true)] - [InlineData(new[] { 2, 2 }, false)] - [InlineData(new[] { 2, 6 }, false)] - [InlineData(new[] { 2, 7 }, true)] - [InlineData(new[] { 6, 6 }, false)] - [InlineData(new[] { 6, 7 }, true)] - [InlineData(new[] { 7, 7 }, true)] + [InlineData(new[] { 0, 7 }, false)] + [InlineData(new[] { 7, 7 }, false)] [InlineData(new[] { 7, 8 }, true)] - [InlineData(new[] { 10, 13 }, true)] - [InlineData(new[] { 10, 14 }, true)] - [InlineData(new[] { 13, 15 }, true)] - [InlineData(new[] { 14, 15 }, true)] - [InlineData(new[] { 15, 16 }, true)] - [InlineData(new[] { 16, 17 }, true)] - [InlineData(new[] { 18, 19 }, 
true)] - [InlineData(new[] { 19, 20 }, true)] - [InlineData(new[] { 20, 21 }, true)] - [InlineData(new[] { 21, 22 }, true)] - [InlineData(new[] { 22, 23 }, true)] - [InlineData(new[] { 23, 24 }, true)] - [InlineData(new[] { 24, 25 }, true)] - [InlineData(new[] { 25, 26 }, true)] - [InlineData(new[] { 26, 27 }, true)] - [InlineData(new[] { 27, 28 }, true)] - [InlineData(new[] { 28, 29 }, false)] + [InlineData(new[] { 8, 8 }, true)] + [InlineData(new[] { 8, 21 }, true)] + [InlineData(new[] { 28, 28 }, true)] + [InlineData(new[] { 28, 29 }, true)] + [InlineData(new[] { 29, 29 }, false)] + [InlineData(new[] { 29, 30 }, false)] public void IsCompatibleWithDriver_should_return_expected_result(int[] minMaxWireVersions, bool expectedResult) { var clusterId = new ClusterId(1); diff --git a/tests/MongoDB.Driver.Tests/Core/Servers/ServerFactoryTests.cs b/tests/MongoDB.Driver.Tests/Core/Servers/ServerFactoryTests.cs index 08c5f09ce37..ed279786905 100644 --- a/tests/MongoDB.Driver.Tests/Core/Servers/ServerFactoryTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Servers/ServerFactoryTests.cs @@ -132,7 +132,7 @@ public void CreateServer_should_return_correct_Server(ClusterType clusterType, T { var subject = new ServerFactory(_directConnection, _settings, _connectionPoolFactory, _serverMonitorFactory, _eventSubscriber, _serverApi, null); var clusterClock = new Mock<IClusterClock>().Object; - + var result = subject.CreateServer(clusterType, _clusterId, clusterClock, _endPoint); diff --git a/tests/MongoDB.Driver.Tests/Core/Servers/ServerMonitorTests.cs b/tests/MongoDB.Driver.Tests/Core/Servers/ServerMonitorTests.cs index 83233d68d0d..5970f9339bf 100644 --- a/tests/MongoDB.Driver.Tests/Core/Servers/ServerMonitorTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Servers/ServerMonitorTests.cs @@ -43,7 +43,7 @@ public class ServerMonitorTests : LoggableTestClass #region static private static readonly EndPoint __endPoint = new DnsEndPoint("localhost", 27017); private static readonly ServerId 
__serverId = new ServerId(new ClusterId(), __endPoint); - private static readonly ServerMonitorSettings __serverMonitorSettings = new ServerMonitorSettings(TimeSpan.FromSeconds(30), Timeout.InfiniteTimeSpan); + private static readonly ServerMonitorSettings __serverMonitorSettings = new ServerMonitorSettings(TimeSpan.FromSeconds(30), Timeout.InfiniteTimeSpan, TimeSpan.FromSeconds(30)); #endregion public ServerMonitorTests(ITestOutputHelper output) : base(output) @@ -67,7 +67,7 @@ public void CancelCurrentCheck_should_dispose_connection() [Fact] public void CancelCurrentCheck_should_do_nothing_if_disposed() { - using var subject = CreateSubject(out _, out _, out _); + var subject = CreateSubject(out _, out _, out _); subject.Dispose(); subject.CancelCurrentCheck(); @@ -141,7 +141,7 @@ public void Description_should_return_default_when_uninitialized() [Fact] public void Description_should_return_default_when_disposed() { - using var subject = CreateSubject(out _, out _, out _); + var subject = CreateSubject(out _, out _, out _); subject.Dispose(); @@ -151,7 +151,6 @@ public void Description_should_return_default_when_disposed() description.State.Should().Be(ServerState.Disconnected); } -#if WINDOWS [Fact] public void DescriptionChanged_should_be_raised_during_initial_handshake() { @@ -179,7 +178,6 @@ public void DescriptionChanged_should_be_raised_during_initial_handshake() capturedEvents.Next().Should().BeOfType<ServerHeartbeatStartedEvent>(); capturedEvents.Next().Should().BeOfType<ServerHeartbeatSucceededEvent>(); } -#endif [Fact] public void Description_should_be_connected_after_successful_heartbeat() @@ -204,7 +202,7 @@ public void Dispose_should_clear_all_resources_only_once() { var capturedEvents = new EventCapturer(); - using var subject = CreateSubject(out var mockConnection, out _, out var mockRoundTripTimeMonitor, capturedEvents, captureConnectionEvents: true); + var subject = CreateSubject(out var mockConnection, out _, out var mockRoundTripTimeMonitor, 
capturedEvents, captureConnectionEvents: true); SetupHeartbeatConnection(mockConnection); subject.Initialize(); @@ -308,11 +306,9 @@ public void InitializeHelloProtocol_should_use_streaming_protocol_when_available using var subject = CreateSubject(out var mockConnection, out _, out _); SetupHeartbeatConnection(mockConnection, isStreamable, autoFillStreamingResponses: true); - mockConnection.WasReadTimeoutChanged.Should().Be(null); var resultProtocol = subject.InitializeHelloProtocol(mockConnection, helloOk); if (isStreamable) { - mockConnection.WasReadTimeoutChanged.Should().BeTrue(); resultProtocol._command().Should().Contain(expectedCommand); resultProtocol._command().Should().Contain("topologyVersion"); resultProtocol._command().Should().Contain("maxAwaitTimeMS"); @@ -320,7 +316,6 @@ public void InitializeHelloProtocol_should_use_streaming_protocol_when_available } else { - mockConnection.WasReadTimeoutChanged.Should().Be(null); resultProtocol._command().Should().Contain(expectedCommand); resultProtocol._command().Should().NotContain("topologyVersion"); resultProtocol._command().Should().NotContain("maxAwaitTimeMS"); @@ -332,7 +327,7 @@ public void InitializeHelloProtocol_should_use_streaming_protocol_when_available public void RoundTripTimeMonitor_should_be_started_only_once_if_using_streaming_protocol() { var capturedEvents = new EventCapturer().Capture<ServerHeartbeatSucceededEvent>(); - var serverMonitorSettings = new ServerMonitorSettings(TimeSpan.FromSeconds(5), TimeSpan.FromMilliseconds(10)); + var serverMonitorSettings = new ServerMonitorSettings(TimeSpan.FromSeconds(5), TimeSpan.FromMilliseconds(10), TimeSpan.FromSeconds(5)); using var subject = CreateSubject(out var mockConnection, out _, out var mockRoundTripTimeMonitor, capturedEvents, serverMonitorSettings: serverMonitorSettings); SetupHeartbeatConnection(mockConnection, isStreamable: true, autoFillStreamingResponses: false); @@ -345,16 +340,16 @@ public void 
RoundTripTimeMonitor_should_be_started_only_once_if_using_streaming_ SpinWait.SpinUntil(() => capturedEvents.Count >= 4, TimeSpan.FromSeconds(5)).Should().BeTrue(); mockRoundTripTimeMonitor.Verify(m => m.Start(), Times.Once); mockRoundTripTimeMonitor.Verify(m => m.IsStarted, Times.AtLeast(4)); - subject.Dispose(); } [Fact] public void RoundTripTimeMonitor_should_not_be_started_if_using_polling_protocol() { var serverMonitorSettings = new ServerMonitorSettings( - TimeSpan.FromSeconds(5), - TimeSpan.FromMilliseconds(10), - serverMonitoringMode: ServerMonitoringMode.Poll); + ConnectTimeout: TimeSpan.FromSeconds(5), + HeartbeatInterval: TimeSpan.FromMilliseconds(10), + HeartbeatTimeout: TimeSpan.FromSeconds(5), + ServerMonitoringMode: ServerMonitoringMode.Poll); var capturedEvents = new EventCapturer().Capture<ServerHeartbeatSucceededEvent>(); using var subject = CreateSubject(out var mockConnection, out _, out var mockRoundTripTimeMonitor, capturedEvents, serverMonitorSettings: serverMonitorSettings); @@ -368,7 +363,6 @@ public void RoundTripTimeMonitor_should_not_be_started_if_using_polling_protocol SpinWait.SpinUntil(() => capturedEvents.Count >= 4, TimeSpan.FromSeconds(5)).Should().BeTrue(); mockRoundTripTimeMonitor.Verify(m => m.Start(), Times.Never); - subject.Dispose(); } [Fact] @@ -420,16 +414,16 @@ public void ServerHeartBeatEvents_should_not_be_awaited_if_using_polling_protoco .Capture<ServerHeartbeatStartedEvent>() .Capture<ServerHeartbeatSucceededEvent>(); - var serverMonitorSettings = new ServerMonitorSettings(TimeSpan.FromSeconds(5), TimeSpan.FromMilliseconds(10), serverMonitoringMode: ServerMonitoringMode.Poll); - using var subject = CreateSubject(out var mockConnection, out _, out _, capturedEvents, serverMonitorSettings: serverMonitorSettings); - - SetupHeartbeatConnection(mockConnection, isStreamable: true, autoFillStreamingResponses: false); - mockConnection.EnqueueCommandResponseMessage(CreateHeartbeatCommandResponseMessage()); - 
mockConnection.EnqueueCommandResponseMessage(CreateHeartbeatCommandResponseMessage()); + var serverMonitorSettings = new ServerMonitorSettings(TimeSpan.FromSeconds(5), TimeSpan.FromMilliseconds(10), TimeSpan.FromSeconds(5), ServerMonitoringMode.Poll); + using (var subject = CreateSubject(out var mockConnection, out _, out _, capturedEvents, serverMonitorSettings: serverMonitorSettings)) + { + SetupHeartbeatConnection(mockConnection, isStreamable: true, autoFillStreamingResponses: false); + mockConnection.EnqueueCommandResponseMessage(CreateHeartbeatCommandResponseMessage()); + mockConnection.EnqueueCommandResponseMessage(CreateHeartbeatCommandResponseMessage()); - subject.Initialize(); - SpinWait.SpinUntil(() => capturedEvents.Count >= 6, TimeSpan.FromSeconds(5)).Should().BeTrue(); - subject.Dispose(); + subject.Initialize(); + SpinWait.SpinUntil(() => capturedEvents.Count >= 6, TimeSpan.FromSeconds(5)).Should().BeTrue(); + } capturedEvents.Next().Should().BeOfType<ServerHeartbeatStartedEvent>().Subject.Awaited.Should().Be(false); capturedEvents.Next().Should().BeOfType<ServerHeartbeatSucceededEvent>().Subject.Awaited.Should().Be(false); @@ -516,16 +510,16 @@ public void Should_use_polling_protocol_if_running_in_FaaS_platform(string envir .Capture<ServerHeartbeatStartedEvent>() .Capture<ServerHeartbeatSucceededEvent>(); - var serverMonitorSettings = new ServerMonitorSettings(TimeSpan.FromSeconds(5), TimeSpan.FromMilliseconds(10)); - using var subject = CreateSubject(out var mockConnection, out _, out _, capturedEvents, serverMonitorSettings: serverMonitorSettings, environmentVariableProviderMock: environmentVariableProviderMock); - - SetupHeartbeatConnection(mockConnection, isStreamable: true, autoFillStreamingResponses: false); - mockConnection.EnqueueCommandResponseMessage(CreateHeartbeatCommandResponseMessage()); - mockConnection.EnqueueCommandResponseMessage(CreateHeartbeatCommandResponseMessage()); + var serverMonitorSettings = new 
ServerMonitorSettings(TimeSpan.FromSeconds(5), TimeSpan.FromMilliseconds(10), TimeSpan.FromSeconds(5)); + using (var subject = CreateSubject(out var mockConnection, out _, out _, capturedEvents, serverMonitorSettings: serverMonitorSettings, environmentVariableProviderMock: environmentVariableProviderMock)) + { + SetupHeartbeatConnection(mockConnection, isStreamable: true, autoFillStreamingResponses: false); + mockConnection.EnqueueCommandResponseMessage(CreateHeartbeatCommandResponseMessage()); + mockConnection.EnqueueCommandResponseMessage(CreateHeartbeatCommandResponseMessage()); - subject.Initialize(); - SpinWait.SpinUntil(() => capturedEvents.Count >= 6, TimeSpan.FromSeconds(5)).Should().BeTrue(); - subject.Dispose(); + subject.Initialize(); + SpinWait.SpinUntil(() => capturedEvents.Count >= 6, TimeSpan.FromSeconds(5)).Should().BeTrue(); + } capturedEvents.Next().Should().BeOfType<ServerHeartbeatStartedEvent>().Subject.Awaited.Should().Be(false); capturedEvents.Next().Should().BeOfType<ServerHeartbeatSucceededEvent>().Subject.Awaited.Should().Be(false); diff --git a/tests/MongoDB.Driver.Tests/Core/Servers/ServerTests.cs b/tests/MongoDB.Driver.Tests/Core/Servers/ServerTests.cs index 6ecc3ac6234..39a1810e780 100644 --- a/tests/MongoDB.Driver.Tests/Core/Servers/ServerTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/Servers/ServerTests.cs @@ -71,8 +71,8 @@ public ServerTests(ITestOutputHelper output) : base(output) _clusterClock = new Mock<IClusterClock>().Object; _directConnection = false; _mockConnectionPool = new Mock<IConnectionPool>(); - _mockConnectionPool.Setup(p => p.AcquireConnection(It.IsAny<CancellationToken>())).Returns(new Mock<IConnectionHandle>().Object); - _mockConnectionPool.Setup(p => p.AcquireConnectionAsync(It.IsAny<CancellationToken>())).Returns(Task.FromResult(new Mock<IConnectionHandle>().Object)); + _mockConnectionPool.Setup(p => p.AcquireConnection(It.IsAny<OperationContext>())).Returns(new Mock<IConnectionHandle>().Object); + 
_mockConnectionPool.Setup(p => p.AcquireConnectionAsync(It.IsAny<OperationContext>())).Returns(Task.FromResult(new Mock<IConnectionHandle>().Object)); _mockConnectionPoolFactory = new Mock<IConnectionPoolFactory>(); _mockConnectionPoolFactory .Setup(f => f.CreateConnectionPool(It.IsAny<ServerId>(), _endPoint, It.IsAny<IConnectionExceptionHandler>())) @@ -97,28 +97,6 @@ protected override void DisposeInternal() _subject.Dispose(); } - [Theory] - [ParameterAttributeData] - public void ChannelFork_should_not_affect_operations_count([Values(false, true)] bool async) - { - IClusterableServer server = SetupServer(false, false); - - var channel = async ? - server.GetChannelAsync(CancellationToken.None).GetAwaiter().GetResult() : - server.GetChannel(CancellationToken.None); - - server.OutstandingOperationsCount.Should().Be(1); - - var forkedChannel = channel.Fork(); - server.OutstandingOperationsCount.Should().Be(1); - - forkedChannel.Dispose(); - server.OutstandingOperationsCount.Should().Be(1); - - channel.Dispose(); - server.OutstandingOperationsCount.Should().Be(0); - } - [Fact] public void Constructor_should_not_throw_when_serverApi_is_null() { @@ -200,12 +178,10 @@ public void Dispose_should_dispose_the_server() [Theory] [ParameterAttributeData] - public void GetChannel_should_clear_connection_pool_when_opening_connection_throws_MongoAuthenticationException( + public async Task GetChannel_should_clear_connection_pool_when_opening_connection_throws_MongoAuthenticationException( [Values(false, true)] bool async) { var connectionId = new ConnectionId(new ServerId(_clusterId, _endPoint)); - var mockConnectionHandle = new Mock<IConnectionHandle>(); - var mockConnectionPool = new Mock<IConnectionPool>(); var mockConnectionPoolFactory = new Mock<IConnectionPoolFactory>(); @@ -226,28 +202,20 @@ public void GetChannel_should_clear_connection_pool_when_opening_connection_thro var exceptionToThrow = new MongoAuthenticationException(connectionId, "Invalid login."); 
mockConnectionPool - .Setup(p => p.AcquireConnection(It.IsAny<CancellationToken>())) + .Setup(p => p.AcquireConnection(It.IsAny<OperationContext>())) .Callback(() => server.HandleExceptionOnOpen(exceptionToThrow)) .Throws(exceptionToThrow); mockConnectionPool - .Setup(p => p.AcquireConnectionAsync(It.IsAny<CancellationToken>())) + .Setup(p => p.AcquireConnectionAsync(It.IsAny<OperationContext>())) .Callback(() => server.HandleExceptionOnOpen(exceptionToThrow)) .Throws(exceptionToThrow); mockConnectionPool.Setup(p => p.Clear(It.IsAny<bool>())); server.Initialize(); - var exception = Record.Exception(() => - { - if (async) - { - server.GetChannelAsync(CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - server.GetChannel(CancellationToken.None); - } - }); + var exception = async ? + await Record.ExceptionAsync(() => server.GetChannelAsync(OperationContext.NoTimeout)) : + Record.Exception(() => server.GetChannel(OperationContext.NoTimeout)); exception.Should().BeOfType<MongoAuthenticationException>(); mockConnectionPool.Verify(p => p.Clear(It.IsAny<bool>()), Times.Once()); @@ -255,120 +223,91 @@ public void GetChannel_should_clear_connection_pool_when_opening_connection_thro [Theory] [ParameterAttributeData] - public void GetChannel_should_get_a_connection( + public async Task GetChannel_should_get_a_connection( [Values(false, true)] bool async) { _subject.Initialize(); - IChannelHandle channel; - if (async) - { - channel = _subject.GetChannelAsync(CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - channel = _subject.GetChannel(CancellationToken.None); - } + var connection = async ? 
+ await _subject.GetChannelAsync(OperationContext.NoTimeout) : + _subject.GetChannel(OperationContext.NoTimeout); - channel.Should().NotBeNull(); + connection.Should().NotBeNull(); } [Theory] [ParameterAttributeData] - public void GetChannel_should_not_increase_operations_count_on_exception( + public async Task GetChannel_should_not_increase_operations_count_on_exception( [Values(false, true)] bool async, [Values(false, true)] bool connectionOpenException) { IClusterableServer server = SetupServer(connectionOpenException, !connectionOpenException); - _ = Record.Exception(() => - { - if (async) - { - server.GetChannelAsync(CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - server.GetChannel(CancellationToken.None); - } - }); + var exception = async ? + await Record.ExceptionAsync(() => _subject.GetChannelAsync(OperationContext.NoTimeout)) : + Record.Exception(() => _subject.GetChannel(OperationContext.NoTimeout)); + exception.Should().NotBeNull(); server.OutstandingOperationsCount.Should().Be(0); } [Theory] [ParameterAttributeData] - public void GetChannel_should_set_operations_count_correctly( + public async Task GetChannel_should_set_operations_count_correctly( [Values(false, true)] bool async, [Values(0, 1, 2, 10)] int operationsCount) { IClusterableServer server = SetupServer(false, false); - var channels = new List<IChannel>(); + var channels = new List<IChannelHandle>(); for (int i = 0; i < operationsCount; i++) { - if (async) - { - channels.Add(server.GetChannelAsync(CancellationToken.None).GetAwaiter().GetResult()); - } - else - { - channels.Add(server.GetChannel(CancellationToken.None)); - } + var connection = async ? 
+ await server.GetChannelAsync(OperationContext.NoTimeout) : + server.GetChannel(OperationContext.NoTimeout); + channels.Add(connection); } server.OutstandingOperationsCount.Should().Be(operationsCount); foreach (var channel in channels) { - channel.Dispose(); + server.DecrementOutstandingOperationsCount(); server.OutstandingOperationsCount.Should().Be(--operationsCount); } } [Theory] [ParameterAttributeData] - public void GetChannel_should_throw_when_not_initialized( + public async Task GetChannel_should_throw_when_not_initialized( [Values(false, true)] bool async) { - Action act; - if (async) - { - act = () => _subject.GetChannelAsync(CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - act = () => _subject.GetChannel(CancellationToken.None); - } + var exception = async ? + await Record.ExceptionAsync(() => _subject.GetChannelAsync(OperationContext.NoTimeout)) : + Record.Exception(() => _subject.GetChannel(OperationContext.NoTimeout)); - act.ShouldThrow<InvalidOperationException>(); + exception.Should().BeOfType<InvalidOperationException>(); } [Theory] [ParameterAttributeData] - public void GetChannel_should_throw_when_disposed( + public async Task GetChannel_should_throw_when_disposed( [Values(false, true)] bool async) { _subject.Dispose(); - Action act; - if (async) - { - act = () => _subject.GetChannelAsync(CancellationToken.None).GetAwaiter().GetResult(); - } - else - { - act = () => _subject.GetChannel(CancellationToken.None); - } + var exception = async ? 
+ await Record.ExceptionAsync(() => _subject.GetChannelAsync(OperationContext.NoTimeout)) : + Record.Exception(() => _subject.GetChannel(OperationContext.NoTimeout)); - act.ShouldThrow<ObjectDisposedException>(); + exception.Should().BeOfType<ObjectDisposedException>(); } [Theory] [ParameterAttributeData] - public void GetChannel_should_update_topology_and_clear_connection_pool_on_network_error_or_timeout( + public async Task GetChannel_should_update_topology_and_clear_connection_pool_on_network_error_or_timeout( [Values("TimedOutSocketException", "NetworkUnreachableSocketException")] string errorType, [Values(false, true)] bool async) { @@ -379,8 +318,8 @@ public void GetChannel_should_update_topology_and_clear_connection_pool_on_netwo var openConnectionException = new MongoConnectionException(connectionId, "Oops", new IOException("Cry", innerMostException)); var mockConnection = new Mock<IConnectionHandle>(); mockConnection.Setup(c => c.ConnectionId).Returns(connectionId); - mockConnection.Setup(c => c.Open(It.IsAny<CancellationToken>())).Throws(openConnectionException); - mockConnection.Setup(c => c.OpenAsync(It.IsAny<CancellationToken>())).ThrowsAsync(openConnectionException); + mockConnection.Setup(c => c.Open(It.IsAny<OperationContext>())).Throws(openConnectionException); + mockConnection.Setup(c => c.OpenAsync(It.IsAny<OperationContext>())).ThrowsAsync(openConnectionException); var connectionFactory = new Mock<IConnectionFactory>(); connectionFactory.Setup(f => f.ConnectionSettings).Returns(() => new ConnectionSettings()); @@ -406,18 +345,10 @@ public void GetChannel_should_update_topology_and_clear_connection_pool_on_netwo subject.Initialize(); connectionPool.SetReady(); - IChannelHandle channel = null; - Exception exception; - if (async) - { - exception = Record.Exception(() => channel = subject.GetChannelAsync(CancellationToken.None).GetAwaiter().GetResult()); - } - else - { - exception = Record.Exception(() => channel = 
subject.GetChannel(CancellationToken.None)); - } + var exception = async ? + await Record.ExceptionAsync(() => subject.GetChannelAsync(OperationContext.NoTimeout)) : + Record.Exception(() => subject.GetChannel(OperationContext.NoTimeout)); - channel.Should().BeNull(); exception.Should().Be(openConnectionException); subject.Description.Type.Should().Be(ServerType.Unknown); subject.Description.ReasonChanged.Should().Contain("ChannelException during handshake"); @@ -449,8 +380,8 @@ public void HandleChannelException_should_update_topology_as_expected_on_network mockConnection.SetupGet(c => c.Description) .Returns(new ConnectionDescription(new ConnectionId(serverId, 0), helloResult)); var mockConnectionPool = new Mock<IConnectionPool>(); - mockConnectionPool.Setup(p => p.AcquireConnection(It.IsAny<CancellationToken>())).Returns(mockConnection.Object); - mockConnectionPool.Setup(p => p.AcquireConnectionAsync(It.IsAny<CancellationToken>())).ReturnsAsync(mockConnection.Object); + mockConnectionPool.Setup(p => p.AcquireConnection(It.IsAny<OperationContext>())).Returns(mockConnection.Object); + mockConnectionPool.Setup(p => p.AcquireConnectionAsync(It.IsAny<OperationContext>())).ReturnsAsync(mockConnection.Object); var mockConnectionPoolFactory = new Mock<IConnectionPoolFactory>(); mockConnectionPoolFactory .Setup(f => f.CreateConnectionPool(It.IsAny<ServerId>(), _endPoint, It.IsAny<IConnectionExceptionHandler>())) @@ -834,30 +765,30 @@ private Server SetupServer(bool exceptionOnConnectionOpen, bool exceptionOnConne if (exceptionOnConnectionAcquire) { mockConnectionPool - .Setup(p => p.AcquireConnection(It.IsAny<CancellationToken>())) + .Setup(p => p.AcquireConnection(It.IsAny<OperationContext>())) .Throws(new TimeoutException("Timeout")); mockConnectionPool - .Setup(p => p.AcquireConnectionAsync(It.IsAny<CancellationToken>())) + .Setup(p => p.AcquireConnectionAsync(It.IsAny<OperationContext>())) .Throws(new TimeoutException("Timeout")); mockConnectionPool.Setup(p => 
p.Clear(It.IsAny<bool>())); } else if (exceptionOnConnectionOpen) { mockConnectionPool - .Setup(p => p.AcquireConnection(It.IsAny<CancellationToken>())) + .Setup(p => p.AcquireConnection(It.IsAny<OperationContext>())) .Throws(new MongoAuthenticationException(connectionId, "Invalid login.")); mockConnectionPool - .Setup(p => p.AcquireConnectionAsync(It.IsAny<CancellationToken>())) + .Setup(p => p.AcquireConnectionAsync(It.IsAny<OperationContext>())) .Throws(new MongoAuthenticationException(connectionId, "Invalid login.")); mockConnectionPool.Setup(p => p.Clear(It.IsAny<bool>())); } else { mockConnectionPool - .Setup(p => p.AcquireConnection(It.IsAny<CancellationToken>())) + .Setup(p => p.AcquireConnection(It.IsAny<OperationContext>())) .Returns(mockConnectionHandle.Object); mockConnectionPool - .Setup(p => p.AcquireConnectionAsync(It.IsAny<CancellationToken>())) + .Setup(p => p.AcquireConnectionAsync(It.IsAny<OperationContext>())) .Returns(Task.FromResult(mockConnectionHandle.Object)); mockConnectionPool.Setup(p => p.Clear(It.IsAny<bool>())); } @@ -883,6 +814,7 @@ private Server SetupServer(bool exceptionOnConnectionOpen, bool exceptionOnConne } } + [Trait("Category", "Integration")] public class ServerChannelTests { [Theory] @@ -899,9 +831,8 @@ public void Command_should_send_the_greater_of_the_session_and_cluster_cluster_t using (var cluster = CoreTestConfiguration.CreateCluster(b => b.Subscribe(eventCapturer))) using (var session = cluster.StartSession()) { - var cancellationToken = CancellationToken.None; - var server = (Server)cluster.SelectServer(WritableServerSelector.Instance, cancellationToken); - using (var channel = server.GetChannel(cancellationToken)) + var server = cluster.SelectServer(OperationContext.NoTimeout, WritableServerSelector.Instance); + using (var channel = server.GetChannel(OperationContext.NoTimeout)) { session.AdvanceClusterTime(sessionClusterTime); server.ClusterClock.AdvanceClusterTime(clusterClusterTime); @@ -910,6 +841,7 @@ public 
void Command_should_send_the_greater_of_the_session_and_cluster_cluster_t try { channel.Command<BsonDocument>( + OperationContext.NoTimeout, session, ReadPreference.Primary, DatabaseNamespace.Admin, @@ -920,8 +852,7 @@ public void Command_should_send_the_greater_of_the_session_and_cluster_cluster_t null, // postWriteAction CommandResponseHandling.Return, BsonDocumentSerializer.Instance, - new MessageEncoderSettings(), - cancellationToken); + new MessageEncoderSettings()); } catch (MongoCommandException ex) { @@ -947,12 +878,12 @@ public void Command_should_update_the_session_and_cluster_cluster_times() using (var cluster = CoreTestConfiguration.CreateCluster(b => b.Subscribe(eventCapturer))) using (var session = cluster.StartSession()) { - var cancellationToken = CancellationToken.None; - var server = (Server)cluster.SelectServer(WritableServerSelector.Instance, cancellationToken); - using (var channel = server.GetChannel(cancellationToken)) + var server = cluster.SelectServer(OperationContext.NoTimeout, WritableServerSelector.Instance); + using (var channel = server.GetChannel(OperationContext.NoTimeout)) { var command = BsonDocument.Parse("{ ping : 1 }"); channel.Command<BsonDocument>( + OperationContext.NoTimeout, session, ReadPreference.Primary, DatabaseNamespace.Admin, @@ -963,21 +894,20 @@ public void Command_should_update_the_session_and_cluster_cluster_times() null, // postWriteAction CommandResponseHandling.Return, BsonDocumentSerializer.Instance, - new MessageEncoderSettings(), - cancellationToken); - } + new MessageEncoderSettings()); - var commandSucceededEvent = eventCapturer.Next().Should().BeOfType<CommandSucceededEvent>().Subject; - var actualReply = commandSucceededEvent.Reply; - var actualClusterTime = actualReply["$clusterTime"].AsBsonDocument; - session.ClusterTime.Should().Be(actualClusterTime); - server.ClusterClock.ClusterTime.Should().Be(actualClusterTime); + var commandSucceededEvent = 
eventCapturer.Next().Should().BeOfType<CommandSucceededEvent>().Subject; + var actualReply = commandSucceededEvent.Reply; + var actualClusterTime = actualReply["$clusterTime"].AsBsonDocument; + session.ClusterTime.Should().Be(actualClusterTime); + server.ClusterClock.ClusterTime.Should().Be(actualClusterTime); + } } } [Theory] [ParameterAttributeData] - public void Command_should_use_serverApi([Values(false, true)] bool async) + public async Task Command_should_use_serverApi([Values(false, true)] bool async) { RequireServer.Check(); @@ -991,15 +921,15 @@ public void Command_should_use_serverApi([Values(false, true)] bool async) using (var cluster = CoreTestConfiguration.CreateCluster(builder)) using (var session = cluster.StartSession()) { - var cancellationToken = CancellationToken.None; - var server = (Server)cluster.SelectServer(WritableServerSelector.Instance, cancellationToken); - using (var channel = server.GetChannel(cancellationToken)) + var server = cluster.SelectServer(OperationContext.NoTimeout, WritableServerSelector.Instance); + using (var channel = server.GetChannel(OperationContext.NoTimeout)) { var command = BsonDocument.Parse("{ ping : 1 }"); if (async) { - channel + await channel .CommandAsync( + OperationContext.NoTimeout, session, ReadPreference.Primary, DatabaseNamespace.Admin, @@ -1010,14 +940,12 @@ public void Command_should_use_serverApi([Values(false, true)] bool async) null, // postWriteAction CommandResponseHandling.Return, BsonDocumentSerializer.Instance, - new MessageEncoderSettings(), - cancellationToken) - .GetAwaiter() - .GetResult(); + new MessageEncoderSettings()); } else { channel.Command( + OperationContext.NoTimeout, session, ReadPreference.Primary, DatabaseNamespace.Admin, @@ -1028,8 +956,7 @@ public void Command_should_use_serverApi([Values(false, true)] bool async) null, // postWriteAction CommandResponseHandling.Return, BsonDocumentSerializer.Instance, - new MessageEncoderSettings(), - cancellationToken); + new 
MessageEncoderSettings()); } } } diff --git a/tests/MongoDB.Driver.Tests/Core/WireProtocol/CommandWriteProtocolTests.cs b/tests/MongoDB.Driver.Tests/Core/WireProtocol/CommandWriteProtocolTests.cs index 9eef407362b..2e989358f7e 100644 --- a/tests/MongoDB.Driver.Tests/Core/WireProtocol/CommandWriteProtocolTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/WireProtocol/CommandWriteProtocolTests.cs @@ -71,13 +71,14 @@ public void Execute_should_use_cached_IWireProtocol_if_available([Values(false, responseHandling, BsonDocumentSerializer.Instance, messageEncoderSettings, - null); // serverApi + null, // serverApi + TimeSpan.FromMilliseconds(42)); var mockConnection = new Mock<IConnection>(); var commandResponse = MessageHelper.BuildCommandResponse(CreateRawBsonDocument(new BsonDocument("ok", 1))); var connectionId = SetupConnection(mockConnection); - var result = subject.Execute(mockConnection.Object, CancellationToken.None); + var result = subject.Execute(OperationContext.NoTimeout, mockConnection.Object); var cachedWireProtocol = subject._cachedWireProtocol(); cachedWireProtocol.Should().NotBeNull(); @@ -91,7 +92,7 @@ public void Execute_should_use_cached_IWireProtocol_if_available([Values(false, subject._responseHandling(CommandResponseHandling.Ignore); // will trigger the exception if the CommandUsingCommandMessageWireProtocol ctor will be called result = null; - var exception = Record.Exception(() => { result = subject.Execute(mockConnection.Object, CancellationToken.None); }); + var exception = Record.Exception(() => { result = subject.Execute(OperationContext.NoTimeout, mockConnection.Object); }); if (withSameConnection) { @@ -118,7 +119,7 @@ ConnectionId SetupConnection(Mock<IConnection> connection, ConnectionId id = nul } connection - .Setup(c => c.ReceiveMessage(It.IsAny<int>(), It.IsAny<IMessageEncoderSelector>(), messageEncoderSettings, CancellationToken.None)) + .Setup(c => c.ReceiveMessage(OperationContext.NoTimeout, It.IsAny<int>(), 
It.IsAny<IMessageEncoderSelector>(), messageEncoderSettings)) .Returns(commandResponse); connection.SetupGet(c => c.ConnectionId).Returns(id); connection @@ -133,7 +134,7 @@ ConnectionId SetupConnection(Mock<IConnection> connection, ConnectionId id = nul [Theory] [ParameterAttributeData] - public void Execute_should_use_serverApi_with_getMoreCommand( + public async Task Execute_should_use_serverApi_with_getMoreCommand( [Values(false, true)] bool useServerApi, [Values(false, true)] bool async) { @@ -155,15 +156,16 @@ public void Execute_should_use_serverApi_with_getMoreCommand( CommandResponseHandling.Return, BsonDocumentSerializer.Instance, new MessageEncoderSettings(), - serverApi); + serverApi, + TimeSpan.FromMilliseconds(42)); if (async) { - subject.ExecuteAsync(connection, CancellationToken.None).GetAwaiter().GetResult(); + await subject.ExecuteAsync(OperationContext.NoTimeout, connection); } else { - subject.Execute(connection, CancellationToken.None); + subject.Execute(OperationContext.NoTimeout, connection); } SpinWait.SpinUntil(() => connection.GetSentMessages().Count >= 1, TimeSpan.FromSeconds(4)).Should().BeTrue(); @@ -177,7 +179,7 @@ public void Execute_should_use_serverApi_with_getMoreCommand( [Theory] [ParameterAttributeData] - public void Execute_should_use_serverApi_in_transaction( + public async Task Execute_should_use_serverApi_in_transaction( [Values(false, true)] bool useServerApi, [Values(false, true)] bool async) { @@ -199,15 +201,16 @@ public void Execute_should_use_serverApi_in_transaction( CommandResponseHandling.Return, BsonDocumentSerializer.Instance, new MessageEncoderSettings(), - serverApi); + serverApi, + TimeSpan.FromMilliseconds(42)); if (async) { - subject.ExecuteAsync(connection, CancellationToken.None).GetAwaiter().GetResult(); + await subject.ExecuteAsync(OperationContext.NoTimeout, connection); } else { - subject.Execute(connection, CancellationToken.None); + subject.Execute(OperationContext.NoTimeout, connection); } 
SpinWait.SpinUntil(() => connection.GetSentMessages().Count >= 1, TimeSpan.FromSeconds(4)).Should().BeTrue(); @@ -247,17 +250,18 @@ public void Execute_should_wait_for_response_when_CommandResponseHandling_is_Ret CommandResponseHandling.Return, BsonDocumentSerializer.Instance, messageEncoderSettings, - null); // serverApi + null, // serverApi + TimeSpan.FromMilliseconds(42)); var mockConnection = new Mock<IConnection>(); mockConnection.Setup(c => c.Settings).Returns(() => new ConnectionSettings()); var commandResponse = MessageHelper.BuildReply(CreateRawBsonDocument(new BsonDocument("ok", 1))); mockConnection - .Setup(c => c.ReceiveMessage(It.IsAny<int>(), It.IsAny<IMessageEncoderSelector>(), messageEncoderSettings, CancellationToken.None)) + .Setup(c => c.ReceiveMessage(It.IsAny<OperationContext>(), It.IsAny<int>(), It.IsAny<IMessageEncoderSelector>(), messageEncoderSettings)) .Returns(commandResponse); - var result = subject.Execute(mockConnection.Object, CancellationToken.None); + var result = subject.Execute(OperationContext.NoTimeout, mockConnection.Object); result.Should().Be("{ok: 1}"); } @@ -277,21 +281,22 @@ public void Execute_should_not_wait_for_response_when_CommandResponseHandling_is CommandResponseHandling.NoResponseExpected, BsonDocumentSerializer.Instance, messageEncoderSettings, - null); // serverApi + null, // serverApi + TimeSpan.FromMilliseconds(42)); var mockConnection = new Mock<IConnection>(); mockConnection.Setup(c => c.Settings).Returns(() => new ConnectionSettings()); - var result = subject.Execute(mockConnection.Object, CancellationToken.None); + var result = subject.Execute(OperationContext.NoTimeout, mockConnection.Object); result.Should().BeNull(); mockConnection.Verify( - c => c.ReceiveMessageAsync(It.IsAny<int>(), It.IsAny<IMessageEncoderSelector>(), messageEncoderSettings, CancellationToken.None), + c => c.ReceiveMessageAsync(It.IsAny<OperationContext>(), It.IsAny<int>(), It.IsAny<IMessageEncoderSelector>(), messageEncoderSettings), 
Times.Once); } [Fact] - public void ExecuteAsync_should_wait_for_response_when_CommandResponseHandling_is_Return() + public async Task ExecuteAsync_should_wait_for_response_when_CommandResponseHandling_is_Return() { var messageEncoderSettings = new MessageEncoderSettings(); var subject = new CommandWireProtocol<BsonDocument>( @@ -306,22 +311,23 @@ public void ExecuteAsync_should_wait_for_response_when_CommandResponseHandling_i CommandResponseHandling.Return, BsonDocumentSerializer.Instance, messageEncoderSettings, - null); // serverApi + null, // serverApi + TimeSpan.FromMilliseconds(42)); var mockConnection = new Mock<IConnection>(); mockConnection.Setup(c => c.Settings).Returns(() => new ConnectionSettings()); var commandResponse = MessageHelper.BuildReply(CreateRawBsonDocument(new BsonDocument("ok", 1))); mockConnection - .Setup(c => c.ReceiveMessageAsync(It.IsAny<int>(), It.IsAny<IMessageEncoderSelector>(), messageEncoderSettings, CancellationToken.None)) + .Setup(c => c.ReceiveMessageAsync(It.IsAny<OperationContext>(), It.IsAny<int>(), It.IsAny<IMessageEncoderSelector>(), messageEncoderSettings)) .Returns(Task.FromResult<ResponseMessage>(commandResponse)); - var result = subject.ExecuteAsync(mockConnection.Object, CancellationToken.None).GetAwaiter().GetResult(); + var result = await subject.ExecuteAsync(OperationContext.NoTimeout, mockConnection.Object); result.Should().Be("{ok: 1}"); } [Fact] - public void ExecuteAsync_should_not_wait_for_response_when_CommandResponseHandling_is_NoResponseExpected() + public async Task ExecuteAsync_should_not_wait_for_response_when_CommandResponseHandling_is_NoResponseExpected() { var messageEncoderSettings = new MessageEncoderSettings(); var subject = new CommandWireProtocol<BsonDocument>( @@ -336,15 +342,16 @@ public void ExecuteAsync_should_not_wait_for_response_when_CommandResponseHandli CommandResponseHandling.NoResponseExpected, BsonDocumentSerializer.Instance, messageEncoderSettings, - null); // serverApi + null, // 
serverApi + TimeSpan.FromMilliseconds(42)); var mockConnection = new Mock<IConnection>(); mockConnection.Setup(c => c.Settings).Returns(() => new ConnectionSettings()); - var result = subject.ExecuteAsync(mockConnection.Object, CancellationToken.None).GetAwaiter().GetResult(); + var result = await subject.ExecuteAsync(OperationContext.NoTimeout, mockConnection.Object); result.Should().BeNull(); - mockConnection.Verify(c => c.ReceiveMessageAsync(It.IsAny<int>(), It.IsAny<IMessageEncoderSelector>(), messageEncoderSettings, CancellationToken.None), Times.Once); + mockConnection.Verify(c => c.ReceiveMessageAsync(It.IsAny<OperationContext>(), It.IsAny<int>(), It.IsAny<IMessageEncoderSelector>(), messageEncoderSettings), Times.Once); } // private methods diff --git a/tests/MongoDB.Driver.Tests/Core/WireProtocol/Messages/CommandRequestMessageTests.cs b/tests/MongoDB.Driver.Tests/Core/WireProtocol/Messages/CommandRequestMessageTests.cs index 955072e1b18..436638ea9f6 100644 --- a/tests/MongoDB.Driver.Tests/Core/WireProtocol/Messages/CommandRequestMessageTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/WireProtocol/Messages/CommandRequestMessageTests.cs @@ -1,4 +1,4 @@ -/* Copyright 2018-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,7 +13,6 @@ * limitations under the License. 
*/ -using System; using System.IO; using FluentAssertions; using MongoDB.Bson; @@ -31,12 +30,10 @@ public void constructor_should_initialize_instance() { var sections = new[] { new Type0CommandMessageSection<BsonDocument>(new BsonDocument(), BsonDocumentSerializer.Instance) }; var wrappedMessage = new CommandMessage(1, 2, sections, false); - Func<bool> shouldBeSent = () => true; - var result = new CommandRequestMessage(wrappedMessage, shouldBeSent); + var result = new CommandRequestMessage(wrappedMessage); result.WrappedMessage.Should().BeSameAs(wrappedMessage); - result.ShouldBeSent.Should().BeSameAs(shouldBeSent); } [Fact] @@ -75,17 +72,14 @@ public void GetEncoder_should_return_expected_result() } // private methods - private CommandRequestMessage CreateSubject( - CommandMessage wrappedMessage = null, - Func<bool> shouldBeSent = null) + private CommandRequestMessage CreateSubject(CommandMessage wrappedMessage = null) { if (wrappedMessage == null) { var sections = new[] { new Type0CommandMessageSection<BsonDocument>(new BsonDocument(), BsonDocumentSerializer.Instance) }; wrappedMessage = new CommandMessage(1, 2, sections, false); } - shouldBeSent = shouldBeSent ?? 
(() => true); - return new CommandRequestMessage(wrappedMessage, shouldBeSent); + return new CommandRequestMessage(wrappedMessage); } } } diff --git a/tests/MongoDB.Driver.Tests/Core/WireProtocol/Messages/Encoders/BinaryEncoders/CommandMessageBinaryEncoderTests.cs b/tests/MongoDB.Driver.Tests/Core/WireProtocol/Messages/Encoders/BinaryEncoders/CommandMessageBinaryEncoderTests.cs index 2c99282dbd1..b31ab7fa042 100644 --- a/tests/MongoDB.Driver.Tests/Core/WireProtocol/Messages/Encoders/BinaryEncoders/CommandMessageBinaryEncoderTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/WireProtocol/Messages/Encoders/BinaryEncoders/CommandMessageBinaryEncoderTests.cs @@ -25,6 +25,7 @@ using MongoDB.TestHelpers.XunitExtensions; using MongoDB.Driver.Core.Misc; using Xunit; +using System.Buffers.Binary; namespace MongoDB.Driver.Core.WireProtocol.Messages.Encoders.BinaryEncoders { @@ -141,7 +142,7 @@ public void ReadMessage_should_throw_when_flags_is_invalid( [Values(-1, 1, 4)] int flags) { var bytes = CreateMessageBytes(); - BitConverter.GetBytes(flags).CopyTo(bytes, 16); + BinaryPrimitives.WriteInt32LittleEndian(bytes.AsSpan(16, 4), flags); var subject = CreateSubject(bytes); var expectedMessage = flags == 1 ? "Command message CheckSumPresent flag not supported." 
: "Command message has invalid flags"; @@ -271,7 +272,7 @@ public void WriteMessage_should_write_messageLength(int[] sectionTypes) var result = stream.ToArray(); result.Length.Should().Be(expectedMessageLength); - var writtenMessageLength = BitConverter.ToInt32(result, 0); + var writtenMessageLength = BinaryPrimitives.ReadInt32LittleEndian(result.AsSpan(0, 4)); writtenMessageLength.Should().Be(expectedMessageLength); } @@ -287,7 +288,7 @@ public void WriteMessage_should_write_requestId( subject.WriteMessage(message); var result = stream.ToArray(); - var resultRequestId = BitConverter.ToInt32(result, 4); + var resultRequestId = BinaryPrimitives.ReadInt32LittleEndian(result.AsSpan(4, 4)); resultRequestId.Should().Be(requestId); } @@ -303,7 +304,7 @@ public void WriteMessage_should_write_responseTo( subject.WriteMessage(message); var result = stream.ToArray(); - var resultResponseTo = BitConverter.ToInt32(result, 8); + var resultResponseTo = BinaryPrimitives.ReadInt32LittleEndian(result.AsSpan(8, 4)); resultResponseTo.Should().Be(responseTo); } @@ -317,7 +318,7 @@ public void WriteMessage_should_write_expected_opcode() subject.WriteMessage(message); var result = stream.ToArray(); - var opcode = BitConverter.ToInt32(result, 12); + var opcode = BinaryPrimitives.ReadInt32LittleEndian(result.AsSpan(12, 4)); opcode.Should().Be((int)Opcode.OpMsg); } @@ -334,7 +335,7 @@ public void WriteMessage_should_write_flags( subject.WriteMessage(message); var result = stream.ToArray(); - var flags = (OpMsgFlags)BitConverter.ToInt32(result, 16); + var flags = (OpMsgFlags)BinaryPrimitives.ReadInt32LittleEndian(result.AsSpan(16, 4)); flags.HasFlag(OpMsgFlags.MoreToCome).Should().Be(moreToCome); flags.HasFlag(OpMsgFlags.ExhaustAllowed).Should().Be(exhaustAllowed); } diff --git a/tests/MongoDB.Driver.Tests/Core/WireProtocol/Messages/Encoders/BinaryEncoders/CommandRequestMessageBinaryEncoderTests.cs 
b/tests/MongoDB.Driver.Tests/Core/WireProtocol/Messages/Encoders/BinaryEncoders/CommandRequestMessageBinaryEncoderTests.cs index cc65f6d01c0..6eb255c4159 100644 --- a/tests/MongoDB.Driver.Tests/Core/WireProtocol/Messages/Encoders/BinaryEncoders/CommandRequestMessageBinaryEncoderTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/WireProtocol/Messages/Encoders/BinaryEncoders/CommandRequestMessageBinaryEncoderTests.cs @@ -66,7 +66,7 @@ public void WriteMessage_should_delegate_to_wrapped_encoder() var document = new BsonDocument("x", 1); var sections = new[] { new Type0CommandMessageSection<BsonDocument>(document, BsonDocumentSerializer.Instance) }; var wrappedMessage = new CommandMessage(1, 2, sections, false); - var message = new CommandRequestMessage(wrappedMessage, () => true); + var message = new CommandRequestMessage(wrappedMessage); var expectedBytes = CreateMessageBytes(wrappedMessage); subject.WriteMessage(message); diff --git a/tests/MongoDB.Driver.Tests/Core/WireProtocol/Messages/Encoders/BinaryEncoders/QueryMessageBinaryEncoderTests.cs b/tests/MongoDB.Driver.Tests/Core/WireProtocol/Messages/Encoders/BinaryEncoders/QueryMessageBinaryEncoderTests.cs index 5f9196472d1..fbcf6c1685c 100644 --- a/tests/MongoDB.Driver.Tests/Core/WireProtocol/Messages/Encoders/BinaryEncoders/QueryMessageBinaryEncoderTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/WireProtocol/Messages/Encoders/BinaryEncoders/QueryMessageBinaryEncoderTests.cs @@ -1,4 +1,4 @@ -/* Copyright 2013-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -211,7 +211,7 @@ public void WriteMessage_should_invoke_encoding_post_processor( var command = BsonDocument.Parse("{ command : \"x\", writeConcern : { w : 0 } }"); var query = wrapped ? 
new BsonDocument("$query", command) : command; #pragma warning disable 618 - var message = new QueryMessage(0, collectionNamespace, query, null, NoOpElementNameValidator.Instance, 0, 0, false, false, false, false, false, false, null) + var message = new QueryMessage(0, collectionNamespace, query, null, NoOpElementNameValidator.Instance, 0, 0, false, false, false, false, false, false) { PostWriteAction = encoder => encoder.ChangeWriteConcernFromW0ToW1(), ResponseHandling = CommandResponseHandling.Ignore diff --git a/tests/MongoDB.Driver.Tests/Core/WireProtocol/Messages/Encoders/JsonEncoders/CommandRequestMessageJsonEncoderTests.cs b/tests/MongoDB.Driver.Tests/Core/WireProtocol/Messages/Encoders/JsonEncoders/CommandRequestMessageJsonEncoderTests.cs index 573a40d30a2..e4394ceb4ee 100644 --- a/tests/MongoDB.Driver.Tests/Core/WireProtocol/Messages/Encoders/JsonEncoders/CommandRequestMessageJsonEncoderTests.cs +++ b/tests/MongoDB.Driver.Tests/Core/WireProtocol/Messages/Encoders/JsonEncoders/CommandRequestMessageJsonEncoderTests.cs @@ -66,7 +66,7 @@ public void WriteMessage_should_delegate_to_wrapped_encoder() var document = new BsonDocument("x", 1); var sections = new[] { new Type0CommandMessageSection<BsonDocument>(document, BsonDocumentSerializer.Instance) }; var wrappedMessage = new CommandMessage(1, 2, sections, false); - var message = new CommandRequestMessage(wrappedMessage, () => true); + var message = new CommandRequestMessage(wrappedMessage); var expectedJson = CreateMessageJson(wrappedMessage); subject.WriteMessage(message); diff --git a/tests/MongoDB.Driver.Tests/CustomServerSelectorTests.cs b/tests/MongoDB.Driver.Tests/CustomServerSelectorTests.cs index def4ac29b96..14eb15bc38c 100644 --- a/tests/MongoDB.Driver.Tests/CustomServerSelectorTests.cs +++ b/tests/MongoDB.Driver.Tests/CustomServerSelectorTests.cs @@ -27,6 +27,7 @@ namespace MongoDB.Driver.Tests { + [Trait("Category", "Integration")] public class CustomServerSelectorTests : LoggableTestClass { public 
CustomServerSelectorTests(ITestOutputHelper output) : base(output) diff --git a/tests/MongoDB.Driver.Tests/Encryption/AutoEncryptionTests.cs b/tests/MongoDB.Driver.Tests/Encryption/AutoEncryptionTests.cs index 546897cbdbc..3a3c3cbf6a6 100644 --- a/tests/MongoDB.Driver.Tests/Encryption/AutoEncryptionTests.cs +++ b/tests/MongoDB.Driver.Tests/Encryption/AutoEncryptionTests.cs @@ -24,6 +24,7 @@ using MongoDB.Driver.Core.TestHelpers.Logging; using MongoDB.Driver.Core.TestHelpers.XunitExtensions; using MongoDB.Driver.Encryption; +using MongoDB.Driver.TestHelpers; using MongoDB.Driver.Tests.Specifications.client_side_encryption; using MongoDB.TestHelpers.XunitExtensions; using Xunit; @@ -32,6 +33,7 @@ namespace MongoDB.Driver.Tests.Encryption { [Trait("Category", "CSFLE")] + [Trait("Category", "Integration")] public class AutoEncryptionTests : LoggableTestClass { #region static @@ -86,13 +88,28 @@ public async Task Mongocryptd_should_be_initialized_when_auto_encryption([Values var coll = client.GetDatabase(__collectionNamespace.DatabaseNamespace.DatabaseName).GetCollection<BsonDocument>(__collectionNamespace.CollectionName); - if (async) + try { - await coll.InsertOneAsync(new BsonDocument()); + if (async) + { + await coll.InsertOneAsync(new BsonDocument()); + } + else + { + coll.InsertOne(new BsonDocument()); + } } - else + catch (Exception ex) { - coll.InsertOne(new BsonDocument()); + if (CoreTestConfiguration.ShouldSkipMongocryptdTests_SERVER_106469()) + { + ex.Should().BeOfType<MongoEncryptionException>(); + return; + } + else + { + throw; + } } mongocryptdClient.IsValueCreated.Should().BeTrue(); diff --git a/tests/MongoDB.Driver.Tests/Encryption/ClientEncryptionTests.cs b/tests/MongoDB.Driver.Tests/Encryption/ClientEncryptionTests.cs index 3cb767016ea..01ed41022a4 100644 --- a/tests/MongoDB.Driver.Tests/Encryption/ClientEncryptionTests.cs +++ b/tests/MongoDB.Driver.Tests/Encryption/ClientEncryptionTests.cs @@ -37,6 +37,7 @@ namespace MongoDB.Driver.Tests.Encryption { 
[Trait("Category", "CSFLE")] + [Trait("Category", "Integration")] public class ClientEncryptionTests { #region static @@ -135,15 +136,15 @@ public async Task CreateEncryptedCollection_should_handle_generated_key_when_sec mockCluster.SetupGet(c => c.Description).Returns(clusterDescription); var mockServer = new Mock<IServer>(); mockServer.SetupGet(s => s.Description).Returns(serverDescription); - var channel = Mock.Of<IChannelHandle>(c => c.ConnectionDescription == new ConnectionDescription(new ConnectionId(serverId), new HelloResult(new BsonDocument("maxWireVersion", serverDescription.WireVersionRange.Max)))); - mockServer.Setup(s => s.GetChannel(It.IsAny<CancellationToken>())).Returns(channel); - mockServer.Setup(s => s.GetChannelAsync(It.IsAny<CancellationToken>())).ReturnsAsync(channel); + var connection = Mock.Of<IChannelHandle>(c => c.ConnectionDescription == new ConnectionDescription(new ConnectionId(serverId), new HelloResult(new BsonDocument("maxWireVersion", serverDescription.WireVersionRange.Max)))); + mockServer.Setup(s => s.GetChannel(It.IsAny<OperationContext>())).Returns(connection); + mockServer.Setup(s => s.GetChannelAsync(It.IsAny<OperationContext>())).ReturnsAsync(connection); mockCluster - .Setup(m => m.SelectServer(It.IsAny<IServerSelector>(), It.IsAny<CancellationToken>())) + .Setup(m => m.SelectServer(It.IsAny<OperationContext>(), It.IsAny<IServerSelector>())) .Returns(mockServer.Object); mockCluster - .Setup(m => m.SelectServerAsync(It.IsAny<IServerSelector>(), It.IsAny<CancellationToken>())) + .Setup(m => m.SelectServerAsync(It.IsAny<OperationContext>(), It.IsAny<IServerSelector>())) .ReturnsAsync(mockServer.Object); var database = Mock.Of<IMongoDatabase>(d => @@ -224,15 +225,15 @@ public async Task CreateEncryptedCollection_should_handle_various_encryptedField mockCluster.SetupGet(c => c.Description).Returns(clusterDescription); var mockServer = new Mock<IServer>(); mockServer.SetupGet(s => s.Description).Returns(serverDescription); - var 
channel = Mock.Of<IChannelHandle>(c => c.ConnectionDescription == new ConnectionDescription(new ConnectionId(serverId), new HelloResult(new BsonDocument("maxWireVersion", serverDescription.WireVersionRange.Max)))); - mockServer.Setup(s => s.GetChannel(It.IsAny<CancellationToken>())).Returns(channel); - mockServer.Setup(s => s.GetChannelAsync(It.IsAny<CancellationToken>())).ReturnsAsync(channel); + var connection = Mock.Of<IChannelHandle>(c => c.ConnectionDescription == new ConnectionDescription(new ConnectionId(serverId), new HelloResult(new BsonDocument("maxWireVersion", serverDescription.WireVersionRange.Max)))); + mockServer.Setup(s => s.GetChannel(It.IsAny<OperationContext>())).Returns(connection); + mockServer.Setup(s => s.GetChannelAsync(It.IsAny<OperationContext>())).ReturnsAsync(connection); mockCluster - .Setup(m => m.SelectServer(It.IsAny<IServerSelector>(), It.IsAny<CancellationToken>())) + .Setup(m => m.SelectServer(It.IsAny<OperationContext>(), It.IsAny<IServerSelector>())) .Returns(mockServer.Object); mockCluster - .Setup(m => m.SelectServerAsync(It.IsAny<IServerSelector>(), It.IsAny<CancellationToken>())) + .Setup(m => m.SelectServerAsync(It.IsAny<OperationContext>(), It.IsAny<IServerSelector>())) .ReturnsAsync(mockServer.Object); var database = Mock.Of<IMongoDatabase>(d => diff --git a/tests/MongoDB.Driver.Tests/Encryption/CsfleSchemaBuilderTests.cs b/tests/MongoDB.Driver.Tests/Encryption/CsfleSchemaBuilderTests.cs new file mode 100644 index 00000000000..ba00c581ce5 --- /dev/null +++ b/tests/MongoDB.Driver.Tests/Encryption/CsfleSchemaBuilderTests.cs @@ -0,0 +1,710 @@ +/* Copyright 2010-present MongoDB Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +using System; +using System.Collections.Generic; +using FluentAssertions; +using MongoDB.Bson; +using MongoDB.Bson.Serialization.Attributes; +using MongoDB.Driver.Encryption; +using Xunit; + +namespace MongoDB.Driver.Tests.Encryption +{ + public class CsfleSchemaBuilderTests + { + private readonly CollectionNamespace _collectionNamespace = CollectionNamespace.FromFullName("medicalRecords.patients"); + private const string _keyIdString = "6f4af470-00d1-401f-ac39-f45902a0c0c8"; + private static Guid _keyId = Guid.Parse(_keyIdString); + + [Fact] + public void CsfleSchemaBuilder_works_as_expected() + { + var builder = CsfleSchemaBuilder.Create(schemaBuilder => + { + schemaBuilder.Encrypt<Patient>(_collectionNamespace, builder => + { + builder + .EncryptMetadata(keyId: _keyId) + .Property(p => p.MedicalRecords, BsonType.Array, + EncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random) + .Property("bloodType", BsonType.String, + algorithm: EncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random) + .Property(p => p.Ssn, BsonType.Int32, + EncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic) + .Property(p => p.Insurance, innerBuilder => + { + innerBuilder + .Property(i => i.PolicyNumber, BsonType.Int32, + EncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic); + }) + .PatternProperty("_PIIString$", BsonType.String, EncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic) + .PatternProperty("_PIIArray$", BsonType.Array, EncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random) + 
.PatternProperty(p => p.Insurance, innerBuilder => + { + innerBuilder + .PatternProperty("_PIIString$", BsonType.String, + EncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic) + .PatternProperty("_PIINumber$", BsonType.Int32, + algorithm: EncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic); + }); + + } ); + }); + + var expected = new Dictionary<string, string> + { + [_collectionNamespace.FullName] = """ + { + "bsonType": "object", + "encryptMetadata": { + "keyId": [{ "$binary" : { "base64" : "b0r0cADRQB+sOfRZAqDAyA==", "subType" : "04" } }] + }, + "properties": { + "insurance": { + "bsonType": "object", + "properties": { + "policyNumber": { + "encrypt": { + "bsonType": "int", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + } + }, + "medicalRecords": { + "encrypt": { + "bsonType": "array", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "bloodType": { + "encrypt": { + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + "ssn": { + "encrypt": { + "bsonType": "int", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "patternProperties": { + "_PIIString$": { + "encrypt": { + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + }, + }, + "_PIIArray$": { + "encrypt": { + "bsonType": "array", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", + }, + }, + "insurance": { + "bsonType": "object", + "patternProperties": { + "_PIINumber$": { + "encrypt": { + "bsonType": "int", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + }, + }, + "_PIIString$": { + "encrypt": { + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic", + }, + }, + }, + }, + }, + } + """ + }; + + AssertOutcomeCsfleSchemaBuilder(builder, expected); + } + + [Fact] + public void CsfleSchemaBuilder_with_multiple_types_works_as_expected() + { + var testCollectionNamespace = 
CollectionNamespace.FromFullName("test.class"); + + var builder = CsfleSchemaBuilder.Create(schemaBuilder => + { + schemaBuilder.Encrypt<Patient>(_collectionNamespace, builder => + { + builder + .EncryptMetadata(keyId: _keyId) + .Property(p => p.MedicalRecords, BsonType.Array, + EncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random); + }); + + schemaBuilder.Encrypt<TestClass>(testCollectionNamespace, builder => + { + builder.Property(t => t.TestString, BsonType.String); + }); + }); + + var expected = new Dictionary<string, string> + { + [_collectionNamespace.FullName] = """ + { + "bsonType": "object", + "encryptMetadata": { + "keyId": [{ "$binary" : { "base64" : "b0r0cADRQB+sOfRZAqDAyA==", "subType" : "04" } }] + }, + "properties": { + "medicalRecords": { + "encrypt": { + "bsonType": "array", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + }, + }, + } + """, + [testCollectionNamespace.FullName] = """ + { + "bsonType": "object", + "properties": { + "TestString": { + "encrypt": { + "bsonType": "string", + } + }, + } + } + """ + }; + + AssertOutcomeCsfleSchemaBuilder(builder, expected); + } + + [Fact] + public void CsfleSchemaBuilder_with_no_schemas_throws() + { + var builder = CsfleSchemaBuilder.Create(_ => + { + // No schemas added + }); + + var exception = Record.Exception(() => builder.Build()); + + exception.Should().NotBeNull(); + exception.Should().BeOfType<InvalidOperationException>(); + } + + [Theory] + [InlineData( + EncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random, + null, + """ "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" """)] + [InlineData( + null, + _keyIdString, + """ "keyId": [{ "$binary" : { "base64" : "b0r0cADRQB+sOfRZAqDAyA==", "subType" : "04" } }] """)] + public void EncryptedCollection_Metadata_works_as_expected(EncryptionAlgorithm? algorithm, string keyString, string expectedContent) + { + Guid? keyId = keyString is null ? 
null : Guid.Parse(keyString); + var builder = new EncryptedCollectionBuilder<Patient>(); + + builder.EncryptMetadata(keyId, algorithm); + + var expected = $$""" + { + "bsonType": "object", + "encryptMetadata": { + {{expectedContent}} + } + } + """; + + AssertOutcomeCollectionBuilder(builder, expected); + } + + [Theory] + [InlineData(BsonType.Array, + EncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random, + null, + """ "bsonType": "array", "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" """)] + [InlineData(BsonType.Array, + null, + _keyIdString, + """ "bsonType": "array", "keyId": [{ "$binary" : { "base64" : "b0r0cADRQB+sOfRZAqDAyA==", "subType" : "04" } }] """)] + [InlineData(BsonType.Array, + EncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random, + _keyIdString, + """ "bsonType": "array", "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", "keyId": [{ "$binary" : { "base64" : "b0r0cADRQB+sOfRZAqDAyA==", "subType" : "04" } }] """)] + public void EncryptedCollection_PatternProperty_works_as_expected(BsonType bsonType, EncryptionAlgorithm? algorithm, string keyString, string expectedContent) + { + Guid? keyId = keyString is null ? 
null : Guid.Parse(keyString); + var builder = new EncryptedCollectionBuilder<Patient>(); + + builder.PatternProperty("randomRegex*", bsonType, algorithm, keyId); + + var expected = $$""" + { + "bsonType": "object", + "patternProperties": { + "randomRegex*": { + "encrypt": { + {{expectedContent}} + } + } + } + } + """; + + AssertOutcomeCollectionBuilder(builder, expected); + } + + [Theory] + [InlineData(null, + null, + null, + "")] + [InlineData(new[] {BsonType.Array, BsonType.String}, + EncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random, + null, + """ "bsonType": ["array", "string"], "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" """)] + [InlineData(new[] {BsonType.Array, BsonType.String}, + null, + _keyIdString, + """ "bsonType": ["array", "string"], "keyId": [{ "$binary" : { "base64" : "b0r0cADRQB+sOfRZAqDAyA==", "subType" : "04" } }] """)] + [InlineData(new[] {BsonType.Array, BsonType.String}, + EncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random, + _keyIdString, + """ "bsonType": ["array", "string"], "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", "keyId": [{ "$binary" : { "base64" : "b0r0cADRQB+sOfRZAqDAyA==", "subType" : "04" } }] """)] + public void EncryptedCollection_PatternProperty_with_multiple_bson_types_works_as_expected(IEnumerable<BsonType> bsonTypes, EncryptionAlgorithm? algorithm, string keyString, string expectedContent) + { + Guid? keyId = keyString is null ? null : Guid.Parse(keyString); + var builder = new EncryptedCollectionBuilder<Patient>(); + + builder.PatternProperty("randomRegex*", bsonTypes, algorithm, keyId); + + var expected = $$""" + { + "bsonType": "object", + "patternProperties": { + "randomRegex*": { + "encrypt": { + {{expectedContent}} + } + } + } + } + """; + + AssertOutcomeCollectionBuilder(builder, expected); + } + + [Fact] + public void EncryptedCollection_PatternProperty_nested_works_as_expected() + { + Guid? 
keyId = Guid.Parse(_keyIdString); + var builder = new EncryptedCollectionBuilder<Patient>(); + + builder.PatternProperty(p => p.Insurance, innerBuilder => + { + innerBuilder + .EncryptMetadata(keyId) + .Property("policyNumber", BsonType.Int32, + EncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic) + .PatternProperty("randomRegex*", BsonType.String, + EncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random); + }); + + var expected = """ + { + "bsonType": "object", + "patternProperties": { + "insurance": { + "bsonType": "object", + "encryptMetadata": { + "keyId": [{ "$binary" : { "base64" : "b0r0cADRQB+sOfRZAqDAyA==", "subType" : "04" } }] + }, + "properties": { + "policyNumber": { + "encrypt": { + "bsonType": "int", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "patternProperties": { + "randomRegex*": { + "encrypt": { + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + } + } + } + } + } + """; + + AssertOutcomeCollectionBuilder(builder, expected); + } + + [Fact] + public void EncryptedCollection_PatternProperty_nested_with_string_works_as_expected() + { + Guid? 
keyId = Guid.Parse(_keyIdString); + var builder = new EncryptedCollectionBuilder<Patient>(); + + builder.PatternProperty<Insurance>("insurance", innerBuilder => + { + innerBuilder + .EncryptMetadata(keyId) + .Property("policyNumber", BsonType.Int32, + EncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic) + .PatternProperty("randomRegex*", BsonType.String, + EncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random); + }); + + var expected = """ + { + "bsonType": "object", + "patternProperties": { + "insurance": { + "bsonType": "object", + "encryptMetadata": { + "keyId": [{ "$binary" : { "base64" : "b0r0cADRQB+sOfRZAqDAyA==", "subType" : "04" } }] + }, + "properties": { + "policyNumber": { + "encrypt": { + "bsonType": "int", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "patternProperties": { + "randomRegex*": { + "encrypt": { + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + } + } + } + } + } + """; + + AssertOutcomeCollectionBuilder(builder, expected); + } + + [Theory] + [InlineData(BsonType.Array, + EncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random, + null, + """ "bsonType": "array", "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" """)] + [InlineData(BsonType.Array, + null, + _keyIdString, + """ "bsonType": "array", "keyId": [{ "$binary" : { "base64" : "b0r0cADRQB+sOfRZAqDAyA==", "subType" : "04" } }] """)] + [InlineData(BsonType.Array, + EncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random, + _keyIdString, + """ "bsonType": "array", "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", "keyId": [{ "$binary" : { "base64" : "b0r0cADRQB+sOfRZAqDAyA==", "subType" : "04" } }] """)] + public void EncryptedCollection_Property_with_expression_works_as_expected(BsonType bsonType, EncryptionAlgorithm? algorithm, string keyString, string expectedContent) + { + Guid? keyId = keyString is null ? 
null : Guid.Parse(keyString); + var builder = new EncryptedCollectionBuilder<Patient>(); + + builder.Property(p => p.MedicalRecords, bsonType, algorithm, keyId); + + var expected = $$""" + { + "bsonType": "object", + "properties": { + "medicalRecords": { + "encrypt": { + {{expectedContent}} + } + } + } + } + """; + + AssertOutcomeCollectionBuilder(builder, expected); + } + + [Theory] + [InlineData(null, + null, + null, + "")] + [InlineData(new[] {BsonType.Array, BsonType.String}, + EncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random, + null, + """ "bsonType": ["array", "string"], "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" """)] + [InlineData(new[] {BsonType.Array, BsonType.String}, + null, + _keyIdString, + """ "bsonType": ["array", "string"], "keyId": [{ "$binary" : { "base64" : "b0r0cADRQB+sOfRZAqDAyA==", "subType" : "04" } }] """)] + [InlineData(new[] {BsonType.Array, BsonType.String}, + EncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random, + _keyIdString, + """ "bsonType": ["array", "string"], "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", "keyId": [{ "$binary" : { "base64" : "b0r0cADRQB+sOfRZAqDAyA==", "subType" : "04" } }] """)] + public void EncryptedCollection_Property_with_multiple_bson_types_works_as_expected(IEnumerable<BsonType> bsonTypes, EncryptionAlgorithm? algorithm, string keyString, string expectedContent) + { + Guid? keyId = keyString is null ? 
null : Guid.Parse(keyString); + var builder = new EncryptedCollectionBuilder<Patient>(); + + builder.Property(p => p.MedicalRecords, bsonTypes, algorithm, keyId); + + var expected = $$""" + { + "bsonType": "object", + "properties": { + "medicalRecords": { + "encrypt": { + {{expectedContent}} + } + } + } + } + """; + + AssertOutcomeCollectionBuilder(builder, expected); + } + + [Theory] + [InlineData(BsonType.Array, + EncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random, + null, + """ "bsonType": "array", "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" """)] + [InlineData(BsonType.Array, + null, + _keyIdString, + """ "bsonType": "array", "keyId": [{ "$binary" : { "base64" : "b0r0cADRQB+sOfRZAqDAyA==", "subType" : "04" } }] """)] + [InlineData(BsonType.Array, + EncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random, + _keyIdString, + """ "bsonType": "array", "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random", "keyId": [{ "$binary" : { "base64" : "b0r0cADRQB+sOfRZAqDAyA==", "subType" : "04" } }] """)] + public void EncryptedCollection_Property_with_string_works_as_expected(BsonType bsonType, EncryptionAlgorithm? algorithm, string keyString, string expectedContent) + { + Guid? keyId = keyString is null ? null : Guid.Parse(keyString); + var builder = new EncryptedCollectionBuilder<Patient>(); + + builder.Property("medicalRecords", bsonType, algorithm, keyId); + + var expected = $$""" + { + "bsonType": "object", + "properties": { + "medicalRecords": { + "encrypt": { + {{expectedContent}} + } + } + } + } + """; + + AssertOutcomeCollectionBuilder(builder, expected); + } + + [Fact] + public void EncryptedCollection_Property_nested_works_as_expected() + { + Guid? 
keyId = Guid.Parse(_keyIdString); + var builder = new EncryptedCollectionBuilder<Patient>(); + + builder.Property(p => p.Insurance, innerBuilder => + { + innerBuilder + .EncryptMetadata(keyId) + .Property("policyNumber", BsonType.Int32, + EncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic) + .PatternProperty("randomRegex*", BsonType.String, + EncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random); + }); + + var expected = """ + { + "bsonType": "object", + "properties": { + "insurance": { + "bsonType": "object", + "encryptMetadata": { + "keyId": [{ "$binary" : { "base64" : "b0r0cADRQB+sOfRZAqDAyA==", "subType" : "04" } }] + }, + "properties": { + "policyNumber": { + "encrypt": { + "bsonType": "int", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "patternProperties": { + "randomRegex*": { + "encrypt": { + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + } + } + } + } + } + """; + + AssertOutcomeCollectionBuilder(builder, expected); + } + + [Fact] + public void EncryptedCollection_Property_nested_with_string_works_as_expected() + { + Guid? 
keyId = Guid.Parse(_keyIdString); + var builder = new EncryptedCollectionBuilder<Patient>(); + + builder.Property<Insurance>("insurance", innerBuilder => + { + innerBuilder + .EncryptMetadata(keyId) + .Property("policyNumber", BsonType.Int32, + EncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic) + .PatternProperty("randomRegex*", BsonType.String, + EncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random); + }); + + var expected = """ + { + "bsonType": "object", + "properties": { + "insurance": { + "bsonType": "object", + "encryptMetadata": { + "keyId": [{ "$binary" : { "base64" : "b0r0cADRQB+sOfRZAqDAyA==", "subType" : "04" } }] + }, + "properties": { + "policyNumber": { + "encrypt": { + "bsonType": "int", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + "patternProperties": { + "randomRegex*": { + "encrypt": { + "bsonType": "string", + "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random" + } + } + } + } + } + } + """; + + AssertOutcomeCollectionBuilder(builder, expected); + } + + [Fact] + public void EncryptedCollection_Property_with_empty_bson_types_throws() + { + var builder = new EncryptedCollectionBuilder<Patient>(); + + var recordedException = Record.Exception(() => builder.Property("test", [])); + recordedException.Should().NotBeNull(); + recordedException.Should().BeOfType<ArgumentException>(); + } + + [Fact] + public void EncryptedCollection_Metadata_with_empty_algorithm_and_key_throws() + { + var builder = new EncryptedCollectionBuilder<Patient>(); + + var recordedException = Record.Exception(() => builder.EncryptMetadata(null, null)); + recordedException.Should().NotBeNull(); + recordedException.Should().BeOfType<ArgumentException>(); + } + + private void AssertOutcomeCsfleSchemaBuilder(CsfleSchemaBuilder builder, Dictionary<string, string> expectedSchema) + { + var builtSchema = builder.Build(); + expectedSchema.Should().HaveCount(builtSchema.Count); + foreach (var collectionNamespace in expectedSchema.Keys) + { + 
var parsed = BsonDocument.Parse(expectedSchema[collectionNamespace]); + builtSchema[collectionNamespace].Should().BeEquivalentTo(parsed); + } + } + + private void AssertOutcomeCollectionBuilder<T>(EncryptedCollectionBuilder<T> builder, string expected) + { + var builtSchema = builder.Build(); + var expectedSchema = BsonDocument.Parse(expected); + builtSchema.Should().BeEquivalentTo(expectedSchema); + } + + internal class TestClass + { + public ObjectId Id { get; set; } + + public string TestString { get; set; } + } + + internal class Patient + { + [BsonId] + public ObjectId Id { get; set; } + + [BsonElement("name")] + public string Name { get; set; } + + [BsonElement("ssn")] + public int Ssn { get; set; } + + [BsonElement("bloodType")] + public string BloodType { get; set; } + + [BsonElement("medicalRecords")] + public List<MedicalRecord> MedicalRecords { get; set; } + + [BsonElement("insurance")] + public Insurance Insurance { get; set; } + } + + internal class MedicalRecord + { + [BsonElement("weight")] + public int Weight { get; set; } + + [BsonElement("bloodPressure")] + public string BloodPressure { get; set; } + } + + internal class Insurance + { + [BsonElement("provider")] + public string Provider { get; set; } + + [BsonElement("policyNumber")] + public int PolicyNumber { get; set; } + } + } +} \ No newline at end of file diff --git a/tests/MongoDB.Driver.Tests/Encryption/EncryptOptionsTests.cs b/tests/MongoDB.Driver.Tests/Encryption/EncryptOptionsTests.cs index 77c0d4e8d89..f9ee52f8690 100644 --- a/tests/MongoDB.Driver.Tests/Encryption/EncryptOptionsTests.cs +++ b/tests/MongoDB.Driver.Tests/Encryption/EncryptOptionsTests.cs @@ -34,7 +34,7 @@ public void Constructor_should_fail_when_contentionFactor_and_algorithm_is_not_i { var exception = Record.Exception(() => new EncryptOptions(algorithm: "test", contentionFactor: 1, keyId: Guid.NewGuid())); var e = exception.Should().BeOfType<ArgumentException>().Subject; - e.Message.Should().Be("ContentionFactor only 
applies for Indexed or Range algorithm."); + e.Message.Should().Be("ContentionFactor only applies for Indexed, Range, or TextPreview algorithm."); } [Fact] @@ -58,7 +58,7 @@ public void Constructor_should_fail_when_queryType_and_algorithm_is_not_indexed( { var exception = Record.Exception(() => new EncryptOptions(algorithm: "test", queryType: "equality", keyId: Guid.NewGuid())); var e = exception.Should().BeOfType<ArgumentException>().Subject; - e.Message.Should().Be("QueryType only applies for Indexed or Range algorithm."); + e.Message.Should().Be("QueryType only applies for Indexed, Range, or TextPreview algorithm."); } [Fact] @@ -69,6 +69,83 @@ public void Constructor_should_fail_when_rangeOptions_and_algorithm_is_not_range e.Message.Should().Be("RangeOptions only applies for Range algorithm."); } + [Fact] + public void Constructor_should_fail_when_textOptions_is_null() + { + var exception = Record.Exception(() => new EncryptOptions(algorithm: "test", textOptions: null)); + exception.Should().BeOfType<ArgumentNullException>().Which.ParamName.Should().Be("textOptions"); + } + + [Fact] + public void Constructor_should_fail_when_textOptions_and_algorithm_is_not_textPreview() + { + var exception = Record.Exception(() => new EncryptOptions(algorithm: "test", keyId: Guid.NewGuid(), textOptions: new TextOptions(true, true))); + + exception.Should().BeOfType<ArgumentException>() + .Which.Message.Should().Be("TextOptions only applies for TextPreview algorithm."); + } + + [Fact] + public void Constructor_should_fail_with_invalid_queryType_for_textPreview() + { + var invalidQueryType = "equality"; + + var exception = Record.Exception(() => new EncryptOptions(algorithm: EncryptionAlgorithm.TextPreview, keyId: Guid.NewGuid(), queryType: invalidQueryType)); + + exception.Should().BeOfType<ArgumentException>() + .Which.Message.Should().Contain($"QueryType '{invalidQueryType}' is not valid for TextPreview algorithm"); + } + + [Theory] + [InlineData("prefixPreview")] + 
[InlineData("suffixPreview")] + [InlineData("substringPreview")] + public void Constructor_should_succeed_with_valid_queryType_for_textPreview(string validQueryType) + { + var subject = new EncryptOptions(algorithm: EncryptionAlgorithm.TextPreview, keyId: Guid.NewGuid(), queryType: validQueryType); + + subject.QueryType.Should().Be(validQueryType); + } + + [Fact] + public void Constructor_should_fail_when_prefixPreview_queryType_without_prefixOptions() + { + var exception = Record.Exception(() => new EncryptOptions( + algorithm: EncryptionAlgorithm.TextPreview, + keyId: Guid.NewGuid(), + queryType: "prefixPreview", + textOptions: new TextOptions(true, true))); + + exception.Should().BeOfType<ArgumentException>() + .Which.Message.Should().Contain("PrefixOptions must be set"); + } + + [Fact] + public void Constructor_should_fail_when_substringPreview_queryType_without_substringOptions() + { + var exception = Record.Exception(() => new EncryptOptions( + algorithm: EncryptionAlgorithm.TextPreview, + keyId: Guid.NewGuid(), + queryType: "substringPreview", + textOptions: new TextOptions(true, true))); + + exception.Should().BeOfType<ArgumentException>() + .Which.Message.Should().Contain("SubstringOptions must be set"); + } + + [Fact] + public void Constructor_should_fail_when_suffixPreview_queryType_without_suffixOptions() + { + var exception = Record.Exception(() => new EncryptOptions( + algorithm: EncryptionAlgorithm.TextPreview, + keyId: Guid.NewGuid(), + queryType: "suffixPreview", + textOptions: new TextOptions(true, true))); + + exception.Should().BeOfType<ArgumentException>() + .Which.Message.Should().Contain("SuffixOptions must be set"); + } + [Theory] [InlineData(EncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic, "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic")] [InlineData(EncryptionAlgorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Random, "AEAD_AES_256_CBC_HMAC_SHA_512-Random")] @@ -91,6 +168,9 @@ public void 
Constructor_should_fail_when_rangeOptions_and_algorithm_is_not_range // range algorithm [InlineData(EncryptionAlgorithm.Range, "Range")] [InlineData("Range", "Range")] + // textPreview algorithm + [InlineData(EncryptionAlgorithm.TextPreview, "TextPreview")] + [InlineData("TextPreview", "TextPreview")] public void Constructor_should_support_different_algorithm_representations(object algorithm, string expectedAlgorithmRepresentation) { var alternateKeyName = "test"; @@ -110,6 +190,21 @@ public void Constructor_should_support_different_algorithm_representations(objec subject.KeyId.Should().NotHaveValue(); } + [Fact] + public void With_textOptions_should_create_new_instance_with_updated_textOptions() + { + var originalTextOptions = new TextOptions(true, true, prefixOptions: new PrefixOptions(10, 2)); + var newTextOptions = new TextOptions(false, false, substringOptions: new SubstringOptions(10, 8, 2)); + + var subject = new EncryptOptions(algorithm: EncryptionAlgorithm.TextPreview, keyId: Guid.NewGuid(), textOptions: originalTextOptions); + + var updated = subject.With(textOptions: newTextOptions); + + updated.TextOptions.Should().BeSameAs(newTextOptions); + updated.Algorithm.Should().Be(subject.Algorithm); + updated.KeyId.Should().Be(subject.KeyId); + } + [Fact] public void With_should_set_correct_values() { diff --git a/tests/MongoDB.Driver.Tests/Encryption/MongoEncryptionCreateCollectionExceptionTests.cs b/tests/MongoDB.Driver.Tests/Encryption/MongoEncryptionCreateCollectionExceptionTests.cs deleted file mode 100644 index 5f282702bb0..00000000000 --- a/tests/MongoDB.Driver.Tests/Encryption/MongoEncryptionCreateCollectionExceptionTests.cs +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/tests/MongoDB.Driver.Tests/Encryption/MongoEncryptionExceptionTests.cs b/tests/MongoDB.Driver.Tests/Encryption/MongoEncryptionExceptionTests.cs deleted file mode 100644 index 5f282702bb0..00000000000 --- 
a/tests/MongoDB.Driver.Tests/Encryption/MongoEncryptionExceptionTests.cs +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/tests/MongoDB.Driver.Tests/FilterDefinitionBuilderTests.cs b/tests/MongoDB.Driver.Tests/FilterDefinitionBuilderTests.cs index d51e76fd149..1cd36373e43 100644 --- a/tests/MongoDB.Driver.Tests/FilterDefinitionBuilderTests.cs +++ b/tests/MongoDB.Driver.Tests/FilterDefinitionBuilderTests.cs @@ -1140,6 +1140,7 @@ private enum ProductType } } + [Trait("Category", "Integration")] public class FieldDefinitionBuilderUInt32Tests { #region static @@ -1361,6 +1362,7 @@ private class DocumentWithUInt32Field } } + [Trait("Category", "Integration")] public class FieldDefinitionBuilderUInt64Tests { #region static diff --git a/tests/MongoDB.Driver.Tests/FindExpressionProjectionDefinitionTests.cs b/tests/MongoDB.Driver.Tests/FindExpressionProjectionDefinitionTests.cs index 018f813209a..ddeb5817e7e 100644 --- a/tests/MongoDB.Driver.Tests/FindExpressionProjectionDefinitionTests.cs +++ b/tests/MongoDB.Driver.Tests/FindExpressionProjectionDefinitionTests.cs @@ -30,7 +30,7 @@ public void Projection_to_class_should_work( [Values(false, true)] bool renderForFind) { var expectedRenderedProjection = renderForFind switch - { + { true => "{ A : 1, X : '$B', _id : 0 }", false => "{ A : '$A', X : '$B', _id : 0 }" }; diff --git a/tests/MongoDB.Driver.Tests/FindFluentTests.cs b/tests/MongoDB.Driver.Tests/FindFluentTests.cs index fb70cd6f43c..78faff1ec9b 100644 --- a/tests/MongoDB.Driver.Tests/FindFluentTests.cs +++ b/tests/MongoDB.Driver.Tests/FindFluentTests.cs @@ -303,8 +303,6 @@ public void ToCursor_should_call_collection_Find_with_expected_arguments( [Fact] public void ToString_should_return_the_correct_string() { - RequireServer.Check().Supports(Feature.FindProjectionExpressions); - var subject = CreateSubject(); subject.Filter = new BsonDocument("Age", 20); subject.Options.Collation = new Collation("en_US"); @@ -325,7 +323,7 @@ public void 
ToString_should_return_the_correct_string() var str = find.ToString(); - var expectedProjection = + var expectedProjection = "{ \"_v\" : { \"$concat\" : [\"$FirstName\", \" \", \"$LastName\"] }, \"_id\" : 0 }"; str.Should().Be( diff --git a/tests/MongoDB.Driver.Tests/GridFS/GridFSBucketTests.cs b/tests/MongoDB.Driver.Tests/GridFS/GridFSBucketTests.cs index 3ead29d9c82..f4e969d1085 100644 --- a/tests/MongoDB.Driver.Tests/GridFS/GridFSBucketTests.cs +++ b/tests/MongoDB.Driver.Tests/GridFS/GridFSBucketTests.cs @@ -327,6 +327,7 @@ public void DownloadToStreamByName_should_throw_when_filename_is_null( [Theory] [ParameterAttributeData] + [Trait("Category", "Integration")] public void Drop_should_drop_the_files_and_chunks_collections( [Values(false, true)] bool async) { @@ -352,6 +353,7 @@ public void Drop_should_drop_the_files_and_chunks_collections( [Theory] [ParameterAttributeData] + [Trait("Category", "Integration")] public void Drop_should_throw_when_a_write_concern_error_occurss( [Values(false, true)] bool async) @@ -629,6 +631,7 @@ public void UploadFromStream_should_throw_when_source_is_null( [Theory] [ParameterAttributeData] + [Trait("Category", "Integration")] public void GridFS_should_work_with_strict_stable_api( [Values(false, true)] bool async) { diff --git a/tests/MongoDB.Driver.Tests/GridFS/GridFSDownloadStreamBaseTests.cs b/tests/MongoDB.Driver.Tests/GridFS/GridFSDownloadStreamBaseTests.cs index 210cf0c8e4c..948abb45c1c 100644 --- a/tests/MongoDB.Driver.Tests/GridFS/GridFSDownloadStreamBaseTests.cs +++ b/tests/MongoDB.Driver.Tests/GridFS/GridFSDownloadStreamBaseTests.cs @@ -146,6 +146,7 @@ public void constructor_should_initialize_instance() [Theory] [ParameterAttributeData] + [Trait("Category", "Integration")] public void CopyTo_should_copy_stream( [Values(0.0, 0.5, 1.0, 1.5, 2.0, 2.5)] double contentSizeMultiple, [Values(null, 128)] int? 
bufferSize, @@ -219,6 +220,7 @@ public void FileInfo_should_return_expected_result() [Theory] [ParameterAttributeData] + [Trait("Category", "Integration")] public void Flush_should_not_throw( [Values(false, true)] bool async) { diff --git a/tests/MongoDB.Driver.Tests/GridFS/GridFSSeekableDownloadStreamTests.cs b/tests/MongoDB.Driver.Tests/GridFS/GridFSSeekableDownloadStreamTests.cs index a6f336c4f6c..f291adf375e 100644 --- a/tests/MongoDB.Driver.Tests/GridFS/GridFSSeekableDownloadStreamTests.cs +++ b/tests/MongoDB.Driver.Tests/GridFS/GridFSSeekableDownloadStreamTests.cs @@ -94,6 +94,7 @@ public void RetryReads_get_and_set_should_work( [Theory] [ParameterAttributeData] + [Trait("Category", "Integration")] public void Read_should_return_expected_result( [Values(0.0, 0.5, 1.0, 1.5, 2.0, 2.5)] double fileLengthMultiple, [Values(0.0, 0.5)] double positionMultiple, diff --git a/tests/MongoDB.Driver.Tests/GridFS/GridFSUploadStreamTests.cs b/tests/MongoDB.Driver.Tests/GridFS/GridFSUploadStreamTests.cs index 15070c8b0f6..2be46bb999c 100644 --- a/tests/MongoDB.Driver.Tests/GridFS/GridFSUploadStreamTests.cs +++ b/tests/MongoDB.Driver.Tests/GridFS/GridFSUploadStreamTests.cs @@ -27,6 +27,7 @@ namespace MongoDB.Driver.Tests.GridFS { + [Trait("Category", "Integration")] public class GridFSUploadStreamTests { // public methods diff --git a/tests/MongoDB.Driver.Tests/IJsonDrivenTestRunner.cs b/tests/MongoDB.Driver.Tests/IJsonDrivenTestRunner.cs index 85fde463079..f2bea86436c 100644 --- a/tests/MongoDB.Driver.Tests/IJsonDrivenTestRunner.cs +++ b/tests/MongoDB.Driver.Tests/IJsonDrivenTestRunner.cs @@ -27,8 +27,6 @@ namespace MongoDB.Driver.Tests internal interface IJsonDrivenTestRunner { IClusterInternal FailPointCluster { get; } - IServer FailPointServer { get; } - void ConfigureFailPoint(IServer server, ICoreSessionHandle session, BsonDocument failCommand); Task ConfigureFailPointAsync(IServer server, ICoreSessionHandle session, BsonDocument failCommand); } @@ -49,8 +47,6 @@ public 
IClusterInternal FailPointCluster } } - public IServer FailPointServer => null; - public void ConfigureFailPoint(IServer server, ICoreSessionHandle session, BsonDocument failCommand) { var failPoint = FailPoint.Configure(server, session, failCommand, withAsync: false); diff --git a/tests/MongoDB.Driver.Tests/Jira/CSharp2564Tests.cs b/tests/MongoDB.Driver.Tests/Jira/CSharp2564Tests.cs index 86506f44c21..0bfd0b6aefc 100644 --- a/tests/MongoDB.Driver.Tests/Jira/CSharp2564Tests.cs +++ b/tests/MongoDB.Driver.Tests/Jira/CSharp2564Tests.cs @@ -28,6 +28,7 @@ namespace MongoDB.Driver.Tests.Jira { + [Trait("Category", "Integration")] public class CSharp2564Tests { [Fact] diff --git a/tests/MongoDB.Driver.Tests/Jira/CSharp2622Tests.cs b/tests/MongoDB.Driver.Tests/Jira/CSharp2622Tests.cs index a87e83779dd..8e759cbe30b 100644 --- a/tests/MongoDB.Driver.Tests/Jira/CSharp2622Tests.cs +++ b/tests/MongoDB.Driver.Tests/Jira/CSharp2622Tests.cs @@ -13,15 +13,14 @@ * limitations under the License. */ -using System; using FluentAssertions; using MongoDB.Bson; using MongoDB.Bson.Serialization.Attributes; -using MongoDB.Bson.Serialization.Options; using Xunit; namespace MongoDB.Driver.Tests.Jira { + [Trait("Category", "Integration")] public class CSharp2622Tests { // public methods diff --git a/tests/MongoDB.Driver.Tests/Jira/CSharp3188Tests.cs b/tests/MongoDB.Driver.Tests/Jira/CSharp3188Tests.cs index c3395ad3d04..7e0d4cb75f2 100644 --- a/tests/MongoDB.Driver.Tests/Jira/CSharp3188Tests.cs +++ b/tests/MongoDB.Driver.Tests/Jira/CSharp3188Tests.cs @@ -26,6 +26,7 @@ namespace MongoDB.Driver.Tests.Jira { + [Trait("Category", "Integration")] public class CSharp3188Tests { [Theory] @@ -65,33 +66,12 @@ public void Connection_timeout_should_throw_expected_exception([Values(false, tr .Limit(1) .Project(projectionDefinition); - if (async) - { - var exception = Record.Exception(() => collection.AggregateAsync(pipeline).GetAwaiter().GetResult()); + var exception = Record.Exception(() => 
collection.AggregateAsync(pipeline).GetAwaiter().GetResult()); - var mongoConnectionException = exception.Should().BeOfType<MongoConnectionException>().Subject; -#pragma warning disable CS0618 // Type or member is obsolete - mongoConnectionException.ContainsSocketTimeoutException.Should().BeFalse(); -#pragma warning restore CS0618 // Type or member is obsolete - mongoConnectionException.ContainsTimeoutException.Should().BeTrue(); - var baseException = GetBaseException(mongoConnectionException); - baseException.Should().BeOfType<TimeoutException>().Which.InnerException.Should().BeNull(); - } - else - { - var exception = Record.Exception(() => collection.Aggregate(pipeline)); - - var mongoConnectionException = exception.Should().BeOfType<MongoConnectionException>().Subject; -#pragma warning disable CS0618 // Type or member is obsolete - mongoConnectionException.ContainsSocketTimeoutException.Should().BeTrue(); -#pragma warning restore CS0618 // Type or member is obsolete - mongoConnectionException.ContainsTimeoutException.Should().BeTrue(); - var baseException = GetBaseException(mongoConnectionException); - var socketException = baseException.Should().BeOfType<IOException>() - .Which.InnerException.Should().BeOfType<SocketException>().Subject; - socketException.SocketErrorCode.Should().Be(SocketError.TimedOut); - socketException.InnerException.Should().BeNull(); - } + var mongoConnectionException = exception.Should().BeOfType<MongoConnectionException>().Subject; + mongoConnectionException.ContainsTimeoutException.Should().BeTrue(); + var baseException = GetBaseException(mongoConnectionException); + baseException.Should().BeOfType<TimeoutException>().Which.InnerException.Should().BeNull(); } Exception GetBaseException(MongoConnectionException mongoConnectionException) diff --git a/tests/MongoDB.Driver.Tests/Jira/CSharp3225Tests.cs b/tests/MongoDB.Driver.Tests/Jira/CSharp3225Tests.cs index 7241343353b..fcf66974e75 100644 --- 
a/tests/MongoDB.Driver.Tests/Jira/CSharp3225Tests.cs +++ b/tests/MongoDB.Driver.Tests/Jira/CSharp3225Tests.cs @@ -24,6 +24,7 @@ namespace MongoDB.Driver.Tests.Jira { + [Trait("Category", "Integration")] public class CSharp3225Tests { // these examples are taken from: https://www.mongodb.com/docs/manual/reference/operator/aggregation/setWindowFields/#examples diff --git a/tests/MongoDB.Driver.Tests/Jira/CSharp3397Tests.cs b/tests/MongoDB.Driver.Tests/Jira/CSharp3397Tests.cs index 49a24a71359..a7212d38c57 100644 --- a/tests/MongoDB.Driver.Tests/Jira/CSharp3397Tests.cs +++ b/tests/MongoDB.Driver.Tests/Jira/CSharp3397Tests.cs @@ -14,7 +14,6 @@ */ using System; -using System.Linq; using FluentAssertions; using MongoDB.Bson; using MongoDB.Bson.Serialization.Attributes; @@ -24,6 +23,7 @@ namespace MongoDB.Driver.Tests.Jira { + [Trait("Category", "Integration")] public class CSharp3397Tests { [Fact] diff --git a/tests/MongoDB.Driver.Tests/Jira/CSharp4040Tests.cs b/tests/MongoDB.Driver.Tests/Jira/CSharp4040Tests.cs new file mode 100644 index 00000000000..7858491fd0a --- /dev/null +++ b/tests/MongoDB.Driver.Tests/Jira/CSharp4040Tests.cs @@ -0,0 +1,47 @@ +/* Copyright 2010-present MongoDB Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +using FluentAssertions; +using MongoDB.Bson; +using MongoDB.Bson.Serialization.Attributes; +using Xunit; + +namespace MongoDB.Driver.Tests.Jira +{ + public class CSharp4040Tests + { + private class BaseDocument + { + [BsonId] public ObjectId Id { get; set; } = ObjectId.GenerateNewId(); + + [BsonElement("_t")] + public string Field1 { get; set; } + } + + private class DerivedDocument : BaseDocument {} + + [Fact] + public void BsonClassMapSerializer_serialization_when_using_field_with_same_element_name_as_discriminator_should_throw() + { + var obj = new DerivedDocument { Field1 = "field1" }; + + var recordedException = Record.Exception(() => obj.ToJson(typeof(BaseDocument))); + recordedException.Should().NotBeNull(); + recordedException.Should().BeOfType<BsonSerializationException>(); + recordedException.Message.Should().Be("The discriminator element name cannot be _t because it is already being used" + + " by the property Field1 of type MongoDB.Driver.Tests.Jira.CSharp4040Tests+DerivedDocument"); + } + } +} diff --git a/tests/MongoDB.Driver.Tests/Jira/CSharp4646Tests.cs b/tests/MongoDB.Driver.Tests/Jira/CSharp4646Tests.cs index 7cbb6022ef4..6000c7da53d 100644 --- a/tests/MongoDB.Driver.Tests/Jira/CSharp4646Tests.cs +++ b/tests/MongoDB.Driver.Tests/Jira/CSharp4646Tests.cs @@ -72,7 +72,7 @@ public void Watch_database_filtering_on_collection_name() // some older versions of the server require the database to exist before you can watch it CreateDatabase(database); - + using var changeStream = database.Watch(pipeline); } diff --git a/tests/MongoDB.Driver.Tests/Jira/CSharp5697Tests.cs b/tests/MongoDB.Driver.Tests/Jira/CSharp5697Tests.cs new file mode 100644 index 00000000000..4278485b93c --- /dev/null +++ b/tests/MongoDB.Driver.Tests/Jira/CSharp5697Tests.cs @@ -0,0 +1,106 @@ +/* Copyright 2010-present MongoDB Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +using System; +using System.Collections.Generic; +using System.Threading.Tasks; +using FluentAssertions; +using MongoDB.Bson.Serialization; +using MongoDB.Driver.Core.Misc; +using MongoDB.Driver.TestHelpers; +using MongoDB.TestHelpers.XunitExtensions; +using Xunit; + +namespace MongoDB.Driver.Tests.Jira; + +public class CSharp5697Tests : IntegrationTest<CSharp5697Tests.ClassFixture> +{ + public CSharp5697Tests(ClassFixture fixture) + : base(fixture, server => server.Supports(Feature.ClientBulkWrite)) + { + } + + [Theory] + [ParameterAttributeData] + public async Task ClientBulkWrite_supports_complex_id([Values(true, false)] bool async) + { + var id = async ? "1" : "2"; + var options = new ClientBulkWriteOptions { VerboseResult = true }; + BulkWriteModel[] models = + [ + new BulkWriteInsertOneModel<Document>( + Fixture.Collection.CollectionNamespace, + new Document(new DocumentId(id))) + ]; + + var result = async ? 
+ await Fixture.Client.BulkWriteAsync(models, options) : + Fixture.Client.BulkWrite(models, options); + + result.InsertResults[0].DocumentId.Should().BeOfType<DocumentId>() + .Subject.Key.Should().Be(id); + } + + public class Document + { + public Document(DocumentId id) + { + Id = id; + } + + public DocumentId Id { get; } + } + + public class DocumentId + { + public DocumentId(string key) + { + Key = key; + } + + public string Key { get; } + } + + public sealed class DocumentIdSerializer : IBsonSerializer<DocumentId> + { + public Type ValueType => typeof(DocumentId); + + public DocumentId Deserialize(BsonDeserializationContext context, BsonDeserializationArgs args) + => new DocumentId(context.Reader.ReadString()); + + public void Serialize(BsonSerializationContext context, BsonSerializationArgs args, DocumentId value) + => context.Writer.WriteString(value.Key); + + public void Serialize(BsonSerializationContext context, BsonSerializationArgs args, object value) + { + var id = (DocumentId)value; + Serialize(context, args, id); + } + + object IBsonSerializer.Deserialize(BsonDeserializationContext context, BsonDeserializationArgs args) + => Deserialize(context, args); + } + + public class ClassFixture : MongoCollectionFixture<Document> + { + public ClassFixture() + { + BsonSerializer.RegisterSerializer( new DocumentIdSerializer()); + } + + protected override IEnumerable<Document> InitialData => null; + } +} + diff --git a/tests/MongoDB.Driver.Tests/JsonDrivenTests/JsonDrivenConfigureFailPointTest.cs b/tests/MongoDB.Driver.Tests/JsonDrivenTests/JsonDrivenConfigureFailPointTest.cs index b3720607b1c..cda1386f787 100644 --- a/tests/MongoDB.Driver.Tests/JsonDrivenTests/JsonDrivenConfigureFailPointTest.cs +++ b/tests/MongoDB.Driver.Tests/JsonDrivenTests/JsonDrivenConfigureFailPointTest.cs @@ -46,24 +46,14 @@ protected override async Task CallMethodAsync(CancellationToken cancellationToke protected virtual IServer GetFailPointServer() { - if (TestRunner.FailPointServer != 
null) - { - return TestRunner.FailPointServer; - } - var cluster = TestRunner.FailPointCluster; - return cluster.SelectServer(WritableServerSelector.Instance, CancellationToken.None); + return cluster.SelectServer(OperationContext.NoTimeout, WritableServerSelector.Instance); } protected async virtual Task<IServer> GetFailPointServerAsync() { - if (TestRunner.FailPointServer != null) - { - return TestRunner.FailPointServer; - } - var cluster = TestRunner.FailPointCluster; - return await cluster.SelectServerAsync(WritableServerSelector.Instance, CancellationToken.None).ConfigureAwait(false); + return await cluster.SelectServerAsync(OperationContext.NoTimeout, WritableServerSelector.Instance).ConfigureAwait(false); } protected override void SetArgument(string name, BsonValue value) diff --git a/tests/MongoDB.Driver.Tests/JsonDrivenTests/JsonDrivenTargetedFailPointTest.cs b/tests/MongoDB.Driver.Tests/JsonDrivenTests/JsonDrivenTargetedFailPointTest.cs index 44f2e342395..8a34e83638d 100644 --- a/tests/MongoDB.Driver.Tests/JsonDrivenTests/JsonDrivenTargetedFailPointTest.cs +++ b/tests/MongoDB.Driver.Tests/JsonDrivenTests/JsonDrivenTargetedFailPointTest.cs @@ -15,7 +15,6 @@ using System.Collections.Generic; using System.Net; -using System.Threading; using System.Threading.Tasks; using FluentAssertions; using MongoDB.Driver.Core.Clusters.ServerSelectors; @@ -34,17 +33,17 @@ protected override IServer GetFailPointServer() { var pinnedServerEndpoint = GetPinnedServerEndpointAndAssertNotNull(); var pinnedServerSelector = CreateServerSelector(pinnedServerEndpoint); - return TestRunner.FailPointCluster.SelectServer(pinnedServerSelector, CancellationToken.None); + return TestRunner.FailPointCluster.SelectServer(OperationContext.NoTimeout, pinnedServerSelector); } protected async override Task<IServer> GetFailPointServerAsync() { var pinnedServerEndpoint = GetPinnedServerEndpointAndAssertNotNull(); var pinnedServerSelector = CreateServerSelector(pinnedServerEndpoint); - return 
await TestRunner.FailPointCluster.SelectServerAsync(pinnedServerSelector, CancellationToken.None).ConfigureAwait(false); + return await TestRunner.FailPointCluster.SelectServerAsync(OperationContext.NoTimeout, pinnedServerSelector).ConfigureAwait(false); } - // private methods + // private methods private IServerSelector CreateServerSelector(EndPoint endpoint) { return new CompositeServerSelector(new IServerSelector[] diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp1326Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp1326Tests.cs index f641013292b..4d30844b780 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp1326Tests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp1326Tests.cs @@ -18,16 +18,22 @@ using FluentAssertions; using MongoDB.Bson; using MongoDB.Bson.Serialization.Attributes; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira { - public class CSharp1326Tests : Linq3IntegrationTest + public class CSharp1326Tests : LinqIntegrationTest<CSharp1326Tests.ClassFixture> { + public CSharp1326Tests(ClassFixture fixture) + : base(fixture) + { + } + [Fact] public void Projection_of_ArrayOfDocuments_dictionary_keys_and_values_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var parentIds = new int[] { 1, 2, 3 }; var childrenFilter = Builders<Child>.Filter.In(c => c.ParentId, parentIds) & @@ -52,21 +58,6 @@ public void Projection_of_ArrayOfDocuments_dictionary_keys_and_values_should_wor results[1].Value.Select(x => x.Id).Should().BeEquivalentTo(4); } - private IMongoCollection<Child> CreateCollection() - { - var collection = GetCollection<Child>("Children"); - - CreateCollection( - collection, - new Child { Id = 1, ParentId = 1, Gender = Gender.Male }, - new Child { Id = 2, ParentId = 1, Gender = Gender.Male }, - new Child { Id = 3, ParentId = 1, Gender = 
Gender.Female }, - new Child { Id = 4, ParentId = 2, Gender = Gender.Male }, - new Child { Id = 5, ParentId = 4, Gender = Gender.Male }); - - return collection; - } - public class Parent { public int Id { get; set; } @@ -83,5 +74,17 @@ public class Child } public enum Gender { Male, Female }; + + public sealed class ClassFixture : MongoCollectionFixture<Child> + { + protected override IEnumerable<Child> InitialData => + [ + new Child { Id = 1, ParentId = 1, Gender = Gender.Male }, + new Child { Id = 2, ParentId = 1, Gender = Gender.Male }, + new Child { Id = 3, ParentId = 1, Gender = Gender.Female }, + new Child { Id = 4, ParentId = 2, Gender = Gender.Male }, + new Child { Id = 5, ParentId = 4, Gender = Gender.Male } + ]; + } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp1486Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp1486Tests.cs index efeb0603e23..ba4e874cab2 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp1486Tests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp1486Tests.cs @@ -19,6 +19,7 @@ namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira { + [Trait("Category", "Integration")] public class CSharp1486Tests { [Fact] diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp1555Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp1555Tests.cs index 6260eec71d5..ad0cb8fdc47 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp1555Tests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp1555Tests.cs @@ -13,19 +13,26 @@ * limitations under the License. 
*/ +using System.Collections.Generic; using System.Linq; using FluentAssertions; using MongoDB.Bson.Serialization.Attributes; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira { - public class CSharp1555Tests : Linq3IntegrationTest + public class CSharp1555Tests : LinqIntegrationTest<CSharp1555Tests.ClassFixture> { + public CSharp1555Tests(ClassFixture fixture) + : base(fixture) + { + } + [Fact] public void Queryable_should_work() { - var collection = CreatePeopleCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable(); var stages = Translate(collection, queryable); @@ -38,7 +45,7 @@ public void Queryable_should_work() [Fact] public void Select_new_Person_should_work() { - var collection = CreatePeopleCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .Select(p => new Person { Id = p.Id, Name = p.Name }); @@ -52,7 +59,7 @@ public void Select_new_Person_should_work() [Fact] public void Select_new_Person_without_Name_should_work() { - var collection = CreatePeopleCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .Select(p => new Person { Id = p.Id }); @@ -66,7 +73,7 @@ public void Select_new_Person_without_Name_should_work() [Fact] public void Select_new_Person_without_Id_should_work() { - var collection = CreatePeopleCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .Select(p => new Person { Name = p.Name }); @@ -77,25 +84,20 @@ public void Select_new_Person_without_Id_should_work() result.ShouldBeEquivalentTo(new Person { Id = 0, Name = "A" }); } - private IMongoCollection<Person> CreatePeopleCollection() - { - var collection = GetCollection<Person>(); - - var documents = new[] - { - new Person { Id = 1, Name = "A" } - }; - CreateCollection(collection, documents); - - return collection; - } - - private class Person + public class Person { 
[BsonIgnoreIfNull] public int Id { get; set; } [BsonIgnoreIfNull] public string Name { get; set; } } + + public sealed class ClassFixture : MongoCollectionFixture<Person> + { + protected override IEnumerable<Person> InitialData => + [ + new Person { Id = 1, Name = "A" } + ]; + } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp1754Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp1754Tests.cs index 363079b1652..26a8c6c0c02 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp1754Tests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp1754Tests.cs @@ -13,19 +13,25 @@ * limitations under the License. */ -using System; +using System.Collections.Generic; using System.Linq; using FluentAssertions; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira { - public class CSharp1754Tests : Linq3IntegrationTest + public class CSharp1754Tests : LinqIntegrationTest<CSharp1754Tests.ClassFixture> { + public CSharp1754Tests(ClassFixture fixture) + : base(fixture) + { + } + [Fact] public void Test() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var requiredMeta = new[] { "a", "b" }; var queryable = collection.AsQueryable() @@ -38,20 +44,6 @@ public void Test() results.Select(r => r.Id).Should().Equal(2); } - private IMongoCollection<C> CreateCollection() - { - var collection = GetCollection<C>(); - - var documents = new[] - { - new C { Id = 1, Occurrences = new[] { new Occurrence { Meta = new[] { "a" } } } }, - new C { Id = 2, Occurrences = new[] { new Occurrence { Meta = new[] { "a" } }, new Occurrence { Meta = new[] { "a", "b" } } } } - }; - CreateCollection(collection, documents); - - return collection; - } - public class C { public int Id { get; set; } @@ -62,5 +54,14 @@ public class Occurrence { public string[] Meta { get; set; } } + + public sealed class ClassFixture : 
MongoCollectionFixture<C> + { + protected override IEnumerable<C> InitialData => + [ + new C { Id = 1, Occurrences = new[] { new Occurrence { Meta = new[] { "a" } } } }, + new C { Id = 2, Occurrences = new[] { new Occurrence { Meta = new[] { "a" } }, new Occurrence { Meta = new[] { "a", "b" } } } } + ]; + } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp1771Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp1771Tests.cs index 4136ece2276..f7accf57861 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp1771Tests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp1771Tests.cs @@ -18,6 +18,7 @@ namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira { + [Trait("Category", "Integration")] public class CSharp1771Tests { [Fact] diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp1906Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp1906Tests.cs index dba72f7fb13..534cca49414 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp1906Tests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp1906Tests.cs @@ -13,21 +13,27 @@ * limitations under the License. 
*/ -using System; +using System.Collections.Generic; using System.Linq; using System.Text.RegularExpressions; using FluentAssertions; using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira { - public class CSharp1906Tests : Linq3IntegrationTest + public class CSharp1906Tests : LinqIntegrationTest<CSharp1906Tests.ClassFixture> { + public CSharp1906Tests(ClassFixture fixture) + : base(fixture) + { + } + [Fact] public void Using_ToLower_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var lowerCaseValues = new[] { "abc", "def" }; // ensure all are lower case at compile time var queryable = collection.AsQueryable() .Where(c => lowerCaseValues.Contains(c.S.ToLower())); @@ -42,7 +48,7 @@ public void Using_ToLower_should_work() [Fact] public void Using_regular_expression_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var regularExpresssion = new StringOrRegularExpression[] { new Regex("ABC", RegexOptions.IgnoreCase), new Regex("DEF", RegexOptions.IgnoreCase) }; var queryable = collection.AsQueryable() .Where(c => c.S.StringIn(regularExpresssion)); @@ -54,25 +60,20 @@ public void Using_regular_expression_should_work() results.Select(x => x.Id).Should().Equal(1, 2); } - private IMongoCollection<C> CreateCollection() + public class C { - var collection = GetCollection<C>(); + public int Id { get; set; } + public string S { get; set; } + } - var documents = new[] - { + public sealed class ClassFixture : MongoCollectionFixture<C> + { + protected override IEnumerable<C> InitialData => + [ new C { Id = 1, S = "aBc" }, new C { Id = 2, S = "dEf" }, new C { Id = 3, S = "gHi" } - }; - CreateCollection(collection, documents); - - return collection; - } - - public class C - { - public int Id { get; set; } - public string S { get; set; } + ]; } } } diff --git 
a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2003Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2003Tests.cs index 075436de3b1..c68696c8285 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2003Tests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2003Tests.cs @@ -14,18 +14,25 @@ */ using System; +using System.Collections.Generic; using System.Linq; using FluentAssertions; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira { - public class CSharp2003Tests : Linq3IntegrationTest + public class CSharp2003Tests : LinqIntegrationTest<CSharp2003Tests.ClassFixture> { + public CSharp2003Tests(ClassFixture fixture) + : base(fixture) + { + } + [Fact] public void Find_BitsAllClear_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var mask = E.E2 | E.E4; var find = collection.Find(x => (x.E & mask) == 0); @@ -39,7 +46,7 @@ public void Find_BitsAllClear_should_work() [Fact] public void Find_BitsAllSet_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var mask = E.E2 | E.E4; var find = collection.Find(x => (x.E & mask) == mask); @@ -53,7 +60,7 @@ public void Find_BitsAllSet_should_work() [Fact] public void Find_BitsAnyClear_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var mask = E.E2 | E.E4; var find = collection.Find(x => (x.E & mask) != mask); @@ -67,7 +74,7 @@ public void Find_BitsAnyClear_should_work() [Fact] public void Find_BitsAnySet_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var mask = E.E2 | E.E4; var find = collection.Find(x => (x.E & mask) != 0); @@ -81,7 +88,7 @@ public void Find_BitsAnySet_should_work() [Fact] public void Where_BitsAllClear_should_work() { - var collection = CreateCollection(); + var collection = 
Fixture.Collection; var mask = E.E2 | E.E4; var queryable = collection.AsQueryable().Where(x => (x.E & mask) == 0); @@ -95,7 +102,7 @@ public void Where_BitsAllClear_should_work() [Fact] public void Where_BitsAllSet_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var mask = E.E2 | E.E4; var queryable = collection.AsQueryable().Where(x => (x.E & mask) == mask); @@ -109,7 +116,7 @@ public void Where_BitsAllSet_should_work() [Fact] public void Where_BitsAnyClear_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var mask = E.E2 | E.E4; var queryable = collection.AsQueryable().Where(x => (x.E & mask) != mask); @@ -123,7 +130,7 @@ public void Where_BitsAnyClear_should_work() [Fact] public void Where_BitsAnySet_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var mask = E.E2 | E.E4; var queryable = collection.AsQueryable().Where(x => (x.E & mask) != 0); @@ -134,25 +141,10 @@ public void Where_BitsAnySet_should_work() results.Select(x => x.Id).Should().Equal(2, 4, 6); } - private IMongoCollection<C> CreateCollection() - { - var collection = GetCollection<C>(); - - var documents = new[] - { - new C { Id = 1, E = E.E1 }, - new C { Id = 2, E = E.E2 }, - new C { Id = 4, E = E.E4 }, - new C { Id = 6, E = E.E2 | E.E4 }, - new C { Id = 8, E = E.E8 } - }; - CreateCollection(collection, documents); - - return collection; - } - [Flags] - private enum E +#pragma warning disable CA1714 + public enum E +#pragma warning restore CA1714 { E1 = 1, E2 = 2, @@ -160,10 +152,22 @@ private enum E E8 = 8 } - private class C + public class C { public int Id { get; set; } public E E; } + + public sealed class ClassFixture : MongoCollectionFixture<C> + { + protected override IEnumerable<C> InitialData => + [ + new C { Id = 1, E = E.E1 }, + new C { Id = 2, E = E.E2 }, + new C { Id = 4, E = E.E4 }, + new C { Id = 6, E = E.E2 | E.E4 }, + new C { Id = 8, E = E.E8 } + ]; 
+ } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2071Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2071Tests.cs index 9f2f9bbbb4c..e446c2b1e70 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2071Tests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2071Tests.cs @@ -18,6 +18,7 @@ namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira { + [Trait("Category", "Integration")] public class CSharp2071Tests { [Fact] diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2107Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2107Tests.cs index 9c5b2180dbf..98f330ff623 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2107Tests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2107Tests.cs @@ -13,21 +13,29 @@ * limitations under the License. */ +using System; using System.Collections.Generic; using System.Linq; using FluentAssertions; using MongoDB.Bson; using MongoDB.Bson.Serialization.Attributes; +using MongoDB.Driver.Core.TestHelpers.XunitExtensions; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira { - public class CSharp2107Tests : Linq3IntegrationTest + public class CSharp2107Tests : LinqIntegrationTest<CSharp2107Tests.ClassFixture> { + public CSharp2107Tests(ClassFixture fixture) + : base(fixture) + { + } + [Fact] public void Aggregate_Project_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .Project(doc => new @@ -46,7 +54,7 @@ public void Aggregate_Project_should_work() [Fact] public void Queryable_Select_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .Select(doc => new @@ -62,49 +70,44 @@ public void 
Queryable_Select_should_work() results[0].UserIsCustomer.Select(u => u.UserId).Should().Equal(1); } - private IMongoCollection<Customer> CreateCollection() - { - var collection = GetCollection<Customer>(); - - var documents = new[] - { - new Customer - { - Id = 1, - Users = new[] - { - new User { UserId = 1, Identity = new Identity { IdentityType = IdentityType.Type1 } }, - new User { UserId = 2, Identity = new Identity { IdentityType = IdentityType.Type2 } } - } - } - }; - CreateCollection(collection, documents); - - return collection; - } - - private class Customer + public class Customer { public int Id { get; set; } public IEnumerable<User> Users { get; set; } } - private class User + public class User { public int UserId { get; set; } public Identity Identity { get; set; } } - private class Identity + public class Identity { [BsonRepresentation(BsonType.String)] public IdentityType IdentityType { get; set; } } - private enum IdentityType + public enum IdentityType { Type1, Type2 } + + public sealed class ClassFixture : MongoCollectionFixture<Customer> + { + protected override IEnumerable<Customer> InitialData => + [ + new Customer + { + Id = 1, + Users = new[] + { + new User { UserId = 1, Identity = new Identity { IdentityType = IdentityType.Type1 } }, + new User { UserId = 2, Identity = new Identity { IdentityType = IdentityType.Type2 } } + } + } + ]; + } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2108Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2108Tests.cs index 03a98dc8e29..2b8ee28de3b 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2108Tests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2108Tests.cs @@ -14,23 +14,29 @@ */ using System; +using System.Collections.Generic; using System.Globalization; using System.Linq; using FluentAssertions; using MongoDB.Driver.Core.Misc; using MongoDB.Driver.Core.TestHelpers.XunitExtensions; using 
MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira { - public class CSharp2108Tests : Linq3IntegrationTest + public class CSharp2108Tests : LinqIntegrationTest<CSharp2108Tests.ClassFixture> { + public CSharp2108Tests(ClassFixture fixture) + : base(fixture, server => server.Supports(Feature.DateOperatorsNewIn50)) + { + } + [Fact] public void Aggregate_Project_should_work() { - RequireServer.Check().Supports(Feature.DateOperatorsNewIn50); - var collection = CreateCollection(); + var collection = Fixture.Collection; var endDate = DateTime.Parse("2020-01-03Z", null, DateTimeStyles.AdjustToUniversal); var queryable = collection.Aggregate() @@ -52,8 +58,7 @@ public void Aggregate_Project_should_work() [Fact] public void Queryable_Select_should_work() { - RequireServer.Check().Supports(Feature.DateOperatorsNewIn50); - var collection = CreateCollection(); + var collection = Fixture.Collection; var endDate = DateTime.Parse("2020-01-03Z", null, DateTimeStyles.AdjustToUniversal); var queryable = collection.AsQueryable() @@ -72,24 +77,19 @@ public void Queryable_Select_should_work() results[1].ShouldBeEquivalentTo(new { Days = 1 }); } - private IMongoCollection<C> CreateCollection() + public class C { - var collection = GetCollection<C>(); - - var documents = new[] - { - new C { Id = 1, StartDate = DateTime.Parse("2020-01-01Z", null, DateTimeStyles.AdjustToUniversal) }, - new C { Id = 2, StartDate = DateTime.Parse("2020-01-02Z", null, DateTimeStyles.AdjustToUniversal) } - }; - CreateCollection(collection, documents); - - return collection; + public int Id { get; set; } + public DateTime StartDate { get; set; } } - private class C + public sealed class ClassFixture : MongoCollectionFixture<C> { - public int Id { get; set; } - public DateTime StartDate { get; set; } + protected override IEnumerable<C> InitialData => + [ + new C { Id = 1, StartDate = DateTime.Parse("2020-01-01Z", null, 
DateTimeStyles.AdjustToUniversal) }, + new C { Id = 2, StartDate = DateTime.Parse("2020-01-02Z", null, DateTimeStyles.AdjustToUniversal) } + ]; } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2195Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2195Tests.cs index e8d5f764d92..03163607bc0 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2195Tests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2195Tests.cs @@ -13,21 +13,27 @@ * limitations under the License. */ +using System.Collections.Generic; using System.Linq; using FluentAssertions; using MongoDB.Bson; using MongoDB.Bson.Serialization; -using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira { - public class CSharp2195Tests : Linq3IntegrationTest + public class CSharp2195Tests : LinqIntegrationTest<CSharp2195Tests.ClassFixture> { + public CSharp2195Tests(ClassFixture fixture) + : base(fixture) + { + } + [Fact] public void Filter_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var builder = Builders<RawBsonDocument>.Filter; var filter = builder.Eq(x => x["life"], 42); @@ -43,7 +49,7 @@ public void Filter_should_work() [Fact] public void Where_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() @@ -58,16 +64,13 @@ public void Where_should_work() results.Select(x => x["_id"].AsInt32).Should().Equal(2); } - private IMongoCollection<RawBsonDocument> CreateCollection() + public sealed class ClassFixture : MongoCollectionFixture<RawBsonDocument, BsonDocument> { - var collection = GetCollection<BsonDocument>(); - - CreateCollection( - collection, + protected override IEnumerable<BsonDocument> InitialData => + [ new BsonDocument { { "_id", 1 }, { "life", 41 } }, - new BsonDocument { { "_id", 2 }, { "life", 
42 } }); - - return GetCollection<RawBsonDocument>(); + new BsonDocument { { "_id", 2 }, { "life", 42 } } + ]; } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2308Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2308Tests.cs index 2fe0d21cf63..071d8a6a6c9 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2308Tests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2308Tests.cs @@ -17,41 +17,33 @@ using System.Linq; using FluentAssertions; using MongoDB.Bson; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira { - public class CSharp2308Tests + [Trait("Category", "Integration")] + public class CSharp2308Tests : LinqIntegrationTest<CSharp2308Tests.ClassFixture> { + public CSharp2308Tests(ClassFixture fixture) + : base(fixture) + { + } + [Fact] public void Nested_Select_should_work() { - var client = DriverTestConfiguration.Client; - var database = client.GetDatabase("FooBar"); - var collection = database.GetCollection<FooNest>("Foos"); - - database.DropCollection("Foos"); - - collection.InsertOne( - new FooNest - { - Name = "Parent", - NestedCollection = new[] { - new FooNest { - Name = "Child" - } - } - }); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .Select(top => top.NestedCollection.Select(child => new { ParentName = top.Name, child.Name })); - var stages = Linq3TestHelpers.Translate(collection, queryable); + var stages = Translate(collection, queryable); var expectedStages = new[] { "{ $project : { _v : { $map : { input : '$NestedCollection', as : 'child', in : { ParentName : '$Name', Name : '$$child.Name' } } }, _id : 0 } }" }; - Linq3TestHelpers.AssertStages(stages, expectedStages); + AssertStages(stages, expectedStages); var pipelineDefinition = new BsonDocumentStagePipelineDefinition<FooNest, BsonDocument>(stages); var resultAsDocument = 
collection.Aggregate(pipelineDefinition).ToList().Single(); @@ -68,5 +60,21 @@ public class FooNest public string Name; public IEnumerable<FooNest> NestedCollection; } + + public sealed class ClassFixture : MongoCollectionFixture<FooNest> + { + protected override IEnumerable<FooNest> InitialData => + [ + new FooNest + { + Name = "Parent", + NestedCollection = new[] { + new FooNest { + Name = "Child" + } + } + } + ]; + } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2348Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2348Tests.cs index fb61ec89f85..9f3c0b2b1c7 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2348Tests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2348Tests.cs @@ -13,20 +13,25 @@ * limitations under the License. */ +using System.Collections.Generic; using System.Linq; using FluentAssertions; -using MongoDB.Driver.Linq; -using MongoDB.Driver.Tests.Linq.Linq3Implementation; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3ImplementationTests.Jira { - public class CSharp2348Tests : Linq3IntegrationTest + public class CSharp2348Tests : LinqIntegrationTest<CSharp2348Tests.ClassFixture> { + public CSharp2348Tests(ClassFixture fixture) + : base(fixture) + { + } + [Fact] public void Any_with_equals_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var find = collection.Find(x => x.A.Any(v => v == 2)); @@ -40,7 +45,7 @@ public void Any_with_equals_should_work() [Fact] public void Any_with_or_of_equals_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var find = collection.Find(x => x.A.Any(v => v == 2 || v == 3)); @@ -54,7 +59,7 @@ public void Any_with_or_of_equals_should_work() [Fact] public void Any_with_or_of_equals_and_greater_than_should_work() { - var collection = CreateCollection(); + var collection = 
Fixture.Collection; var find = collection.Find(x => x.A.Any(v => v == 2 || v > 3)); @@ -68,18 +73,6 @@ public void Any_with_or_of_equals_and_greater_than_should_work() results.Select(x => x.Id).Should().Equal(2, 4); } - private IMongoCollection<User> CreateCollection() - { - var collection = GetCollection<User>("test"); - CreateCollection( - collection, - new User { Id = 1, A = new[] { 1 } }, - new User { Id = 2, A = new[] { 1, 2 } }, - new User { Id = 3, A = new[] { 1, 3 } }, - new User { Id = 4, A = new[] { 1, 4 } }); - return collection; - } - public class User { public int Id { get; set; } @@ -91,5 +84,16 @@ public enum Role Admin = 1, Editor = 2 } + + public sealed class ClassFixture : MongoCollectionFixture<User> + { + protected override IEnumerable<User> InitialData => + [ + new User { Id = 1, A = new[] { 1 } }, + new User { Id = 2, A = new[] { 1, 2 } }, + new User { Id = 3, A = new[] { 1, 3 } }, + new User { Id = 4, A = new[] { 1, 4 } } + ]; + } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2422Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2422Tests.cs index 0f6b5f62fb9..034b955b29a 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2422Tests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2422Tests.cs @@ -21,6 +21,7 @@ namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira { + [Trait("Category", "Integration")] public class CSharp2422Tests { [Fact] diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2472Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2472Tests.cs index 1ae5fc6f93b..035bba42f7e 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2472Tests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2472Tests.cs @@ -14,22 +14,26 @@ */ using System; +using System.Collections.Generic; using System.Linq; using FluentAssertions; using 
MongoDB.Driver.Core.Misc; -using MongoDB.Driver.Core.TestHelpers.XunitExtensions; -using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira { - public class CSharp2472Tests : Linq3IntegrationTest + public class CSharp2472Tests : LinqIntegrationTest<CSharp2472Tests.ClassFixture> { + public CSharp2472Tests(ClassFixture fixture) + : base(fixture, server => server.Supports(Feature.ToConversionOperators)) + { + } + [Fact] public void Numeric_casts_should_work() { - RequireServer.Check().Supports(Feature.ToConversionOperators); - var collection = CreateCollection(); + var collection = Fixture.Collection; var equipmentId = 1; var startDate = new DateTime(2022, 01, 01, 0, 0, 0, DateTimeKind.Utc); var endDate = new DateTime(2022, 01, 02, 0, 0, 0, DateTimeKind.Utc); @@ -64,18 +68,7 @@ public void Numeric_casts_should_work() result.sqrt_calc.Should().Be(2M); } - private IMongoCollection<C> CreateCollection() - { - var collection = GetCollection<C>("C"); - - CreateCollection( - collection, - new C { Id = 1, equipment_id = 1, timestamp = new DateTime(2022, 01, 01, 12, 0, 0, DateTimeKind.Utc), my_decimal_value = 4M }); - - return collection; - } - - private class C + public class C { public int Id { get; set; } public int equipment_id { get; set; } @@ -88,5 +81,13 @@ private class MyDTO public DateTime timestamp { get; set; } public decimal sqrt_calc { get; set; } } + + public sealed class ClassFixture : MongoCollectionFixture<C> + { + protected override IEnumerable<C> InitialData => + [ + new C { Id = 1, equipment_id = 1, timestamp = new DateTime(2022, 01, 01, 12, 0, 0, DateTimeKind.Utc), my_decimal_value = 4M } + ]; + } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2509Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2509Tests.cs index 720218b4f83..b64e62ed650 100644 --- 
a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2509Tests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2509Tests.cs @@ -13,22 +13,28 @@ * limitations under the License. */ +using System; using System.Collections.Generic; using System.Linq; using FluentAssertions; using MongoDB.Bson.Serialization.Attributes; using MongoDB.Bson.Serialization.Options; -using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira { - public class CSharp2509Tests : Linq3IntegrationTest + public class CSharp2509Tests : LinqIntegrationTest<CSharp2509Tests.ClassFixture> { + public CSharp2509Tests(ClassFixture fixture) + : base(fixture) + { + } + [Fact] public void Where_ContainsValue_should_work_when_representation_is_Dictionary() { - var collection = GetCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .Where(x => x.D1.ContainsValue(1)); @@ -43,7 +49,7 @@ public void Where_ContainsValue_should_work_when_representation_is_Dictionary() [Fact] public void Where_ContainsValue_should_work_when_representation_is_ArrayOfArrays() { - var collection = GetCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .Where(x => x.D2.ContainsValue(1)); @@ -58,7 +64,7 @@ public void Where_ContainsValue_should_work_when_representation_is_ArrayOfArrays [Fact] public void Where_ContainsValue_should_work_when_representation_is_ArrayOfDocuments() { - var collection = GetCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .Where(x => x.D3.ContainsValue(1)); @@ -73,7 +79,7 @@ public void Where_ContainsValue_should_work_when_representation_is_ArrayOfDocume [Fact] public void Select_ContainsValue_should_work_when_representation_is_Dictionary() { - var collection = GetCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .Select(x => 
x.D1.ContainsValue(1)); @@ -88,7 +94,7 @@ public void Select_ContainsValue_should_work_when_representation_is_Dictionary() [Fact] public void Select_ContainsValue_should_work_when_representation_is_ArrayOfArrays() { - var collection = GetCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .Select(x => x.D2.ContainsValue(1)); @@ -103,7 +109,7 @@ public void Select_ContainsValue_should_work_when_representation_is_ArrayOfArray [Fact] public void Select_ContainsValue_should_work_when_representation_is_ArrayOfDocuments() { - var collection = GetCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .Select(x => x.D3.ContainsValue(1)); @@ -115,11 +121,21 @@ public void Select_ContainsValue_should_work_when_representation_is_ArrayOfDocum results.Should().Equal(true, true, false); } - private IMongoCollection<User> GetCollection() + public class User { - var collection = GetCollection<User>("test"); - CreateCollection( - collection, + public int Id { get; set; } + [BsonDictionaryOptions(DictionaryRepresentation.Document)] + public Dictionary<string, int> D1 { get; set; } + [BsonDictionaryOptions(DictionaryRepresentation.ArrayOfArrays)] + public Dictionary<string, int> D2 { get; set; } + [BsonDictionaryOptions(DictionaryRepresentation.ArrayOfDocuments)] + public Dictionary<string, int> D3 { get; set; } + } + + public sealed class ClassFixture : MongoCollectionFixture<User> + { + protected override IEnumerable<User> InitialData => + [ new User { Id = 1, @@ -140,19 +156,8 @@ private IMongoCollection<User> GetCollection() D1 = new() { { "A", 2 }, { "B", 3 } }, D2 = new() { { "A", 2 }, { "B", 3 } }, D3 = new() { { "A", 2 }, { "B", 3 } } - }); - return collection; - } - - private class User - { - public int Id { get; set; } - [BsonDictionaryOptions(DictionaryRepresentation.Document)] - public Dictionary<string, int> D1 { get; set; } - [BsonDictionaryOptions(DictionaryRepresentation.ArrayOfArrays)] - 
public Dictionary<string, int> D2 { get; set; } - [BsonDictionaryOptions(DictionaryRepresentation.ArrayOfDocuments)] - public Dictionary<string, int> D3 { get; set; } + } + ]; } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2708Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2708Tests.cs index 74e078c217d..304de253976 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2708Tests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2708Tests.cs @@ -14,11 +14,11 @@ */ using System.Linq; -using MongoDB.Driver.Linq; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira { + [Trait("Category", "Integration")] public class CSharp2708Tests { [Fact] diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2723Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2723Tests.cs index ad64d2efd43..63065869144 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2723Tests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2723Tests.cs @@ -17,11 +17,11 @@ using System.Linq; using FluentAssertions; using MongoDB.Bson; -using MongoDB.Driver.Linq; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira { + [Trait("Category", "Integration")] public class CSharp2723Tests { [Fact] diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2727Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2727Tests.cs index db6254700f8..160cfc8fa85 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2727Tests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp2727Tests.cs @@ -14,24 +14,30 @@ */ using System; +using System.Collections.Generic; using System.Linq; using FluentAssertions; using MongoDB.Bson; using MongoDB.Bson.Serialization; using MongoDB.Driver.Core.Misc; using 
MongoDB.Driver.Core.TestHelpers.XunitExtensions; -using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira { - public class CSharp2727Tests : Linq3IntegrationTest + public class CSharp2727Tests : LinqIntegrationTest<CSharp2727Tests.ClassFixture> { + public CSharp2727Tests(ClassFixture fixture) + : base(fixture) + { + } + [Fact] public void Find_with_predicate_on_Body_should_work() { RequireServer.Check().Supports(Feature.AggregateToString); - var collection = CreateCollection(); + var collection = Fixture.Collection; var filter = new ExpressionFilterDefinition<Entity>(x => new[] { "Test1", "Test2" }.Contains((string)x.Body["name"])); var serializerRegistry = BsonSerializer.SerializerRegistry; @@ -49,7 +55,7 @@ public void Find_with_predicate_on_Body_should_work() [Fact] public void Find_with_predicate_on_Caption_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var filter = new ExpressionFilterDefinition<Entity>(x => new[] { "Test1", "Test2" }.Contains(x.Caption)); var serializerRegistry = BsonSerializer.SerializerRegistry; @@ -68,7 +74,7 @@ public void Find_with_predicate_on_Caption_should_work() public void Where_with_predicate_on_Body_should_work() { RequireServer.Check().Supports(Feature.AggregateToString); - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() @@ -84,7 +90,7 @@ public void Where_with_predicate_on_Body_should_work() [Fact] public void Where_with_predicate_on_Caption_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() @@ -97,24 +103,21 @@ public void Where_with_predicate_on_Caption_should_work() results.Select(x => x.Id).Should().Equal(1, 2); } - private IMongoCollection<Entity> CreateCollection() - { - var collection = GetCollection<Entity>("C"); - - CreateCollection( - 
collection, - new Entity { Id = 1, Body = BsonDocument.Parse("{ name : 'Test1' }"), Caption = "Test1" }, - new Entity { Id = 2, Body = BsonDocument.Parse("{ name : 'Test2' }"), Caption = "Test2" }, - new Entity { Id = 3, Body = BsonDocument.Parse("{ name : 'Test3' }"), Caption = "Test3" }); - - return collection; - } - - private class Entity + public class Entity { public int Id { get; set; } public BsonDocument Body { get; set; } public string Caption { get; set; } } + + public sealed class ClassFixture : MongoCollectionFixture<Entity> + { + protected override IEnumerable<Entity> InitialData => + [ + new Entity { Id = 1, Body = BsonDocument.Parse("{ name : 'Test1' }"), Caption = "Test1" }, + new Entity { Id = 2, Body = BsonDocument.Parse("{ name : 'Test2' }"), Caption = "Test2" }, + new Entity { Id = 3, Body = BsonDocument.Parse("{ name : 'Test3' }"), Caption = "Test3" } + ]; + } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3136Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3136Tests.cs index 23a610128b5..1341b2c93f5 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3136Tests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3136Tests.cs @@ -20,19 +20,24 @@ using MongoDB.Driver.Core.Misc; using MongoDB.Driver.Core.TestHelpers.XunitExtensions; using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira { - public class CSharp3136Tests : Linq3IntegrationTest + public class CSharp3136Tests : LinqIntegrationTest<CSharp3136Tests.ClassFixture> { + public CSharp3136Tests(ClassFixture fixture) + : base(fixture) + { + } [Fact] public void DateTime_ToString_with_no_arguments_should_work() { RequireServer.Check().Supports(Feature.ToConversionOperators); - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() @@ -52,7 +57,7 @@ 
public void DateTime_ToString_with_no_arguments_should_work() [Fact] public void DateTime_ToString_with_format_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() @@ -76,7 +81,7 @@ public void DateTime_ToString_with_format_should_work() [InlineData("%H:%M:%S", "-04:00", "{ $project : { _v : { $dateToString : { date : '$D', format : '%H:%M:%S', timezone : '-04:00' } }, _id : 0 } }", new[] { "23:04:05", "23:04:05" })] public void DateTime_ToString_with_format_and_timezone_constants_should_work(string format, string timezone, string expectedProjectStage, string[] expectedResults) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() @@ -104,7 +109,7 @@ public void DateTime_ToString_with_format_and_timezone_constants_should_work(str [InlineData(true, true, "{ $project : { _v : { $dateToString : { date : '$D', format : '$Format', timezone : '$Timezone' } }, _id : 0 } }", new[] { "23:04:05", "23:04:05" })] public void DateTime_ToString_with_format_and_timezone_expressions_should_work(bool withFormat, bool withTimezone, string expectedProjectStage, string[] expectedResults) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var orderby = collection .AsQueryable() @@ -138,7 +143,7 @@ public void NullableDateTime_ToString_with_no_arguments_should_work() { RequireServer.Check().Supports(Feature.ToConversionOperators); - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() @@ -166,7 +171,7 @@ public void NullableDateTime_ToString_with_no_arguments_should_work() [InlineData("%H:%M:%S", "-04:00", "xx", "{ $project : { _v : { $dateToString : { date : '$N', format : '%H:%M:%S', timezone : '-04:00', onNull : 'xx' } }, _id : 0 } }", new[] { "23:04:05", "xx" })] public void 
NullableDateTime_ToString_with_format_and_timezone_and_onNull_constants_should_work(string format, string timezone, string onNull, string expectedProjectStage, string[] expectedResults) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() @@ -198,7 +203,7 @@ public void NullableDateTime_ToString_with_format_and_timezone_and_onNull_consta [InlineData(true, true, true, "{ $project : { _v : { $dateToString : { date : '$N', format : '$Format', timezone : '$Timezone', onNull : '$OnNull' } }, _id : 0 } }", new[] { "23:04:05", "missing" })] public void NullableDateTime_ToString_with_format_and_timezone_and_onNull_expressions_should_work(bool withFormat, bool withTimezone, bool withOnNull, string expectedProjectStage, string[] expectedResults) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var orderby = collection .AsQueryable() @@ -231,18 +236,6 @@ public void NullableDateTime_ToString_with_format_and_timezone_and_onNull_expres results.Should().Equal(expectedResults); } - private IMongoCollection<C> CreateCollection() - { - var collection = GetCollection<C>(); - - CreateCollection( - collection, - new C { Id = 1, D = new DateTime(2021, 1, 2, 3, 4, 5, 123, DateTimeKind.Utc), N = new DateTime(2021, 1, 2, 3, 4, 5, 123, DateTimeKind.Utc), Format = "%H:%M:%S", Timezone = "-04:00", OnNull = "missing" }, - new C { Id = 2, D = new DateTime(2021, 1, 2, 3, 4, 5, 123, DateTimeKind.Utc), N = null, Format = "%H:%M:%S", Timezone = "-04:00", OnNull = "missing" }); - - return collection; - } - private List<string> RemoveTrailingZFromResults(List<string> results) { return results.Select(RemoveTrailingZ).ToList(); @@ -253,7 +246,7 @@ static string RemoveTrailingZ(string value) } } - private class C + public class C { public int Id { get; set; } public DateTime D { get; set; } @@ -263,9 +256,13 @@ private class C public string OnNull { get; set; } } - private class ProductTypeSearchResult + 
public sealed class ClassFixture : MongoCollectionFixture<C> { - public bool IsExternalUrl { get; set; } + protected override IEnumerable<C> InitialData => + [ + new C { Id = 1, D = new DateTime(2021, 1, 2, 3, 4, 5, 123, DateTimeKind.Utc), N = new DateTime(2021, 1, 2, 3, 4, 5, 123, DateTimeKind.Utc), Format = "%H:%M:%S", Timezone = "-04:00", OnNull = "missing" }, + new C { Id = 2, D = new DateTime(2021, 1, 2, 3, 4, 5, 123, DateTimeKind.Utc), N = null, Format = "%H:%M:%S", Timezone = "-04:00", OnNull = "missing" } + ]; } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3144Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3144Tests.cs index 2ab60aab58c..e5d1591aed6 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3144Tests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3144Tests.cs @@ -16,17 +16,22 @@ using System.Collections.Generic; using System.Linq; using FluentAssertions; -using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira { - public class CSharp3144Tests : Linq3IntegrationTest + public class CSharp3144Tests : LinqIntegrationTest<CSharp3144Tests.ClassFixture> { + public CSharp3144Tests(ClassFixture fixture) + : base(fixture) + { + } + [Fact] public void Where_with_Contains_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() @@ -42,7 +47,7 @@ public void Where_with_Contains_should_work() [Fact] public void Suggested_workaround_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() @@ -55,12 +60,28 @@ public void Suggested_workaround_should_work() results.Select(r => r.Id).Should().Equal(1); } - private IMongoCollection<Order> CreateCollection() + public class Order { - var collection = GetCollection<Order>(); + 
public virtual int Id { get; set; } + public virtual List<OrderItem> Items { get; set; } - CreateCollection( - collection, + public Order() + { + Items = new List<OrderItem>(); + } + } + + public class OrderItem + { + public virtual int Id { get; set; } + public virtual int GoodId { get; set; } + public virtual int Amount { get; set; } + } + + public sealed class ClassFixture : MongoCollectionFixture<Order> + { + protected override IEnumerable<Order> InitialData => + [ new Order { Id = 1, @@ -77,34 +98,15 @@ private IMongoCollection<Order> CreateCollection() { Id = 2, Items = new List<OrderItem> - { - new OrderItem { GoodId = 6, Amount = 1 }, - new OrderItem { GoodId = 7, Amount = 10 }, - new OrderItem { GoodId = 8, Amount = 20 }, - new OrderItem { GoodId = 9, Amount = 30 }, - new OrderItem { GoodId = 10, Amount = 40 } - } - }); - - return collection; - } - - class Order - { - public virtual int Id { get; set; } - public virtual List<OrderItem> Items { get; set; } - - public Order() - { - Items = new List<OrderItem>(); - } - } - - class OrderItem - { - public virtual int Id { get; set; } - public virtual int GoodId { get; set; } - public virtual int Amount { get; set; } + { + new OrderItem { GoodId = 6, Amount = 1 }, + new OrderItem { GoodId = 7, Amount = 10 }, + new OrderItem { GoodId = 8, Amount = 20 }, + new OrderItem { GoodId = 9, Amount = 30 }, + new OrderItem { GoodId = 10, Amount = 40 } + } + } + ]; } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3197Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3197Tests.cs index c999d37b193..34282afc432 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3197Tests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3197Tests.cs @@ -13,19 +13,25 @@ * limitations under the License. 
*/ +using System.Collections.Generic; using System.Linq; using FluentAssertions; -using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira { - public class CSharp3197Tests : Linq3IntegrationTest + public class CSharp3197Tests : LinqIntegrationTest<CSharp3197Tests.ClassFixture> { + public CSharp3197Tests(ClassFixture fixture) + : base(fixture) + { + } + [Fact] public void Select_select_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() @@ -42,21 +48,18 @@ public void Select_select_should_work() result.Should().Be(new { B = 42 }); } - private IMongoCollection<Person> CreateCollection() + public class Person { - var collection = GetCollection<Person>("C"); - - CreateCollection( - collection, - new Person { Id = 1, Age = 42 }); - - return collection; + public int Id { get; set; } + public int Age { get; set; } } - private class Person + public sealed class ClassFixture : MongoCollectionFixture<Person> { - public int Id { get; set; } - public int Age { get; set; } + protected override IEnumerable<Person> InitialData => + [ + new Person { Id = 1, Age = 42 } + ]; } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3234Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3234Tests.cs index 2a2ae3a7969..b26bb8b6cc1 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3234Tests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3234Tests.cs @@ -13,20 +13,25 @@ * limitations under the License. 
*/ -using System; +using System.Collections.Generic; using System.Linq; using FluentAssertions; -using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira { - public class CSharp3234Tests : Linq3IntegrationTest + public class CSharp3234Tests : LinqIntegrationTest<CSharp3234Tests.ClassFixture> { + public CSharp3234Tests(ClassFixture fixture) + : base(fixture) + { + } + [Fact] public void Contains_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var selectedIds = new[] { 1, 2, 3 }; var queryable = collection @@ -43,7 +48,7 @@ public void Contains_should_work() [Fact] public void Contains_equals_false_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var selectedIds = new[] { 1, 2, 3 }; var queryable = collection @@ -60,7 +65,7 @@ public void Contains_equals_false_should_work() [Fact] public void Contains_equals_true_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var selectedIds = new[] { 1, 2, 3 }; var queryable = collection @@ -74,24 +79,21 @@ public void Contains_equals_true_should_work() results.OrderBy(x => x.Id).Select(x => x.Id).Should().Equal(1, 2, 3); } - private IMongoCollection<C> CreateCollection() + public class C { - var collection = GetCollection<C>("C"); + public int Id { get; set; } + } - CreateCollection( - collection, + public sealed class ClassFixture : MongoCollectionFixture<C> + { + protected override IEnumerable<C> InitialData => + [ new C { Id = 1 }, new C { Id = 2 }, new C { Id = 3 }, new C { Id = 4 }, - new C { Id = 5 }); - - return collection; - } - - private class C - { - public int Id { get; set; } + new C { Id = 5 } + ]; } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3236Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3236Tests.cs index 7754bc52c6a..0245a451cd1 100644 
--- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3236Tests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3236Tests.cs @@ -16,17 +16,22 @@ using System.Collections.Generic; using System.Linq; using FluentAssertions; -using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira { - public class CSharp3236Tests : Linq3IntegrationTest + public class CSharp3236Tests : LinqIntegrationTest<CSharp3236Tests.ClassFixture> { + public CSharp3236Tests(ClassFixture fixture) + : base(fixture) + { + } + [Fact] public void Select_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() @@ -46,12 +51,23 @@ public void Select_should_work() result.Comments.Select(c => c.Id).Should().Equal(1, 3); } - private IMongoCollection<Post> CreateCollection() + public class Post + { + public int Id { get; set; } + public List<Comment> Comments { get; set; } + + } + + public class Comment { - var collection = GetCollection<Post>("C"); + public int Id { get; set; } + public string Text { get; set; } + } - CreateCollection( - collection, + public sealed class ClassFixture : MongoCollectionFixture<Post> + { + protected override IEnumerable<Post> InitialData => + [ new Post { Id = 1, @@ -61,22 +77,8 @@ private IMongoCollection<Post> CreateCollection() new Comment { Id = 2, Text = "this is not" }, new Comment { Id = 3, Text = "and this is another test comment" } } - }); - - return collection; - } - - private class Post - { - public int Id { get; set; } - public List<Comment> Comments { get; set; } - - } - - public class Comment - { - public int Id { get; set; } - public string Text { get; set; } + } + ]; } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3283Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3283Tests.cs index 0439713d106..cc75b83ee78 
100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3283Tests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3283Tests.cs @@ -23,6 +23,7 @@ namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira { + [Trait("Category", "Integration")] public class CSharp3283Tests { [Fact] diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3425Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3425Tests.cs index 41f427d01a6..64afec1a437 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3425Tests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3425Tests.cs @@ -18,6 +18,7 @@ namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira { + [Trait("Category", "Integration")] public class CSharp3425Tests { [Fact] diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3435Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3435Tests.cs new file mode 100644 index 00000000000..33584f1b44e --- /dev/null +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3435Tests.cs @@ -0,0 +1,112 @@ +/* Copyright 2010-present MongoDB Inc. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*/ + +using System.Linq; +using MongoDB.Driver.Linq; +using Xunit; + +namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira; + +public class CSharp3435Tests : LinqIntegrationTest<CSharp3435Tests.ClassFixture> +{ + public CSharp3435Tests(ClassFixture fixture) + : base(fixture) + { + } + + [Fact] + public void Where_should_work() + { + var queryable = CreateQueryable() + .Where(x => x.NormalizedUsername == "PAPLABROS"); + + var stages = Translate(Fixture.UserClaimCollection, queryable); + AssertStages( + stages, + "{ $project : { _outer : '$$ROOT', _id : 0 } }", + "{ $lookup : { from : 'Users', localField : '_outer.UserId', foreignField : '_id', as : '_inner' } }", + "{ $project : { claim : '$_outer', users : '$_inner', _id : 0 } }", + "{ $match : { 'claim.ClaimType' : 'Moderator' } }", + "{ $project : { _v : { $arrayElemAt : ['$users', 0] }, _id : 0 } }", + "{ $match : { '_v.NormalizedUsername' : 'PAPLABROS' } }"); + } + + [Fact] + public void Where_with_Inject_should_work() + { + var filter = Builders<User>.Filter.Eq(x => x.NormalizedUsername, "PAPLABROS"); + var queryable = CreateQueryable() + .Where(x => filter.Inject()); + + var stages = Translate(Fixture.UserClaimCollection, queryable); + AssertStages( + stages, + "{ $project : { _outer : '$$ROOT', _id : 0 } }", + "{ $lookup : { from : 'Users', localField : '_outer.UserId', foreignField : '_id', as : '_inner' } }", + "{ $project : { claim : '$_outer', users : '$_inner', _id : 0 } }", + "{ $match : { 'claim.ClaimType' : 'Moderator' } }", + "{ $project : { _v : { $arrayElemAt : ['$users', 0] }, _id : 0 } }", + "{ $match : { '_v.NormalizedUsername' : 'PAPLABROS' } }"); + } + + public IQueryable<User> CreateQueryable() + { + var usersCollection = Fixture.UserCollection; + var userClaimsCollection = Fixture.UserClaimCollection; + + var queryable = + from claim in userClaimsCollection.AsQueryable() + join user in usersCollection.AsQueryable() on claim.UserId equals user.Id into users + where claim.ClaimType 
== "Moderator" + select users.First(); + + // this is the equivalent method syntax + // var queryable = userClaimsCollection.AsQueryable() + // .GroupJoin( + // usersCollection.AsQueryable(), + // claim => claim.UserId, + // user => user.Id, + // (claim, users) => new { claim, users }) + // .Where(x => x.claim.ClaimType == "Moderator") + // .Select(x => x.users.First()); + + return queryable; + } + + public class User + { + public int Id { get; set; } + public string NormalizedUsername { get; set; } + } + + public class UserClaim + { + public int Id { get; set; } + public int UserId { get; set; } + public string ClaimType { get; set; } + } + + public sealed class ClassFixture : MongoDatabaseFixture + { + public IMongoCollection<User> UserCollection { get; private set; } + public IMongoCollection<UserClaim> UserClaimCollection { get; private set; } + + protected override void InitializeFixture() + { + UserCollection = CreateCollection<User>("Users"); + UserClaimCollection = CreateCollection<UserClaim>("UserClaims"); + } + } +} diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3524Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3524Tests.cs index d5dbc8274ba..55db449fe15 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3524Tests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3524Tests.cs @@ -24,6 +24,7 @@ namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira { + [Trait("Category", "Integration")] public class CSharp3524Tests { [Fact] diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3614Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3614Tests.cs index 9e625c18029..9a5fb8a2309 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3614Tests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3614Tests.cs @@ -14,18 +14,25 @@ */ using System; +using 
System.Collections.Generic; using System.Linq; using FluentAssertions; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira { - public class CSharp3614Tests : Linq3IntegrationTest + public class CSharp3614Tests : LinqIntegrationTest<CSharp3614Tests.ClassFixture> { + public CSharp3614Tests(ClassFixture fixture) + : base(fixture) + { + } + [Fact] public void Test() { - var collection = CreateBooksCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .Select(x => new BookDto @@ -57,20 +64,6 @@ public void Test() results[1].Author.ShouldBeEquivalentTo(new AuthorDto { Id = 2, Name = "Two" }); } - private IMongoCollection<Book> CreateBooksCollection() - { - var collection = GetCollection<Book>(); - - var documents = new[] - { - new Book { Id = 1, PageCount = 1, Author = null }, - new Book { Id = 2, PageCount = 2, Author = new Author { Id = 2, Name = "Two" } } - }; - CreateCollection(collection, documents); - - return collection; - } - private class BookDto { public int Id { get; set; } @@ -84,18 +77,27 @@ private class AuthorDto public string Name { get; set; } } - private class Author : IEquatable<Author> + public class Author : IEquatable<Author> { public int Id { get; set; } public string Name { get; set; } public bool Equals(Author other) => Id == other.Id && Name == other.Name; } - private class Book + public class Book { public int Id { get; set; } public int PageCount { get; set; } public Author Author { get; set; } } + + public sealed class ClassFixture : MongoCollectionFixture<Book> + { + protected override IEnumerable<Book> InitialData => + [ + new Book { Id = 1, PageCount = 1, Author = null }, + new Book { Id = 2, PageCount = 2, Author = new Author { Id = 2, Name = "Two" } } + ]; + } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3630Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3630Tests.cs index 
2b0f1583f1a..de33370eee2 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3630Tests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3630Tests.cs @@ -18,6 +18,7 @@ namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira { + [Trait("Category", "Integration")] public class CSharp3630Tests { [Fact] diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3713Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3713Tests.cs index 2673013b576..9d9ec2cf130 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3713Tests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3713Tests.cs @@ -13,37 +13,36 @@ * limitations under the License. */ +using System.Collections.Generic; using System.Linq; using FluentAssertions; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira { - public class CSharp3713Tests + public class CSharp3713Tests : LinqIntegrationTest<CSharp3713Tests.ClassFixture> { + public CSharp3713Tests(ClassFixture fixture) + : base(fixture) + { + } + [Fact] public void DefaultIfEmpty_should_work() { - var client = DriverTestConfiguration.Client; - var database = client.GetDatabase(DriverTestConfiguration.DatabaseNamespace.DatabaseName); - var collection = database.GetCollection<C>(DriverTestConfiguration.CollectionNamespace.CollectionName); + var collection = Fixture.Collection; var subject = collection.AsQueryable(); - database.DropCollection(collection.CollectionNamespace.CollectionName); - collection.InsertMany(new[] { - new C { Id = 1, InnerArray = new A[0] }, - new C { Id = 2, InnerArray = new[] { new A { S = "abc" } } } - }); - var queryable = subject.SelectMany(outerObject => outerObject.InnerArray.DefaultIfEmpty(), (o, a) => new { o, a }); - var stages = Linq3TestHelpers.Translate(collection, queryable); + var stages = Translate(collection, queryable); var 
expectedStages = new[] { "{ $project : { _v : { $map : { input : { $cond : { if : { $eq : [{ $size : '$InnerArray' }, 0] }, then : [null], else : '$InnerArray' } }, as : 'a', in : { o : '$$ROOT', a : '$$a' } } }, _id : 0 } }", "{ $unwind : '$_v' }" }; - Linq3TestHelpers.AssertStages(stages, expectedStages); + AssertStages(stages, expectedStages); var result = queryable.ToList(); result.Count.Should().Be(2); @@ -56,27 +55,19 @@ public void DefaultIfEmpty_should_work() [Fact] public void DefaultIfEmpty_with_explicit_default_should_work() { - var client = DriverTestConfiguration.Client; - var database = client.GetDatabase(DriverTestConfiguration.DatabaseNamespace.DatabaseName); - var collection = database.GetCollection<C>(DriverTestConfiguration.CollectionNamespace.CollectionName); + var collection = Fixture.Collection; var subject = collection.AsQueryable(); - database.DropCollection(collection.CollectionNamespace.CollectionName); - collection.InsertMany(new[] { - new C { Id = 1, InnerArray = new A[0] }, - new C { Id = 2, InnerArray = new[] { new A { S = "abc" } } } - }); - var defaultValue = new A { S = "default" }; var queryable = subject.SelectMany(outerObject => outerObject.InnerArray.DefaultIfEmpty(defaultValue), (o, a) => new { o, a }); - var stages = Linq3TestHelpers.Translate(collection, queryable); + var stages = Translate(collection, queryable); var expectedStages = new[] { "{ $project : { _v : { $map : { input : { $cond : { if : { $eq : [{ $size : '$InnerArray' }, 0] }, then : [{ S : 'default' }], else : '$InnerArray' } }, as : 'a', in : { o : '$$ROOT', a : '$$a' } } }, _id : 0 } }", "{ $unwind : '$_v' }" }; - Linq3TestHelpers.AssertStages(stages, expectedStages); + AssertStages(stages, expectedStages); var result = queryable.ToList(); result.Count.Should().Be(2); @@ -86,15 +77,24 @@ public void DefaultIfEmpty_with_explicit_default_should_work() result[1].a.S.Should().Be("abc"); } - private class C + public class C { public int Id { get; set; } public A[] 
InnerArray { get; set; } } - private class A + public class A { public string S { get; set; } } + + public sealed class ClassFixture : MongoCollectionFixture<C> + { + protected override IEnumerable<C> InitialData => + [ + new C { Id = 1, InnerArray = new A[0] }, + new C { Id = 2, InnerArray = new[] { new A { S = "abc" } } } + ]; + } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3845Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3845Tests.cs index 3d029d46a7e..cf09d4f45a0 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3845Tests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3845Tests.cs @@ -13,26 +13,26 @@ * limitations under the License. */ +using System.Collections.Generic; using System.Linq; using FluentAssertions; using MongoDB.Bson.Serialization.Attributes; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira { - public class CSharp3845Tests : Linq3IntegrationTest + public class CSharp3845Tests : LinqIntegrationTest<CSharp3845Tests.ClassFixture> { + public CSharp3845Tests(ClassFixture fixture) + : base(fixture) + { + } + [Fact] public void Select_of_anonymous_class_with_missing_fields_should_work() { - var collection = GetCollection<C>(); - CreateCollection( - collection, - new[] - { - new C { Id = 1, S = null, X = 0 }, - new C { Id = 2, S = "abc", X = 123 } - }); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .Select(c => new { F = c.S, G = c.X }); @@ -52,7 +52,7 @@ public void Select_of_anonymous_class_with_missing_fields_should_work() results[1].G.Should().Be(123); } - private class C + public class C { public int Id { get; set; } [BsonIgnoreIfNull] @@ -60,5 +60,14 @@ private class C [BsonIgnoreIfDefault] public int X { get; set; } } + + public sealed class ClassFixture : MongoCollectionFixture<C> + { + protected override IEnumerable<C> InitialData => + [ + 
new C { Id = 1, S = null, X = 0 }, + new C { Id = 2, S = "abc", X = 123 } + ]; + } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3865Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3865Tests.cs index 345b07748ff..9fa2efa1ba8 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3865Tests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3865Tests.cs @@ -14,11 +14,11 @@ */ using System.Linq; -using MongoDB.Driver.Linq; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira { + [Trait("Category", "Integration")] public class CSharp3865Tests { [Fact] diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3910Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3910Tests.cs index 86923f5ae85..dd3dc5f81ee 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3910Tests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3910Tests.cs @@ -13,28 +13,24 @@ * limitations under the License. 
*/ +using System.Collections.Generic; using FluentAssertions; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira { - public class CSharp3910Tests + public class CSharp3910Tests : LinqIntegrationTest<CSharp3910Tests.ClassFixture> { + public CSharp3910Tests(ClassFixture fixture) + : base(fixture) + { + } + [Fact] public void Filter_expression_needing_partial_evaluation_should_work() { - var client = DriverTestConfiguration.Client; - var database = client.GetDatabase(DriverTestConfiguration.DatabaseNamespace.DatabaseName); - var collection = database.GetCollection<Entity>("csharp3910"); - - database.DropCollection("csharp3910"); - collection.InsertMany(new[] - { - new Entity { Id = 1, Description = "Alpha" }, - new Entity { Id = 2, Description = "Alpha2" }, - new Entity { Id = 3, Description = "Bravo" }, - new Entity { Id = 4, Description = "Charlie" } - }); + var collection = Fixture.Collection; var param = "A"; var result = collection.DeleteMany(mvi => mvi.Description.StartsWith(string.Format("{0}", param))); @@ -47,5 +43,16 @@ public class Entity public int Id { get; set; } public string Description { get; set; } } + + public sealed class ClassFixture : MongoCollectionFixture<Entity> + { + protected override IEnumerable<Entity> InitialData => + [ + new Entity { Id = 1, Description = "Alpha" }, + new Entity { Id = 2, Description = "Alpha2" }, + new Entity { Id = 3, Description = "Bravo" }, + new Entity { Id = 4, Description = "Charlie" } + ]; + } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3924Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3924Tests.cs index 88c97d0a246..4f7e4126a3c 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3924Tests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3924Tests.cs @@ -14,19 +14,25 @@ */ using System; +using System.Collections.Generic; using System.Linq; using 
FluentAssertions; -using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira { - public class CSharp3924Tests : Linq3IntegrationTest + public class CSharp3924Tests : LinqIntegrationTest<CSharp3924Tests.ClassFixture> { + public CSharp3924Tests(ClassFixture fixture) + : base(fixture) + { + } + [Fact] public void Projection_with_call_to_Tuple1_constructor_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() @@ -42,7 +48,7 @@ public void Projection_with_call_to_Tuple1_constructor_should_work() [Fact] public void Projection_with_call_to_Tuple2_constructor_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() @@ -58,7 +64,7 @@ public void Projection_with_call_to_Tuple2_constructor_should_work() [Fact] public void Projection_with_call_to_Tuple3_constructor_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() @@ -74,7 +80,7 @@ public void Projection_with_call_to_Tuple3_constructor_should_work() [Fact] public void Where_with_Tuple1_item_comparisons_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() @@ -94,7 +100,7 @@ public void Where_with_Tuple1_item_comparisons_should_work() [Fact] public void Where_with_Tuple2_item_comparisons_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() @@ -114,7 +120,7 @@ public void Where_with_Tuple2_item_comparisons_should_work() [Fact] public void Where_with_Tuple3_item_comparisons_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() @@ -131,23 +137,20 @@ public void 
Where_with_Tuple3_item_comparisons_should_work() result.Should().Be(new Tuple<int, int, int>(1, 11, 111)); } - private IMongoCollection<C> CreateCollection() - { - var collection = GetCollection<C>("C"); - - CreateCollection( - collection, - new C { Id = 1, X = 1, Y = 11, Z = 111 }); - - return collection; - } - - private class C + public class C { public int Id { get; set; } public int X { get; set; } public int Y { get; set; } public int Z { get; set; } } + + public sealed class ClassFixture : MongoCollectionFixture<C> + { + protected override IEnumerable<C> InitialData => + [ + new C { Id = 1, X = 1, Y = 11, Z = 111 } + ]; + } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3933Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3933Tests.cs index 02315b1affc..212ee4a4c1d 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3933Tests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3933Tests.cs @@ -16,11 +16,11 @@ using System.Linq; using MongoDB.Bson; using MongoDB.Bson.Serialization.Serializers; -using MongoDB.Driver.Linq; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira { + [Trait("Category", "Integration")] public class CSharp3933Tests { [Fact] diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3940Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3940Tests.cs index 38bc73edba0..13c1b6456be 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3940Tests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3940Tests.cs @@ -19,6 +19,7 @@ namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira { + [Trait("Category", "Integration")] public class CSharp3940Tests { [Fact] diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3946Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3946Tests.cs index 
10363172293..1b38b394d8d 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3946Tests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3946Tests.cs @@ -13,23 +13,30 @@ * limitations under the License. */ +using System.Collections.Generic; using System.Linq; using FluentAssertions; using MongoDB.Driver.Core.Misc; using MongoDB.Driver.Core.TestHelpers.XunitExtensions; using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira { - public class CSharp3946Tests : Linq3IntegrationTest + public class CSharp3946Tests : LinqIntegrationTest<CSharp3946Tests.ClassFixture> { + public CSharp3946Tests(ClassFixture fixture) + : base(fixture) + { + } + [Fact] public void Where_with_constant_limit_should_work() { RequireServer.Check().Supports(Feature.FilterLimit); - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .Select(x => x.IA.Where(i => i >= 2, 2)); @@ -46,7 +53,7 @@ public void Where_with_limit_computed_server_side_should_work() { RequireServer.Check().Supports(Feature.FilterLimit); - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .Select(x => x.IA.Where(i => i >= 2, x.Id + 1)); @@ -58,19 +65,18 @@ public void Where_with_limit_computed_server_side_should_work() results[0].Should().Equal(2, 3); } - private IMongoCollection<C> CreateCollection() - { - var collection = GetCollection<C>(); - CreateCollection( - collection, - new C { Id = 1, IA = new[] { 1, 2, 3, 4 } }); - return collection; - } - public class C { public int Id { get; set; } public int[] IA { get; set; } } + + public sealed class ClassFixture : MongoCollectionFixture<C> + { + protected override IEnumerable<C> InitialData => + [ + new C { Id = 1, IA = new[] { 1, 2, 3, 4 } } + ]; + } } } diff --git 
a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3958Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3958Tests.cs index b629728d726..1b40ffd9325 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3958Tests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3958Tests.cs @@ -14,24 +14,31 @@ */ using System; +using System.Collections.Generic; using System.Linq; using FluentAssertions; using MongoDB.Bson; using MongoDB.Driver.Core.Misc; using MongoDB.Driver.Core.TestHelpers.XunitExtensions; using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using MongoDB.TestHelpers.XunitExtensions; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira { - public class CSharp3958Tests : Linq3IntegrationTest + public class CSharp3958Tests : LinqIntegrationTest<CSharp3958Tests.ClassFixture> { + public CSharp3958Tests(ClassFixture fixture) + : base(fixture) + { + } + [Fact] public void Sort_on_a_field_example_should_work() { RequireServer.Check().Supports(Feature.SortArrayOperator); - var collection = CreateEngineersCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() .Select(x => new { Result = x.Team.OrderBy(m => m.Name) }); @@ -48,7 +55,7 @@ public void Sort_on_a_field_example_should_work() public void Sort_on_a_subfield_example_should_work() { RequireServer.Check().Supports(Feature.SortArrayOperator); - var collection = CreateEngineersCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() .Select(x => new { Result = x.Team.OrderByDescending(m => m.Address.City) }); @@ -65,7 +72,7 @@ public void Sort_on_a_subfield_example_should_work() public void Sort_on_multiple_fields_example_should_work() { RequireServer.Check().Supports(Feature.SortArrayOperator); - var collection = CreateEngineersCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() .Select(x 
=> new { Result = x.Team.OrderByDescending(m => m.Age).ThenBy(m => m.Name) }); @@ -82,7 +89,7 @@ public void Sort_on_multiple_fields_example_should_work() public void Sort_an_array_of_integers_example_should_work() { RequireServer.Check().Supports(Feature.SortArrayOperator); - var collection = CreateEngineersCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() .Select(x => new { Result = new[] { 1, 4, 1, 6, 12, 5 }.OrderBy(v => v) }); @@ -99,7 +106,7 @@ public void Sort_an_array_of_integers_example_should_work() public void Sort_on_mixed_type_fields_example_should_work() { RequireServer.Check().Supports(Feature.SortArrayOperator); - var collection = CreateEngineersCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() .Select( @@ -146,7 +153,7 @@ public void OrderBy_on_entire_object_followed_by_ThenBy_should_throw( [Values(false, true)] bool enableClientSideProjections) { RequireServer.Check().Supports(Feature.SortArrayOperator); - var collection = CreateEngineersCollection(); + var collection = Fixture.Collection; var translationOptions = new ExpressionTranslationOptions { EnableClientSideProjections = enableClientSideProjections }; var queryable = collection @@ -177,7 +184,7 @@ public void OrderByDescending_on_entire_object_followed_by_ThenBy_should_throw( [Values(false, true)] bool enableClientSideProjections) { RequireServer.Check().Supports(Feature.SortArrayOperator); - var collection = CreateEngineersCollection(); + var collection = Fixture.Collection; var translationOptions = new ExpressionTranslationOptions { EnableClientSideProjections = enableClientSideProjections }; var queryable = collection @@ -208,7 +215,7 @@ public void ThenBy_on_entire_object_should_throw( [Values(false, true)] bool enableClientSideProjections) { RequireServer.Check().Supports(Feature.SortArrayOperator); - var collection = CreateEngineersCollection(); + var collection = Fixture.Collection; var 
translationOptions = new ExpressionTranslationOptions { EnableClientSideProjections = enableClientSideProjections }; var queryable = collection @@ -238,7 +245,7 @@ public void ThenByDescending_on_entire_object_should_throw( [Values(false, true)] bool enableClientSideProjections) { RequireServer.Check().Supports(Feature.SortArrayOperator); - var collection = CreateEngineersCollection(); + var collection = Fixture.Collection; var translationOptions = new ExpressionTranslationOptions { EnableClientSideProjections = enableClientSideProjections }; var queryable = collection @@ -266,7 +273,7 @@ public void ThenByDescending_on_entire_object_should_throw( public void Client_side_ThenBy_should_throw() { RequireServer.Check().Supports(Feature.SortArrayOperator); - var collection = CreateEngineersCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() .Select(x => new { Result = x.Team.OrderBy(m => m.Name) }); @@ -283,7 +290,7 @@ public void Client_side_ThenBy_should_throw() public void Client_side_ThenByDescending_should_throw() { RequireServer.Check().Supports(Feature.SortArrayOperator); - var collection = CreateEngineersCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() .Select(x => new { Result = x.Team.OrderBy(m => m.Name) }); @@ -300,7 +307,7 @@ public void Client_side_ThenByDescending_should_throw() public void IOrderedEnumerableSerializer_Serialize_should_work() { RequireServer.Check().Supports(Feature.SortArrayOperator); - var collection = CreateEngineersCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() .Select(x => new { Result = x.Team.OrderBy(m => m.Name) }); @@ -311,33 +318,13 @@ public void IOrderedEnumerableSerializer_Serialize_should_work() json.Should().Be("{ \"Result\" : [{ \"Name\" : \"Charlie\", \"Age\" : 30, \"Address\" : { \"Street\" : \"12 French St\", \"City\" : \"New Brunswick\" } }, { \"Name\" : \"Dallas\", \"Age\" : 30, 
\"Address\" : { \"Street\" : \"12 Cowper St\", \"City\" : \"Palo Alto\" } }, { \"Name\" : \"Pat\", \"Age\" : 42, \"Address\" : { \"Street\" : \"12 Baker St\", \"City\" : \"London\" } }] }"); } - private IMongoCollection<Engineers> CreateEngineersCollection() - { - var collection = GetCollection<Engineers>(); - - CreateCollection( - collection, - new Engineers - { - Id = 1, - Team = new[] - { - new TeamMember { Name = "Pat", Age = 42, Address = new Address { Street = "12 Baker St", City = "London"}}, - new TeamMember { Name = "Dallas", Age = 30, Address = new Address { Street = "12 Cowper St", City = "Palo Alto"}}, - new TeamMember { Name = "Charlie", Age = 30, Address = new Address { Street = "12 French St", City = "New Brunswick"}} - } - }); - - return collection; - } - - private class Engineers + public class Engineers { public int Id { get; set; } public TeamMember[] Team { get; set; } } - private class TeamMember : IComparable<TeamMember> + public class TeamMember : IComparable<TeamMember> { public string Name { get; set; } public int Age { get; set; } @@ -346,10 +333,27 @@ private class TeamMember : IComparable<TeamMember> public int CompareTo(TeamMember other) => Age.CompareTo(other.Age); } - private class Address + public class Address { public string Street { get; set; } public string City { get; set; } } + + public sealed class ClassFixture : MongoCollectionFixture<Engineers> + { + protected override IEnumerable<Engineers> InitialData => + [ + new Engineers + { + Id = 1, + Team = new[] + { + new TeamMember { Name = "Pat", Age = 42, Address = new Address { Street = "12 Baker St", City = "London"}}, + new TeamMember { Name = "Dallas", Age = 30, Address = new Address { Street = "12 Cowper St", City = "Palo Alto"}}, + new TeamMember { Name = "Charlie", Age = 30, Address = new Address { Street = "12 French St", City = "New Brunswick"}} + } + } + ]; + } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3965Tests.cs 
b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3965Tests.cs index e8ada5872ef..9383f92c567 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3965Tests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp3965Tests.cs @@ -13,19 +13,25 @@ * limitations under the License. */ +using System.Collections.Generic; using System.Linq; using FluentAssertions; -using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira { - public class CSharp3965Tests : Linq3IntegrationTest + public class CSharp3965Tests : LinqIntegrationTest<CSharp3965Tests.ClassFixture> { + public CSharp3965Tests(ClassFixture fixture) + : base(fixture) + { + } + [Fact] public void OrderBy_with_expression_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() @@ -45,7 +51,7 @@ public void OrderBy_with_expression_should_work() [Fact] public void OrderBy_with_expression_and_ThenBy_with_expression_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() @@ -66,7 +72,7 @@ public void OrderBy_with_expression_and_ThenBy_with_expression_should_work() [Fact] public void OrderBy_with_expression_and_ThenBy_with_field_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() @@ -87,7 +93,7 @@ public void OrderBy_with_expression_and_ThenBy_with_field_should_work() [Fact] public void OrderBy_with_expression_and_ThenByDescending_with_expression_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() @@ -108,7 +114,7 @@ public void OrderBy_with_expression_and_ThenByDescending_with_expression_should_ [Fact] public void 
OrderBy_with_expression_and_ThenByDescending_with_field_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() @@ -129,7 +135,7 @@ public void OrderBy_with_expression_and_ThenByDescending_with_field_should_work( [Fact] public void OrderBy_with_field_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() @@ -147,7 +153,7 @@ public void OrderBy_with_field_should_work() [Fact] public void OrderBy_with_field_and_ThenBy_with_expression_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() @@ -168,7 +174,7 @@ public void OrderBy_with_field_and_ThenBy_with_expression_should_work() [Fact] public void OrderBy_with_field_and_ThenBy_with_field_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() @@ -187,7 +193,7 @@ public void OrderBy_with_field_and_ThenBy_with_field_should_work() [Fact] public void OrderBy_with_field_and_ThenByDescending_with_expression_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() @@ -208,7 +214,7 @@ public void OrderBy_with_field_and_ThenByDescending_with_expression_should_work( [Fact] public void OrderBy_with_field_and_ThenByDescending_with_field_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() @@ -227,7 +233,7 @@ public void OrderBy_with_field_and_ThenByDescending_with_field_should_work() [Fact] public void OrderByDescending_with_expression_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() @@ -247,7 +253,7 @@ public void OrderByDescending_with_expression_should_work() [Fact] 
public void OrderByDescending_with_expression_and_ThenBy_with_expression_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() @@ -268,7 +274,7 @@ public void OrderByDescending_with_expression_and_ThenBy_with_expression_should_ [Fact] public void OrderByDescending_with_expression_and_ThenBy_with_field_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() @@ -289,7 +295,7 @@ public void OrderByDescending_with_expression_and_ThenBy_with_field_should_work( [Fact] public void OrderByDescending_with_expression_and_ThenByDescending_with_expression_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() @@ -310,7 +316,7 @@ public void OrderByDescending_with_expression_and_ThenByDescending_with_expressi [Fact] public void OrderByDescending_with_expression_and_ThenByDescending_with_field_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() @@ -331,7 +337,7 @@ public void OrderByDescending_with_expression_and_ThenByDescending_with_field_sh [Fact] public void OrderByDescending_with_field_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() @@ -349,7 +355,7 @@ public void OrderByDescending_with_field_should_work() [Fact] public void OrderByDescending_with_field_and_ThenBy_with_expression_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() @@ -370,7 +376,7 @@ public void OrderByDescending_with_field_and_ThenBy_with_expression_should_work( [Fact] public void OrderByDescending_with_field_and_ThenBy_with_field_should_work() { - var collection = CreateCollection(); + var collection = 
Fixture.Collection; var queryable = collection .AsQueryable() @@ -389,7 +395,7 @@ public void OrderByDescending_with_field_and_ThenBy_with_field_should_work() [Fact] public void OrderByDescending_with_field_and_ThenByDescending_with_expression_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() @@ -410,7 +416,7 @@ public void OrderByDescending_with_field_and_ThenByDescending_with_expression_sh [Fact] public void OrderByDescending_with_field_and_ThenByDescending_with_field_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection .AsQueryable() @@ -426,36 +432,20 @@ public void OrderByDescending_with_field_and_ThenByDescending_with_field_should_ results.Select(x => x.Id).Should().Equal(2, 1); } - private IMongoCollection<C> CreateCollection() - { - var collection = GetCollection<C>(); - - var documents = new C[] - { - new C - { - Id = 1, - X = 1, - Y = 1, - }, - new C - { - Id = 2, - X = 1, - Y = 2 - } - - }; - CreateCollection(collection, documents); - - return collection; - } - - private class C + public class C { public int Id { get; set; } public int X { get; set; } public int Y { get; set; } } + + public sealed class ClassFixture : MongoCollectionFixture<C> + { + protected override IEnumerable<C> InitialData => + [ + new C { Id = 1, X = 1, Y = 1 }, + new C { Id = 2, X = 1, Y = 2 } + ]; + } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp4048Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp4048Tests.cs index d69edbc43ce..a6bb95b69b1 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp4048Tests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp4048Tests.cs @@ -17,16 +17,22 @@ using System.Linq; using FluentAssertions; using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using Xunit; namespace 
MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira { - public class CSharp4048Tests : Linq3IntegrationTest + public class CSharp4048Tests : LinqIntegrationTest<CSharp4048Tests.ClassFixture> { + public CSharp4048Tests(ClassFixture fixture) + : base(fixture) + { + } + [Fact] public void Array_ArrayIndex_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -51,7 +57,7 @@ public void Array_ArrayIndex_of_root_should_work() [Fact] public void Array_ArrayIndex_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -76,7 +82,7 @@ public void Array_ArrayIndex_of_scalar_should_work() [Fact] public void List_get_Item_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -101,7 +107,7 @@ public void List_get_Item_of_root_should_work() [Fact] public void List_get_Item_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -126,7 +132,7 @@ public void List_get_Item_of_scalar_should_work() [Fact] public void IGrouping_Aggregate_with_func_of_root_should_return_expected_result() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -151,7 +157,7 @@ public void IGrouping_Aggregate_with_func_of_root_should_return_expected_result( [Fact] public void IGrouping_Aggregate_with_func_of_scalar_should_return_expected_result() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -176,7 +182,7 @@ public void 
IGrouping_Aggregate_with_func_of_scalar_should_return_expected_resul [Fact] public void IGrouping_Aggregate_with_seed_and_func_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -201,7 +207,7 @@ public void IGrouping_Aggregate_with_seed_and_func_of_root_should_work() [Fact] public void IGrouping_Aggregate_with_seed_and_func_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -226,7 +232,7 @@ public void IGrouping_Aggregate_with_seed_and_func_of_scalar_should_work() [Fact] public void IGrouping_Aggregate_with_seed_and_func_and_resultSelector_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -251,7 +257,7 @@ public void IGrouping_Aggregate_with_seed_and_func_and_resultSelector_of_root_sh [Fact] public void IGrouping_Aggregate_with_seed_and_func_and_resultSelector_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -276,7 +282,7 @@ public void IGrouping_Aggregate_with_seed_and_func_and_resultSelector_of_scalar_ [Fact] public void IGrouping_All_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -301,7 +307,7 @@ public void IGrouping_All_of_root_should_work() [Fact] public void IGrouping_All_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -326,7 +332,7 @@ public void IGrouping_All_of_scalar_should_work() [Fact] public void 
IGrouping_Any_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -351,7 +357,7 @@ public void IGrouping_Any_of_root_should_work() [Fact] public void IGrouping_Any_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -376,7 +382,7 @@ public void IGrouping_Any_of_scalar_should_work() [Fact] public void IGrouping_Any_with_predicate_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -401,7 +407,7 @@ public void IGrouping_Any_with_predicate_of_root_should_work() [Fact] public void IGrouping_Any_with_predicate_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -426,7 +432,7 @@ public void IGrouping_Any_with_predicate_of_scalar_should_work() [Fact] public void IGrouping_Average_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -451,7 +457,7 @@ public void IGrouping_Average_of_root_should_work() [Fact] public void IGrouping_Average_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -476,7 +482,7 @@ public void IGrouping_Average_of_scalar_should_work() [Fact] public void IGrouping_Average_with_selector_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -501,7 +507,7 @@ public void IGrouping_Average_with_selector_of_root_should_work() [Fact] public 
void IGrouping_Average_with_selector_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -526,7 +532,7 @@ public void IGrouping_Average_with_selector_of_scalar_should_work() [Fact] public void IGrouping_Concat_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -551,7 +557,7 @@ public void IGrouping_Concat_of_root_should_work() [Fact] public void IGrouping_Concat_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -576,7 +582,7 @@ public void IGrouping_Concat_of_scalar_should_work() [Fact] public void IGrouping_Contains_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -601,7 +607,7 @@ public void IGrouping_Contains_of_root_should_work() [Fact] public void IGrouping_Contains_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -626,7 +632,7 @@ public void IGrouping_Contains_of_scalar_should_work() [Fact] public void IGrouping_Count_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -651,7 +657,7 @@ public void IGrouping_Count_of_root_should_work() [Fact] public void IGrouping_Count_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -676,7 +682,7 @@ public void IGrouping_Count_of_scalar_should_work() [Fact] public void 
IGrouping_Count_with_predicate_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -701,7 +707,7 @@ public void IGrouping_Count_with_predicate_of_root_should_work() [Fact] public void IGrouping_Count_with_predicate_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -726,7 +732,7 @@ public void IGrouping_Count_with_predicate_of_scalar_should_work() [Fact] public void IGrouping_DefaultIfEmpty_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -751,7 +757,7 @@ public void IGrouping_DefaultIfEmpty_of_root_should_work() [Fact] public void IGrouping_DefaultIfEmpty_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -776,7 +782,7 @@ public void IGrouping_DefaultIfEmpty_of_scalar_should_work() [Fact] public void IGrouping_Distinct_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -801,7 +807,7 @@ public void IGrouping_Distinct_of_root_should_work() [Fact] public void IGrouping_Distinct_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -826,7 +832,7 @@ public void IGrouping_Distinct_of_scalar_should_work() [Fact] public void IGrouping_ElementAt_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -851,7 +857,7 @@ public void 
IGrouping_ElementAt_of_root_should_work() [Fact] public void IGrouping_ElementAt_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -876,7 +882,7 @@ public void IGrouping_ElementAt_of_scalar_should_work() [Fact] public void IGrouping_ElementAtOrDefault_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -901,7 +907,7 @@ public void IGrouping_ElementAtOrDefault_of_root_should_work() [Fact] public void IGrouping_ElementAtOrDefault_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -926,7 +932,7 @@ public void IGrouping_ElementAtOrDefault_of_scalar_should_work() [Fact] public void IGrouping_Except_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -951,7 +957,7 @@ public void IGrouping_Except_of_root_should_work() [Fact] public void IGrouping_Except_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -976,7 +982,7 @@ public void IGrouping_Except_of_scalar_should_work() [Fact] public void IGrouping_First_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -1001,7 +1007,7 @@ public void IGrouping_First_of_root_should_work() [Fact] public void IGrouping_First_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -1026,7 +1032,7 @@ public 
void IGrouping_First_of_scalar_should_work() [Fact] public void IGrouping_First_with_predicate_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -1051,7 +1057,7 @@ public void IGrouping_First_with_predicate_of_root_should_work() [Fact] public void IGrouping_First_with_predicate_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -1076,7 +1082,7 @@ public void IGrouping_First_with_predicate_of_scalar_should_work() [Fact] public void IGrouping_FirstOrDefault_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -1101,7 +1107,7 @@ public void IGrouping_FirstOrDefault_of_root_should_work() [Fact] public void IGrouping_FirstOrDefault_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -1126,7 +1132,7 @@ public void IGrouping_FirstOrDefault_of_scalar_should_work() [Fact] public void IGrouping_FirstOrDefault_with_predicate_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -1152,7 +1158,7 @@ public void IGrouping_FirstOrDefault_with_predicate_of_root_should_work() [Fact] public void IGrouping_FirstOrDefault_with_predicate_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -1177,7 +1183,7 @@ public void IGrouping_FirstOrDefault_with_predicate_of_scalar_should_work() [Fact] public void IGrouping_Intersect_of_root_should_work() { - var collection = 
CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -1202,7 +1208,7 @@ public void IGrouping_Intersect_of_root_should_work() [Fact] public void IGrouping_Intersect_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -1227,7 +1233,7 @@ public void IGrouping_Intersect_of_scalar_should_work() [Fact] public void IGrouping_Last_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -1252,7 +1258,7 @@ public void IGrouping_Last_of_root_should_work() [Fact] public void IGrouping_Last_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -1277,7 +1283,7 @@ public void IGrouping_Last_of_scalar_should_work() [Fact] public void IGrouping_Last_with_predicate_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -1302,7 +1308,7 @@ public void IGrouping_Last_with_predicate_of_root_should_work() [Fact] public void IGrouping_Last_with_predicate_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -1327,7 +1333,7 @@ public void IGrouping_Last_with_predicate_of_scalar_should_work() [Fact] public void IGrouping_LastOrDefault_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -1352,7 +1358,7 @@ public void IGrouping_LastOrDefault_of_root_should_work() [Fact] public void 
IGrouping_LastOrDefault_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -1377,7 +1383,7 @@ public void IGrouping_LastOrDefault_of_scalar_should_work() [Fact] public void IGrouping_LastOrDefault_with_predicate_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -1403,7 +1409,7 @@ public void IGrouping_LastOrDefault_with_predicate_of_root_should_work() [Fact] public void IGrouping_LastOrDefault_with_predicate_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -1428,7 +1434,7 @@ public void IGrouping_LastOrDefault_with_predicate_of_scalar_should_work() [Fact] public void IGrouping_LongCount_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -1453,7 +1459,7 @@ public void IGrouping_LongCount_of_root_should_work() [Fact] public void IGrouping_LongCount_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -1478,7 +1484,7 @@ public void IGrouping_LongCount_of_scalar_should_work() [Fact] public void IGrouping_LongCount_with_predicate_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -1503,7 +1509,7 @@ public void IGrouping_LongCount_with_predicate_of_root_should_work() [Fact] public void IGrouping_LongCount_with_predicate_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = 
collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -1528,7 +1534,7 @@ public void IGrouping_LongCount_with_predicate_of_scalar_should_work() [Fact] public void IGrouping_Max_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -1553,7 +1559,7 @@ public void IGrouping_Max_of_root_should_work() [Fact] public void IGrouping_Max_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -1578,7 +1584,7 @@ public void IGrouping_Max_of_scalar_should_work() [Fact] public void IGrouping_Max_with_selector_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -1603,7 +1609,7 @@ public void IGrouping_Max_with_selector_of_root_should_work() [Fact] public void IGrouping_Max_with_selector_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -1628,7 +1634,7 @@ public void IGrouping_Max_with_selector_of_scalar_should_work() [Fact] public void IGrouping_Min_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -1653,7 +1659,7 @@ public void IGrouping_Min_of_root_should_work() [Fact] public void IGrouping_Min_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -1678,7 +1684,7 @@ public void IGrouping_Min_of_scalar_should_work() [Fact] public void IGrouping_Min_with_selector_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var 
queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -1703,7 +1709,7 @@ public void IGrouping_Min_with_selector_of_root_should_work() [Fact] public void IGrouping_Min_with_selector_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -1728,7 +1734,7 @@ public void IGrouping_Min_with_selector_of_scalar_should_work() [Fact] public void IGrouping_Reverse_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -1753,7 +1759,7 @@ public void IGrouping_Reverse_of_root_should_work() [Fact] public void IGrouping_Reverse_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -1778,7 +1784,7 @@ public void IGrouping_Reverse_of_scalar_should_work() [Fact] public void IGrouping_Select_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -1803,7 +1809,7 @@ public void IGrouping_Select_of_root_should_work() [Fact] public void IGrouping_Select_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -1828,7 +1834,7 @@ public void IGrouping_Select_of_scalar_should_work() [Fact] public void IGrouping_StandardDeviationPopulation_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -1853,7 +1859,7 @@ public void IGrouping_StandardDeviationPopulation_of_root_should_work() [Fact] public void IGrouping_StandardDeviationPopulation_of_scalar_should_work() { - var collection = 
CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -1878,7 +1884,7 @@ public void IGrouping_StandardDeviationPopulation_of_scalar_should_work() [Fact] public void IGrouping_StandardDeviationPopulation_with_selector_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -1903,7 +1909,7 @@ public void IGrouping_StandardDeviationPopulation_with_selector_of_root_should_w [Fact] public void IGrouping_StandardDeviationPopulation_with_selector_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -1928,7 +1934,7 @@ public void IGrouping_StandardDeviationPopulation_with_selector_of_scalar_should [Fact] public void IGrouping_StandardDeviationSample_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -1953,7 +1959,7 @@ public void IGrouping_StandardDeviationSample_of_root_should_work() [Fact] public void IGrouping_StandardDeviationSample_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -1978,7 +1984,7 @@ public void IGrouping_StandardDeviationSample_of_scalar_should_work() [Fact] public void IGrouping_StandardDeviationSample_with_selector_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -2003,7 +2009,7 @@ public void IGrouping_StandardDeviationSample_with_selector_of_root_should_work( [Fact] public void IGrouping_StandardDeviationSample_with_selector_of_scalar_should_work() { - var collection = 
CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -2028,7 +2034,7 @@ public void IGrouping_StandardDeviationSample_with_selector_of_scalar_should_wor [Fact] public void IGrouping_Sum_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -2053,7 +2059,7 @@ public void IGrouping_Sum_of_root_should_work() [Fact] public void IGrouping_Sum_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -2078,7 +2084,7 @@ public void IGrouping_Sum_of_scalar_should_work() [Fact] public void IGrouping_Take_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -2103,7 +2109,7 @@ public void IGrouping_Take_of_root_should_work() [Fact] public void IGrouping_Take_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -2128,7 +2134,7 @@ public void IGrouping_Take_of_scalar_should_work() [Fact] public void IGrouping_ToArray_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -2153,7 +2159,7 @@ public void IGrouping_ToArray_of_root_should_work() [Fact] public void IGrouping_ToArray_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -2178,7 +2184,7 @@ public void IGrouping_ToArray_of_scalar_should_work() [Fact] public void IGrouping_ToList_of_root_should_work() { - var collection = CreateCollection(); + var 
collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -2203,7 +2209,7 @@ public void IGrouping_ToList_of_root_should_work() [Fact] public void IGrouping_ToList_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -2228,7 +2234,7 @@ public void IGrouping_ToList_of_scalar_should_work() [Fact] public void IGrouping_Union_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -2253,7 +2259,7 @@ public void IGrouping_Union_of_root_should_work() [Fact] public void IGrouping_Union_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -2278,7 +2284,7 @@ public void IGrouping_Union_of_scalar_should_work() [Fact] public void IGrouping_Where_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -2303,7 +2309,7 @@ public void IGrouping_Where_of_root_should_work() [Fact] public void IGrouping_Where_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -2328,7 +2334,7 @@ public void IGrouping_Where_of_scalar_should_work() [Fact] public void IGrouping_Zip_of_root_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .GroupBy(c => c.Id) @@ -2353,7 +2359,7 @@ public void IGrouping_Zip_of_root_should_work() [Fact] public void IGrouping_Zip_of_scalar_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = 
collection.AsQueryable() .GroupBy(c => c.Id, c => c.X) @@ -2375,27 +2381,24 @@ public void IGrouping_Zip_of_scalar_should_work() results[1].ShouldBeEquivalentTo(new { Id = 2, Result = CreateList(new { X = 2, Y = 3 }) }); } - private IMongoCollection<C> CreateCollection() - { - var collection = GetCollection<C>(); - var documents = new[] - { - new C { Id = 1, X = 1 }, - new C { Id = 2, X = 2 } - }; - CreateCollection(collection, documents); - return collection; - } - private List<TAnonymous> CreateList<TAnonymous>(params TAnonymous[] items) { return new List<TAnonymous>(items); } - private class C + public class C { public int Id { get; set; } public int X { get; set; } } + + public sealed class ClassFixture : MongoCollectionFixture<C> + { + protected override IEnumerable<C> InitialData => + [ + new C { Id = 1, X = 1 }, + new C { Id = 2, X = 2 } + ]; + } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp4049Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp4049Tests.cs index 7647755fc58..39cad63b45d 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp4049Tests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp4049Tests.cs @@ -13,19 +13,26 @@ * limitations under the License. 
*/ +using System.Collections.Generic; using System.Linq; using FluentAssertions; using MongoDB.Bson.Serialization.Attributes; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira { - public class CSharp4049Tests : Linq3IntegrationTest + public class CSharp4049Tests : LinqIntegrationTest<CSharp4049Tests.ClassFixture> { + public CSharp4049Tests(ClassFixture fixture) + : base(fixture) + { + } + [Fact] public void Aggregate_Project_should_translate_as_expected() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .Project(x => new TestClass { Property = x.Property.ToUpper() }); @@ -39,7 +46,7 @@ public void Aggregate_Project_should_translate_as_expected() [Fact] public void Queryable_Select_should_translate_as_expected() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .Select(x => new TestClass { Property = x.Property.ToUpper() }); @@ -50,23 +57,18 @@ public void Queryable_Select_should_translate_as_expected() results.Property.Should().Be("ABC"); } - private IMongoCollection<TestClass> CreateCollection() - { - var collection = GetCollection<TestClass>(); - - var documents = new[] - { - new TestClass { Property = "abc" } - }; - CreateCollection(collection, documents); - - return collection; - } - public class TestClass { [BsonElement("_p")] public string Property { get; set; } } + + public sealed class ClassFixture : MongoCollectionFixture<TestClass> + { + protected override IEnumerable<TestClass> InitialData => + [ + new TestClass { Property = "abc" } + ]; + } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp4057Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp4057Tests.cs index 9383176fe97..dfa8e9995d1 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp4057Tests.cs +++ 
b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp4057Tests.cs @@ -13,18 +13,25 @@ * limitations under the License. */ +using System.Collections.Generic; using System.Linq; using FluentAssertions; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira { - public class CSharp4057Tests : Linq3IntegrationTest + public class CSharp4057Tests : LinqIntegrationTest<CSharp4057Tests.ClassFixture> { + public CSharp4057Tests(ClassFixture fixture) + : base(fixture) + { + } + [Fact] public void Aggregate_Project_should_work() { - var collection = CreateProductsCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .Sort(Builders<Product>.Sort.Ascending(p => p.Id)) @@ -47,7 +54,7 @@ public void Aggregate_Project_should_work() [Fact] public void Queryable_Select() { - var collection = CreateProductsCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .OrderBy(p => p.Id) @@ -67,22 +74,7 @@ public void Queryable_Select() results.Select(r => r.IsExternalUrl).Should().Equal(true, true, false); } - private IMongoCollection<Product> CreateProductsCollection() - { - var collection = GetCollection<Product>(); - - var documents = new[] - { - new Product { Id = 1, ShopUrl = null }, - new Product { Id = 2, ShopUrl = "" }, - new Product { Id = 3, ShopUrl = "abc" } - }; - CreateCollection(collection, documents); - - return collection; - } - - private class Product + public class Product { public int Id { get; set; } public string ShopUrl { get; set; } @@ -92,5 +84,15 @@ private class ProductTypeSearchResult { public bool IsExternalUrl { get; set; } } + + public sealed class ClassFixture : MongoCollectionFixture<Product> + { + protected override IEnumerable<Product> InitialData => + [ + new Product { Id = 1, ShopUrl = null }, + new Product { Id = 2, ShopUrl = "" }, + new Product { Id = 3, ShopUrl = "abc" } + ]; + } } } diff --git 
a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp5587Tests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp5587Tests.cs new file mode 100644 index 00000000000..3c9d46ce032 --- /dev/null +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Jira/CSharp5587Tests.cs @@ -0,0 +1,114 @@ +/* Copyright 2010-present MongoDB Inc. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +using System.Collections.Generic; +using System.Linq; +using MongoDB.Driver.TestHelpers; +using FluentAssertions; +using MongoDB.Bson.Serialization.Attributes; +using MongoDB.Bson.Serialization.Serializers; +using MongoDB.Driver.Linq; +using Xunit; + +namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Jira; + +public class CSharp5587Tests : LinqIntegrationTest<CSharp5587Tests.ClassFixture> +{ + public CSharp5587Tests(ClassFixture fixture) + : base(fixture) + { + } + + [Fact] + public void FindOneAndUpdate_should_use_correct_discriminator() + { + var collection = Fixture.Collection; + + var lion1 = new Lion { Id = 1, Name = "Lion1" }; + var updateDefinition1 = Builders<Lion>.Update + .SetOnInsert(l => l.Id, 1) + .Set(l => l.Name, lion1.Name); + collection.OfType<Lion>().FindOneAndUpdate( + f => f.Name == lion1.Name, + updateDefinition1, + new FindOneAndUpdateOptions<Lion> { IsUpsert = true }); + + var result = collection.AsQueryable().As(BsonDocumentSerializer.Instance).Single(); + result.Should().BeEquivalentTo( + """ 
+ { + _id : 1, + _t : ["Animal", "Cat", "Lion"], + Name : "Lion1" + } + """); + } + + [Fact] + public void UpdateOne_should_use_correct_discriminator() + { + var collection = Fixture.Collection; + + var lion2 = new Lion { Id = 2, Name = "Lion2" }; + var updateDefinition2 = Builders<Lion>.Update + .SetOnInsert(l => l.Id, lion2.Id) + .Set(l => l.Name, lion2.Name); + collection.OfType<Lion>().UpdateOne( + f => f.Name == lion2.Name, + updateDefinition2, + new UpdateOptions<Lion> { IsUpsert = true }); + + var result = collection.AsQueryable().As(BsonDocumentSerializer.Instance).Single(); + result.Should().BeEquivalentTo( + """ + { + _id : 2, + _t : ["Animal", "Cat", "Lion"], + Name : "Lion2" + } + """); + } + + [BsonDiscriminator(RootClass = true)] + [BsonKnownTypes(typeof(Cat), typeof(Dog))] + public class Animal + { + public int Id { get; set; } + } + + [BsonKnownTypes(typeof(Lion), typeof(Tiger))] + public class Cat : Animal + { + } + + public class Dog : Animal + { + } + + public class Lion : Cat + { + public string Name { get; set; } + } + public class Tiger : Cat + { + } + + public sealed class ClassFixture : MongoCollectionFixture<Animal> + { + public override bool InitializeDataBeforeEachTestCase => true; + + protected override IEnumerable<Animal> InitialData => null; + } +} diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Linq3IntegrationTest.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Linq3IntegrationTest.cs index fd1440ed500..b198cf4e9a1 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Linq3IntegrationTest.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Linq3IntegrationTest.cs @@ -18,12 +18,13 @@ using FluentAssertions; using MongoDB.Bson; using MongoDB.Bson.Serialization; -using MongoDB.Driver.Linq; using MongoDB.Driver.Linq.Linq3Implementation; using MongoDB.Driver.Linq.Linq3Implementation.Translators.ExpressionToExecutableQueryTranslators; +using Xunit; namespace 
MongoDB.Driver.Tests.Linq.Linq3Implementation { + [Trait("Category", "Integration")] public abstract class Linq3IntegrationTest { protected void AssertStages(IEnumerable<BsonDocument> stages, params string[] expectedStages) diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/MongoQueryProviderTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/MongoQueryProviderTests.cs index 98f8f5d2155..65890c4b457 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/MongoQueryProviderTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/MongoQueryProviderTests.cs @@ -24,6 +24,7 @@ namespace MongoDB.Driver.Tests.Linq.Linq3Implementation { + [Trait("Category", "Integration")] public class MongoQueryProviderTests { [Fact] diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/AggregateMethodToAggregationExpressionTranslatorTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/AggregateMethodToAggregationExpressionTranslatorTests.cs index 4afb1d23bf2..5158c901e81 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/AggregateMethodToAggregationExpressionTranslatorTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/AggregateMethodToAggregationExpressionTranslatorTests.cs @@ -13,22 +13,28 @@ * limitations under the License. 
*/ +using System.Collections.Generic; using System.Linq; using FluentAssertions; -using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using MongoDB.TestHelpers.XunitExtensions; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Translators.ExpressionToAggregationExpressionTranslators.MethodTranslators { - public class AggregateMethodToAggregationExpressionTranslatorTests : Linq3IntegrationTest + public class AggregateMethodToAggregationExpressionTranslatorTests : LinqIntegrationTest<AggregateMethodToAggregationExpressionTranslatorTests.ClassFixture> { + public AggregateMethodToAggregationExpressionTranslatorTests(ClassFixture fixture) + : base(fixture) + { + } + [Theory] [ParameterAttributeData] public void Aggregate_with_func_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.A.AsQueryable().Aggregate((x, y) => x * y)) : @@ -46,7 +52,7 @@ public void Aggregate_with_func_should_work( public void Aggregate_with_seed_and_func_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.A.AsQueryable().Aggregate(2, (x, y) => x * y)) : @@ -64,7 +70,7 @@ public void Aggregate_with_seed_and_func_should_work( public void Aggregate_with_seed_func_and_result_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? 
collection.AsQueryable().Select(x => x.A.AsQueryable().Aggregate(2, (x, y) => x * y, x => x * 3)) : @@ -77,22 +83,21 @@ public void Aggregate_with_seed_func_and_result_selector_should_work( results.Should().Equal(6, 6, 12, 36); } - private IMongoCollection<C> CreateCollection() + public class C { - var collection = GetCollection<C>("test"); - CreateCollection( - collection, - new C { Id = 0, A = new int[0] }, - new C { Id = 1, A = new int[] { 1 } }, - new C { Id = 2, A = new int[] { 1, 2 } }, - new C { Id = 3, A = new int[] { 1, 2, 3 } }); - return collection; + public int Id { get; set; } + public int[] A { get; set; } } - private class C + public sealed class ClassFixture : MongoCollectionFixture<C> { - public int Id { get; set; } - public int[] A { get; set; } + protected override IEnumerable<C> InitialData => + [ + new C { Id = 0, A = new int[0] }, + new C { Id = 1, A = new int[] { 1 } }, + new C { Id = 2, A = new int[] { 1, 2 } }, + new C { Id = 3, A = new int[] { 1, 2, 3 } } + ]; } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/AllMethodToAggregationExpressionTranslatorTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/AllMethodToAggregationExpressionTranslatorTests.cs index 70807c1cd24..8b2a90cad70 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/AllMethodToAggregationExpressionTranslatorTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/AllMethodToAggregationExpressionTranslatorTests.cs @@ -13,22 +13,28 @@ * limitations under the License. 
*/ +using System.Collections.Generic; using System.Linq; using FluentAssertions; -using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using MongoDB.TestHelpers.XunitExtensions; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Translators.ExpressionToAggregationExpressionTranslators.MethodTranslators { - public class AllMethodToAggregationExpressionTranslatorTests : Linq3IntegrationTest + public class AllMethodToAggregationExpressionTranslatorTests : LinqIntegrationTest<AllMethodToAggregationExpressionTranslatorTests.ClassFixture> { + public AllMethodToAggregationExpressionTranslatorTests(ClassFixture fixture) + : base(fixture) + { + } + [Theory] [ParameterAttributeData] public void All_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.A.AsQueryable().All(x => x < 2)) : @@ -41,22 +47,21 @@ public void All_should_work( results.Should().Equal(true, true, false, false); } - private IMongoCollection<C> CreateCollection() + public class C { - var collection = GetCollection<C>("test"); - CreateCollection( - collection, - new C { Id = 0, A = new int[0] }, - new C { Id = 1, A = new int[] { 1 } }, - new C { Id = 2, A = new int[] { 1, 2 } }, - new C { Id = 3, A = new int[] { 1, 2, 3 } }); - return collection; + public int Id { get; set; } + public int[] A { get; set; } } - private class C + public sealed class ClassFixture : MongoCollectionFixture<C> { - public int Id { get; set; } - public int[] A { get; set; } + protected override IEnumerable<C> InitialData => + [ + new C { Id = 0, A = new int[0] }, + new C { Id = 1, A = new int[] { 1 } }, + new C { Id = 2, A = new int[] { 1, 2 } }, + new C { Id = 3, A = new int[] { 1, 2, 3 } } + ]; } } } diff --git 
a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/AnyMethodToAggregationExpressionTranslatorTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/AnyMethodToAggregationExpressionTranslatorTests.cs index 7e8cacaa486..e1bf60c835c 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/AnyMethodToAggregationExpressionTranslatorTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/AnyMethodToAggregationExpressionTranslatorTests.cs @@ -13,22 +13,28 @@ * limitations under the License. */ +using System.Collections.Generic; using System.Linq; using FluentAssertions; -using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using MongoDB.TestHelpers.XunitExtensions; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Translators.ExpressionToAggregationExpressionTranslators.MethodTranslators { - public class AnyMethodToAggregationExpressionTranslatorTests : Linq3IntegrationTest + public class AnyMethodToAggregationExpressionTranslatorTests : LinqIntegrationTest<AnyMethodToAggregationExpressionTranslatorTests.ClassFixture> { + public AnyMethodToAggregationExpressionTranslatorTests(ClassFixture fixture) + : base(fixture) + { + } + [Theory] [ParameterAttributeData] public void Any_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? 
collection.AsQueryable().Select(x => x.A.AsQueryable().Any()) : @@ -46,7 +52,7 @@ public void Any_should_work( public void Any_with_predicate_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.A.AsQueryable().Any(x => x > 2)) : @@ -62,7 +68,7 @@ public void Any_with_predicate_should_work( [Fact] public void Any_on_constant_array_should_be_optimized() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var obj = new[] { 1, 2, 3 }; var queryable = collection.AsQueryable() @@ -75,22 +81,21 @@ public void Any_on_constant_array_should_be_optimized() results.Select(x => x.Id).Should().Equal(1, 2, 3); } - private IMongoCollection<C> CreateCollection() + public class C { - var collection = GetCollection<C>("test"); - CreateCollection( - collection, - new C { Id = 0, A = new int[0] }, - new C { Id = 1, A = new int[] { 1 } }, - new C { Id = 2, A = new int[] { 1, 2 } }, - new C { Id = 3, A = new int[] { 1, 2, 3 } }); - return collection; + public int Id { get; set; } + public int[] A { get; set; } } - private class C + public sealed class ClassFixture : MongoCollectionFixture<C> { - public int Id { get; set; } - public int[] A { get; set; } + protected override IEnumerable<C> InitialData => + [ + new C { Id = 0, A = new int[0] }, + new C { Id = 1, A = new int[] { 1 } }, + new C { Id = 2, A = new int[] { 1, 2 } }, + new C { Id = 3, A = new int[] { 1, 2, 3 } } + ]; } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/AverageMethodToAggregationExpressionTranslatorTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/AverageMethodToAggregationExpressionTranslatorTests.cs index 2a4b7d34143..872bda68c61 100644 
--- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/AverageMethodToAggregationExpressionTranslatorTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/AverageMethodToAggregationExpressionTranslatorTests.cs @@ -13,24 +13,31 @@ * limitations under the License. */ +using System; +using System.Collections.Generic; using System.Linq; using FluentAssertions; using MongoDB.Bson; using MongoDB.Bson.Serialization.Attributes; -using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using MongoDB.TestHelpers.XunitExtensions; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Translators.ExpressionToAggregationExpressionTranslators.MethodTranslators { - public class AverageMethodToAggregationExpressionTranslatorTests : Linq3IntegrationTest + public class AverageMethodToAggregationExpressionTranslatorTests : LinqIntegrationTest<AverageMethodToAggregationExpressionTranslatorTests.ClassFixture> { + public AverageMethodToAggregationExpressionTranslatorTests(ClassFixture fixture) + : base(fixture) + { + } + [Theory] [ParameterAttributeData] public void Average_with_decimals_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.Decimals.AsQueryable().Average()) : @@ -48,7 +55,7 @@ public void Average_with_decimals_should_work( public void Average_with_decimals_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? 
collection.AsQueryable().Select(x => x.Decimals.AsQueryable().Average(x => x * 2.0M)) : @@ -66,7 +73,7 @@ public void Average_with_decimals_selector_should_work( public void Average_with_doubles_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.Doubles.AsQueryable().Average()) : @@ -84,7 +91,7 @@ public void Average_with_doubles_should_work( public void Average_with_doubles_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.Doubles.AsQueryable().Average(x => x * 2.0)) : @@ -102,7 +109,7 @@ public void Average_with_doubles_selector_should_work( public void Average_with_floats_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.Floats.AsQueryable().Average()) : @@ -120,7 +127,7 @@ public void Average_with_floats_should_work( public void Average_with_floats_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.Floats.AsQueryable().Average(x => x * 2.0F)) : @@ -138,7 +145,7 @@ public void Average_with_floats_selector_should_work( public void Average_with_ints_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? 
collection.AsQueryable().Select(x => x.Ints.AsQueryable().Average()) : @@ -156,7 +163,7 @@ public void Average_with_ints_should_work( public void Average_with_ints_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.Ints.AsQueryable().Average(x => x * 2)) : @@ -174,7 +181,7 @@ public void Average_with_ints_selector_should_work( public void Average_with_longs_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.Longs.AsQueryable().Average()) : @@ -192,7 +199,7 @@ public void Average_with_longs_should_work( public void Average_with_longs_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.Longs.AsQueryable().Average(x => x * 2L)) : @@ -210,7 +217,7 @@ public void Average_with_longs_selector_should_work( public void Average_with_nullable_decimals_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.NullableDecimals.AsQueryable().Average()) : @@ -228,7 +235,7 @@ public void Average_with_nullable_decimals_should_work( public void Average_with_nullable_decimals_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? 
collection.AsQueryable().Select(x => x.NullableDecimals.AsQueryable().Average(x => x * 2.0M)) : @@ -246,7 +253,7 @@ public void Average_with_nullable_decimals_selector_should_work( public void Average_with_nullable_doubles_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.NullableDoubles.AsQueryable().Average()) : @@ -264,7 +271,7 @@ public void Average_with_nullable_doubles_should_work( public void Average_with_nullable_doubles_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.NullableDoubles.AsQueryable().Average(x => x * 2.0)) : @@ -282,7 +289,7 @@ public void Average_with_nullable_doubles_selector_should_work( public void Average_with_nullable_floats_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.NullableFloats.AsQueryable().Average()) : @@ -300,7 +307,7 @@ public void Average_with_nullable_floats_should_work( public void Average_with_nullable_floats_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.NullableFloats.AsQueryable().Average(x => x * 2.0F)) : @@ -318,7 +325,7 @@ public void Average_with_nullable_floats_selector_should_work( public void Average_with_nullable_ints_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? 
collection.AsQueryable().Select(x => x.NullableInts.AsQueryable().Average()) : @@ -336,7 +343,7 @@ public void Average_with_nullable_ints_should_work( public void Average_with_nullable_ints_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.NullableInts.AsQueryable().Average(x => x * 2)) : @@ -354,7 +361,7 @@ public void Average_with_nullable_ints_selector_should_work( public void Average_with_nullable_longs_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.NullableLongs.AsQueryable().Average()) : @@ -372,7 +379,7 @@ public void Average_with_nullable_longs_should_work( public void Average_with_nullable_longs_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.NullableLongs.AsQueryable().Average(x => x * 2L)) : @@ -385,11 +392,133 @@ public void Average_with_nullable_longs_selector_should_work( results.Should().Equal(null, null, 4.0); } - private IMongoCollection<C> CreateCollection() + [Theory] + [ParameterAttributeData] + public void Average_over_empty_set_of_nullable_values_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? 
+ collection.AsQueryable().Select(x => x.EmptyNullableDecimals.AsQueryable().Average()) : + collection.AsQueryable().Select(x => x.EmptyNullableDecimals.Average()); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $avg : '$EmptyNullableDecimals' }, _id : 0 } }"); + + var results = queryable.ToList(); + results.Should().Equal(null, null, null); + } + + [Theory] + [ParameterAttributeData] + public void Average_with_selector_over_empty_set_of_nullable_values_should_work( + [Values(false, true)] bool withNestedAsQueryable) { - var collection = GetCollection<C>("test"); - CreateCollection( - collection, + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? + collection.AsQueryable().Select(x => x.EmptyNullableDecimals.AsQueryable().Average(x => x * 2.0M)) : + collection.AsQueryable().Select(x => x.EmptyNullableDecimals.Average(x => x * 2.0M)); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $avg : { $map : { input : '$EmptyNullableDecimals', as : 'x', in : { $multiply : ['$$x', NumberDecimal(2)] } } } }, _id : 0 } }"); + + var results = queryable.ToList(); + results.Should().Equal(null, null, null); + } + + [Theory] + [ParameterAttributeData] + public void Average_over_empty_set_of_non_nullable_values_should_throw( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? 
+ collection.AsQueryable().Select(x => x.EmptyDecimals.AsQueryable().Average()) : + collection.AsQueryable().Select(x => x.EmptyDecimals.Average()); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $avg : '$EmptyDecimals' }, _id : 0 } }"); + + Assert.Throws<FormatException>(() => queryable.ToList()); + } + + [Theory] + [ParameterAttributeData] + public void Average_with_selector_over_empty_set_of_non_nullable_values_should_throw( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? + collection.AsQueryable().Select(x => x.EmptyDecimals.AsQueryable().Average(x => x * 2.0M)) : + collection.AsQueryable().Select(x => x.EmptyDecimals.Average(x => x * 2.0M)); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $avg : { $map : { input : '$EmptyDecimals', as : 'x', in : { $multiply : ['$$x', NumberDecimal(2)] } } } }, _id : 0 } }"); + + Assert.Throws<FormatException>(() => queryable.ToList()); + } + + [Theory] + [ParameterAttributeData] + public void Average_over_empty_set_of_non_nullable_values_cast_to_nullable_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? 
+ collection.AsQueryable().Select(x => x.EmptyDecimals.Select(e => (decimal?)e).AsQueryable().Average()) : + collection.AsQueryable().Select(x => x.EmptyDecimals.Select(e => (decimal?)e).Average()); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $avg : { $map : { input : '$EmptyDecimals', as : 'e', in : '$$e' } } }, _id : 0 } }"); + + var results = queryable.ToList(); + results.Should().Equal(null, null, null); + } + + [Theory] + [ParameterAttributeData] + public void Average_with_selector_over_empty_set_of_non_nullable_values_cast_to_nullable_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? + collection.AsQueryable().Select(x => x.EmptyDecimals.Select(e => (decimal?)e).AsQueryable().Average(x => x * 2.0M)) : + collection.AsQueryable().Select(x => x.EmptyDecimals.Select(e => (decimal?)e).Average(x => x * 2.0M)); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $avg : { $map : { input : { $map : { input : '$EmptyDecimals', as : 'e', in : '$$e' } }, as : 'x', in : { $multiply : ['$$x', { '$numberDecimal' : '2.0' }] } } } }, _id : 0 } }"); + + var results = queryable.ToList(); + results.Should().Equal(null, null, null); + } + + public class C + { + public int Id { get; set; } + [BsonRepresentation(BsonType.Decimal128)] public decimal[] Decimals { get; set; } + public double[] Doubles { get; set; } + public float[] Floats { get; set; } + public int[] Ints { get; set; } + public long[] Longs { get; set; } + [BsonRepresentation(BsonType.Decimal128)] public decimal?[] NullableDecimals { get; set; } + public double?[] NullableDoubles { get; set; } + public float?[] NullableFloats { get; set; } + public int?[] NullableInts { get; set; } + public long?[] NullableLongs { get; set; } + [BsonRepresentation(BsonType.Decimal128)] public decimal[] EmptyDecimals { get; set; } + 
[BsonRepresentation(BsonType.Decimal128)] public decimal?[] EmptyNullableDecimals { get; set; } + } + + public sealed class ClassFixture : MongoCollectionFixture<C> + { + protected override IEnumerable<C> InitialData => + [ new C { Id = 1, @@ -402,7 +531,9 @@ private IMongoCollection<C> CreateCollection() NullableDoubles = new double?[0] { }, NullableFloats = new float?[0] { }, NullableInts = new int?[0] { }, - NullableLongs = new long?[0] { } + NullableLongs = new long?[0] { }, + EmptyDecimals = [], + EmptyNullableDecimals = [] }, new C { @@ -416,7 +547,9 @@ private IMongoCollection<C> CreateCollection() NullableDoubles = new double?[] { null }, NullableFloats = new float?[] { null }, NullableInts = new int?[] { null }, - NullableLongs = new long?[] { null } + NullableLongs = new long?[] { null }, + EmptyDecimals = [], + EmptyNullableDecimals = [] }, new C { @@ -430,24 +563,11 @@ private IMongoCollection<C> CreateCollection() NullableDoubles = new double?[] { null, 1.0, 2.0, 3.0 }, NullableFloats = new float?[] { null, 1.0F, 2.0F, 3.0F }, NullableInts = new int?[] { null, 1, 2, 3 }, - NullableLongs = new long?[] { null, 1L, 2L, 3L } - }); - return collection; - } - - private class C - { - public int Id { get; set; } - [BsonRepresentation(BsonType.Decimal128)] public decimal[] Decimals { get; set; } - public double[] Doubles { get; set; } - public float[] Floats { get; set; } - public int[] Ints { get; set; } - public long[] Longs { get; set; } - [BsonRepresentation(BsonType.Decimal128)] public decimal?[] NullableDecimals { get; set; } - public double?[] NullableDoubles { get; set; } - public float?[] NullableFloats { get; set; } - public int?[] NullableInts { get; set; } - public long?[] NullableLongs { get; set; } + NullableLongs = new long?[] { null, 1L, 2L, 3L }, + EmptyDecimals = [], + EmptyNullableDecimals = [] + } + ]; } } } diff --git 
a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/ConcatMethodToAggregationExpressionTranslatorTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/ConcatMethodToAggregationExpressionTranslatorTests.cs index d51d82e00df..a6a084f4ea8 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/ConcatMethodToAggregationExpressionTranslatorTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/ConcatMethodToAggregationExpressionTranslatorTests.cs @@ -13,22 +13,28 @@ * limitations under the License. */ +using System.Collections.Generic; using System.Linq; using FluentAssertions; -using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using MongoDB.TestHelpers.XunitExtensions; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Translators.ExpressionToAggregationExpressionTranslators.MethodTranslators { - public class ConcatMethodToAggregationExpressionTranslatorTests : Linq3IntegrationTest + public class ConcatMethodToAggregationExpressionTranslatorTests : LinqIntegrationTest<ConcatMethodToAggregationExpressionTranslatorTests.ClassFixture> { + public ConcatMethodToAggregationExpressionTranslatorTests(ClassFixture fixture) + : base(fixture) + { + } + [Theory] [ParameterAttributeData] public void Enumerable_Concat_should_work( [Values(false, true)] bool withNestedAsQueryableSource2) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryableSource2 ? 
collection.AsQueryable().Select(x => x.A.Concat(x.B.AsQueryable())) : @@ -50,7 +56,7 @@ public void Enumerable_Concat_should_work( public void Queryable_Concat_should_work( [Values(false, true)] bool withNestedAsQueryableSource2) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryableSource2 ? collection.AsQueryable().Select(x => x.A.AsQueryable().Concat(x.B.AsQueryable())) : @@ -67,23 +73,22 @@ public void Queryable_Concat_should_work( results[3].Should().Equal(1, 2); } - private IMongoCollection<C> CreateCollection() - { - var collection = GetCollection<C>("test"); - CreateCollection( - collection, - new C { Id = 0, A = new int[0], B = new int[0] }, - new C { Id = 1, A = new int[0], B = new int[] { 1 } }, - new C { Id = 2, A = new int[] { 1 }, B = new int[0] }, - new C { Id = 3, A = new int[] { 1 }, B = new int[] { 2 } }); - return collection; - } - - private class C + public class C { public int Id { get; set; } public int[] A { get; set; } public int[] B { get; set; } } + + public sealed class ClassFixture : MongoCollectionFixture<C> + { + protected override IEnumerable<C> InitialData => + [ + new C { Id = 0, A = new int[0], B = new int[0] }, + new C { Id = 1, A = new int[0], B = new int[] { 1 } }, + new C { Id = 2, A = new int[] { 1 }, B = new int[0] }, + new C { Id = 3, A = new int[] { 1 }, B = new int[] { 2 } } + ]; + } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/ContainsMethodToAggregationExpressionTranslatorTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/ContainsMethodToAggregationExpressionTranslatorTests.cs index 9c9d0e01f63..f01a80013d6 100644 --- 
a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/ContainsMethodToAggregationExpressionTranslatorTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/ContainsMethodToAggregationExpressionTranslatorTests.cs @@ -13,22 +13,28 @@ * limitations under the License. */ +using System.Collections.Generic; using System.Linq; using FluentAssertions; -using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using MongoDB.TestHelpers.XunitExtensions; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Translators.ExpressionToAggregationExpressionTranslators.MethodTranslators { - public class ContainsMethodToAggregationExpressionTranslatorTests : Linq3IntegrationTest + public class ContainsMethodToAggregationExpressionTranslatorTests : LinqIntegrationTest<ContainsMethodToAggregationExpressionTranslatorTests.ClassFixture> { + public ContainsMethodToAggregationExpressionTranslatorTests(ClassFixture fixture) + : base(fixture) + { + } + [Theory] [ParameterAttributeData] public void Contains_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? 
collection.AsQueryable().Select(x => x.A.AsQueryable().Contains(2)) : @@ -41,22 +47,21 @@ public void Contains_should_work( results.Should().Equal(false, false, true, true); } - private IMongoCollection<C> CreateCollection() + public class C { - var collection = GetCollection<C>("test"); - CreateCollection( - collection, - new C { Id = 0, A = new int[0] }, - new C { Id = 1, A = new int[] { 1 } }, - new C { Id = 2, A = new int[] { 1, 2 } }, - new C { Id = 3, A = new int[] { 1, 2, 3 } }); - return collection; + public int Id { get; set; } + public int[] A { get; set; } } - private class C + public sealed class ClassFixture : MongoCollectionFixture<C> { - public int Id { get; set; } - public int[] A { get; set; } + protected override IEnumerable<C> InitialData => + [ + new C { Id = 0, A = new int[0] }, + new C { Id = 1, A = new int[] { 1 } }, + new C { Id = 2, A = new int[] { 1, 2 } }, + new C { Id = 3, A = new int[] { 1, 2, 3 } } + ]; } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/ConvertMethodToAggregationExpressionTranslatorTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/ConvertMethodToAggregationExpressionTranslatorTests.cs index 7759baf34dd..c2c00bf02ca 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/ConvertMethodToAggregationExpressionTranslatorTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/ConvertMethodToAggregationExpressionTranslatorTests.cs @@ -17,7 +17,6 @@ using System.Collections.Generic; using System.Linq; using System.Linq.Expressions; -using FluentAssertions; using MongoDB.Bson; using MongoDB.Driver.Core.Misc; using MongoDB.Driver.Core.TestHelpers.XunitExtensions; diff --git 
a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/CountMethodToAggregationExpressionTranslatorTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/CountMethodToAggregationExpressionTranslatorTests.cs index 2db2b9c7207..42533925a3e 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/CountMethodToAggregationExpressionTranslatorTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/CountMethodToAggregationExpressionTranslatorTests.cs @@ -13,22 +13,28 @@ * limitations under the License. */ +using System.Collections.Generic; using System.Linq; using FluentAssertions; -using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using MongoDB.TestHelpers.XunitExtensions; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Translators.ExpressionToAggregationExpressionTranslators.MethodTranslators { - public class CountMethodToAggregationExpressionTranslatorTests : Linq3IntegrationTest + public class CountMethodToAggregationExpressionTranslatorTests : LinqIntegrationTest<CountMethodToAggregationExpressionTranslatorTests.ClassFixture> { + public CountMethodToAggregationExpressionTranslatorTests(ClassFixture fixture) + : base(fixture) + { + } + [Theory] [ParameterAttributeData] public void Count_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? 
collection.AsQueryable().Select(x => x.A.AsQueryable().Count()) : @@ -46,7 +52,7 @@ public void Count_should_work( public void Count_with_predicate_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.A.AsQueryable().Count(x => x > 2)) : @@ -64,7 +70,7 @@ public void Count_with_predicate_should_work( public void LongCount_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.A.AsQueryable().LongCount()) : @@ -82,7 +88,7 @@ public void LongCount_should_work( public void LongCount_with_predicate_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? 
collection.AsQueryable().Select(x => x.A.AsQueryable().LongCount(x => x > 2)) : @@ -95,22 +101,21 @@ public void LongCount_with_predicate_should_work( results.Should().Equal(0L, 0L, 0L, 1L); } - private IMongoCollection<C> CreateCollection() + public class C { - var collection = GetCollection<C>("test"); - CreateCollection( - collection, - new C { Id = 0, A = new int[0] }, - new C { Id = 1, A = new int[] { 1 } }, - new C { Id = 2, A = new int[] { 1, 2 } }, - new C { Id = 3, A = new int[] { 1, 2, 3 } }); - return collection; + public int Id { get; set; } + public int[] A { get; set; } } - private class C + public sealed class ClassFixture : MongoCollectionFixture<C> { - public int Id { get; set; } - public int[] A { get; set; } + protected override IEnumerable<C> InitialData => + [ + new C { Id = 0, A = new int[0] }, + new C { Id = 1, A = new int[] { 1 } }, + new C { Id = 2, A = new int[] { 1, 2 } }, + new C { Id = 3, A = new int[] { 1, 2, 3 } } + ]; } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/DateFromStringMethodToAggregationExpressionTranslatorTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/DateFromStringMethodToAggregationExpressionTranslatorTests.cs index 69dae706333..12fb415064a 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/DateFromStringMethodToAggregationExpressionTranslatorTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/DateFromStringMethodToAggregationExpressionTranslatorTests.cs @@ -21,13 +21,18 @@ using MongoDB.Bson; using MongoDB.Driver.Core.Misc; using MongoDB.Driver.Core.TestHelpers.XunitExtensions; -using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using Xunit; 
namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Translators.ExpressionToAggregationExpressionTranslators.MethodTranslators { - public class DateFromStringMethodToAggregationExpressionTranslatorTests : Linq3IntegrationTest + public class DateFromStringMethodToAggregationExpressionTranslatorTests : LinqIntegrationTest<DateFromStringMethodToAggregationExpressionTranslatorTests.ClassFixture> { + public DateFromStringMethodToAggregationExpressionTranslatorTests(ClassFixture fixture) + : base(fixture) + { + } + [Theory] [InlineData(1, "2023-12-26T12:34:56Z")] [InlineData(2, "throws:FormatException")] @@ -35,7 +40,7 @@ public class DateFromStringMethodToAggregationExpressionTranslatorTests : Linq3I [InlineData(4, "throws:MongoCommandException")] public void DateTime_Parse_should_work(int id, string expectedResult) { - var collection = GetCollection(); + var collection = Fixture.Collection; // technically this Parse method is not an Mql method but this test is to confirm that Parse and DateFromString behave the same var queryable = collection.AsQueryable() @@ -59,7 +64,7 @@ public void DateTime_Parse_should_work(int id, string expectedResult) [InlineData(4, "throws:MongoCommandException")] public void MongoDBFunctions_DateFromString_should_work(int id, string expectedResult) { - var collection = GetCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .Where(x => x.Id == id) @@ -83,7 +88,7 @@ public void MongoDBFunctions_DateFromString_should_work(int id, string expectedR public void MongoDBFunctions_DateFromString_with_format_should_work(int id, string expectedResult) { RequireServer.Check().Supports(Feature.DateFromStringFormatArgument); - var collection = GetCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .Where(x => x.Id == id) @@ -107,7 +112,7 @@ public void MongoDBFunctions_DateFromString_with_format_should_work(int id, stri public void 
MongoDBFunctions_DateFromString_with_format_and_timezone_should_work(int id, string expectedResult) { RequireServer.Check().Supports(Feature.DateFromStringFormatArgument); - var collection = GetCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .Where(x => x.Id == id) @@ -134,13 +139,13 @@ public void MongoDBFunctions_DateFromString_with_format_and_timezone_should_work public void MongoDBFunctions_DateFromString_with_format_and_timezone_and_onError_and_onNull_should_work(int id, string expectedResult) { RequireServer.Check().Supports(Feature.DateFromStringFormatArgument); - var collection = GetCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .Where(x => x.Id == id) .Select(x => Mql.DateFromString(x.S, x.F, x.TZ, x.OnError, x.OnNull)); - var expectedStages = + var expectedStages = new[] { $"{{ $match : {{ _id : {id} }} }}", @@ -206,22 +211,7 @@ private void AssertNullableDateTimeResult(DateTime? result, string expectedResul result.Should().Be(expectedResult == "default" ? 
(DateTime?)default : DateTime.Parse(expectedResult, null, DateTimeStyles.AdjustToUniversal)); } - private IMongoCollection<C> GetCollection() - { - var collection = GetCollection<C>("test"); - CreateCollection( - collection.Database.GetCollection<BsonDocument>("test"), - BsonDocument.Parse("{ _id : 1, S : '2023-12-26T12:34:56', F : '%Y-%m-%dT%H:%M:%S', TZ : 'UTC', OnError : { $date : '0001-01-01T00:00:00' }, OnNull : { $date : '0001-01-01T00:00:00' } }"), - BsonDocument.Parse("{ _id : 2, F : '%Y-%m-%dT%H:%M:%S', TZ : 'UTC', OnError : { $date : '0001-01-01T00:00:00' }, OnNull : { $date : '0001-01-01T00:00:00' } }"), - BsonDocument.Parse("{ _id : 3, S : null, F : '%Y-%m-%dT%H:%M:%S', TZ : 'UTC', OnError : { $date : '0001-01-01T00:00:00' }, OnNull : { $date : '0001-01-01T00:00:00' } }"), - BsonDocument.Parse("{ _id : 4, S : 'error', F : '%Y-%m-%dT%H:%M:%S', TZ : 'UTC', OnError : { $date : '1111-11-11T11:11:11' }, OnNull : { $date : '0001-01-01T00:00:00' } }"), - BsonDocument.Parse("{ _id : 5, F : '%Y-%m-%dT%H:%M:%S', TZ : 'UTC', OnError : { $date : '0001-01-01T00:00:00' }, OnNull : null }"), - BsonDocument.Parse("{ _id : 6, S : null, F : '%Y-%m-%dT%H:%M:%S', TZ : 'UTC', OnError : { $date : '0001-01-01T00:00:00' }, OnNull : null }"), - BsonDocument.Parse("{ _id : 7, S : 'error', F : '%Y-%m-%dT%H:%M:%S', TZ : 'UTC', OnError : { $date : '1111-11-11T11:11:11' }, OnNull : null }")); - return collection; - } - - private class C + public class C { public int Id { get; set; } public string S { get; set; } @@ -230,5 +220,19 @@ private class C public DateTime? OnError { get; set; } public DateTime? 
OnNull { get; set; } } + + public sealed class ClassFixture : MongoCollectionFixture<C, BsonDocument> + { + protected override IEnumerable<BsonDocument> InitialData => + [ + BsonDocument.Parse("{ _id : 1, S : '2023-12-26T12:34:56', F : '%Y-%m-%dT%H:%M:%S', TZ : 'UTC', OnError : { $date : '0001-01-01T00:00:00' }, OnNull : { $date : '0001-01-01T00:00:00' } }"), + BsonDocument.Parse("{ _id : 2, F : '%Y-%m-%dT%H:%M:%S', TZ : 'UTC', OnError : { $date : '0001-01-01T00:00:00' }, OnNull : { $date : '0001-01-01T00:00:00' } }"), + BsonDocument.Parse("{ _id : 3, S : null, F : '%Y-%m-%dT%H:%M:%S', TZ : 'UTC', OnError : { $date : '0001-01-01T00:00:00' }, OnNull : { $date : '0001-01-01T00:00:00' } }"), + BsonDocument.Parse("{ _id : 4, S : 'error', F : '%Y-%m-%dT%H:%M:%S', TZ : 'UTC', OnError : { $date : '1111-11-11T11:11:11' }, OnNull : { $date : '0001-01-01T00:00:00' } }"), + BsonDocument.Parse("{ _id : 5, F : '%Y-%m-%dT%H:%M:%S', TZ : 'UTC', OnError : { $date : '0001-01-01T00:00:00' }, OnNull : null }"), + BsonDocument.Parse("{ _id : 6, S : null, F : '%Y-%m-%dT%H:%M:%S', TZ : 'UTC', OnError : { $date : '0001-01-01T00:00:00' }, OnNull : null }"), + BsonDocument.Parse("{ _id : 7, S : 'error', F : '%Y-%m-%dT%H:%M:%S', TZ : 'UTC', OnError : { $date : '1111-11-11T11:11:11' }, OnNull : null }") + ]; + } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/DefaultIfEmptyMethodToAggregationExpressionTranslatorTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/DefaultIfEmptyMethodToAggregationExpressionTranslatorTests.cs index f4da76cee91..6887c8dd123 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/DefaultIfEmptyMethodToAggregationExpressionTranslatorTests.cs +++ 
b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/DefaultIfEmptyMethodToAggregationExpressionTranslatorTests.cs @@ -13,19 +13,25 @@ * limitations under the License. */ +using System.Collections.Generic; using System.Linq; using FluentAssertions; -using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Translators.ExpressionToAggregationExpressionTranslators.MethodTranslators { - public class DefaultIfEmptyMethodToAggregationExpressionTranslatorTests : Linq3IntegrationTest + public class DefaultIfEmptyMethodToAggregationExpressionTranslatorTests : LinqIntegrationTest<DefaultIfEmptyMethodToAggregationExpressionTranslatorTests.ClassFixture> { + public DefaultIfEmptyMethodToAggregationExpressionTranslatorTests(ClassFixture fixture) + : base(fixture) + { + } + [Fact] public void Enumerable_DefaultIfEmpty_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .Select(x => x.A.DefaultIfEmpty()); @@ -44,7 +50,7 @@ public void Enumerable_DefaultIfEmpty_should_work() [Fact] public void Queryable_DefaultIfEmpty_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .Select(x => x.A.AsQueryable().DefaultIfEmpty()); @@ -59,22 +65,21 @@ public void Queryable_DefaultIfEmpty_should_work() results[2].Should().Equal(1, 2); } - private IMongoCollection<C> CreateCollection() + public class C { - var collection = GetCollection<C>("test"); - CreateCollection( - collection, - new C { Id = 0, A = new int[0] }, - new C { Id = 1, A = new int[] { 1 } }, - new C { Id = 2, A = new int[] { 1, 2 } }, - new C { Id = 3, A = new int[] { 1, 2, 3 } }); - return collection; + public int Id { get; set; } + public int[] A { get; set; } } - private class C + public sealed class ClassFixture : 
MongoCollectionFixture<C> { - public int Id { get; set; } - public int[] A { get; set; } + protected override IEnumerable<C> InitialData => + [ + new C { Id = 0, A = new int[0] }, + new C { Id = 1, A = new int[] { 1 } }, + new C { Id = 2, A = new int[] { 1, 2 } }, + new C { Id = 3, A = new int[] { 1, 2, 3 } } + ]; } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/DistinctMethodToAggregationExpressionTranslatorTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/DistinctMethodToAggregationExpressionTranslatorTests.cs index 5395c849fec..7f7b5fe5e1c 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/DistinctMethodToAggregationExpressionTranslatorTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/DistinctMethodToAggregationExpressionTranslatorTests.cs @@ -13,19 +13,25 @@ * limitations under the License. 
*/ +using System.Collections.Generic; using System.Linq; using FluentAssertions; -using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Translators.ExpressionToAggregationExpressionTranslators.MethodTranslators { - public class DistinctMethodToAggregationExpressionTranslatorTests : Linq3IntegrationTest + public class DistinctMethodToAggregationExpressionTranslatorTests : LinqIntegrationTest<DistinctMethodToAggregationExpressionTranslatorTests.ClassFixture> { + public DistinctMethodToAggregationExpressionTranslatorTests(ClassFixture fixture) + : base(fixture) + { + } + [Fact] public void Enumerable_Distinct_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable().Select(x => x.A.Distinct()); @@ -43,7 +49,7 @@ public void Enumerable_Distinct_should_work() [Fact] public void Queryable_Distinct_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable().Select(x => x.A.AsQueryable().Distinct()); @@ -58,22 +64,21 @@ public void Queryable_Distinct_should_work() results[3].Should().BeEquivalentTo(1, 2); } - private IMongoCollection<C> CreateCollection() + public class C { - var collection = GetCollection<C>("test"); - CreateCollection( - collection, - new C { Id = 0, A = new int[0] }, - new C { Id = 1, A = new int[] { 1 } }, - new C { Id = 2, A = new int[] { 1, 2 } }, - new C { Id = 3, A = new int[] { 1, 2, 2 } }); - return collection; + public int Id { get; set; } + public int[] A { get; set; } } - private class C + public sealed class ClassFixture : MongoCollectionFixture<C> { - public int Id { get; set; } - public int[] A { get; set; } + protected override IEnumerable<C> InitialData => + [ + new C { Id = 0, A = new int[0] }, + new C { Id = 1, A = new int[] { 1 } }, + new C { Id = 2, A = new int[] { 1, 2 } }, + new C { Id = 3, A = new int[] { 
1, 2, 2 } } + ]; } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/ElementAtMethodToAggregationExpressionTranslatorTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/ElementAtMethodToAggregationExpressionTranslatorTests.cs index 3640db5aabc..bbfad01e5a1 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/ElementAtMethodToAggregationExpressionTranslatorTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/ElementAtMethodToAggregationExpressionTranslatorTests.cs @@ -13,22 +13,28 @@ * limitations under the License. */ +using System.Collections.Generic; using System.Linq; using FluentAssertions; -using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using MongoDB.TestHelpers.XunitExtensions; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Translators.ExpressionToAggregationExpressionTranslators.MethodTranslators { - public class ElementAtMethodToAggregationExpressionTranslatorTests : Linq3IntegrationTest + public class ElementAtMethodToAggregationExpressionTranslatorTests : LinqIntegrationTest<ElementAtMethodToAggregationExpressionTranslatorTests.ClassFixture> { + public ElementAtMethodToAggregationExpressionTranslatorTests(ClassFixture fixture) + : base(fixture) + { + } + [Theory] [ParameterAttributeData] public void ElementAt_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? 
collection.AsQueryable().Select(x => x.A.AsQueryable().ElementAt(1)) : @@ -46,7 +52,7 @@ public void ElementAt_should_work( public void ElementAtOrDefault_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.A.AsQueryable().ElementAtOrDefault(1)) : @@ -59,22 +65,21 @@ public void ElementAtOrDefault_should_work( results.Should().Equal(0, 0, 2, 2); } - private IMongoCollection<C> CreateCollection() + public class C { - var collection = GetCollection<C>("test"); - CreateCollection( - collection, - new C { Id = 0, A = new int[0] }, - new C { Id = 1, A = new int[] { 1 } }, - new C { Id = 2, A = new int[] { 1, 2 } }, - new C { Id = 3, A = new int[] { 1, 2, 2 } }); - return collection; + public int Id { get; set; } + public int[] A { get; set; } } - private class C + public sealed class ClassFixture : MongoCollectionFixture<C> { - public int Id { get; set; } - public int[] A { get; set; } + protected override IEnumerable<C> InitialData => + [ + new C { Id = 0, A = new int[0] }, + new C { Id = 1, A = new int[] { 1 } }, + new C { Id = 2, A = new int[] { 1, 2 } }, + new C { Id = 3, A = new int[] { 1, 2, 3 } } + ]; } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/ExceptMethodToAggregationExpressionTranslatorTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/ExceptMethodToAggregationExpressionTranslatorTests.cs index a7dce5dff24..47bb3f795b2 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/ExceptMethodToAggregationExpressionTranslatorTests.cs +++ 
b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/ExceptMethodToAggregationExpressionTranslatorTests.cs @@ -13,22 +13,28 @@ * limitations under the License. */ +using System.Collections.Generic; using System.Linq; using FluentAssertions; -using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using MongoDB.TestHelpers.XunitExtensions; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Translators.ExpressionToAggregationExpressionTranslators.MethodTranslators { - public class ExceptMethodToAggregationExpressionTranslatorTests : Linq3IntegrationTest + public class ExceptMethodToAggregationExpressionTranslatorTests : LinqIntegrationTest<ExceptMethodToAggregationExpressionTranslatorTests.ClassFixture> { + public ExceptMethodToAggregationExpressionTranslatorTests(ClassFixture fixture) + : base(fixture) + { + } + [Theory] [ParameterAttributeData] public void Enumerable_Except_should_work( [Values(false, true)] bool withNestedAsQueryableSource2) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryableSource2 ? collection.AsQueryable().Select(x => x.A.Except(x.B.AsQueryable())) : @@ -50,7 +56,7 @@ public void Enumerable_Except_should_work( public void Queryable_Except_should_work( [Values(false, true)] bool withNestedAsQueryableSource2) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryableSource2 ? 
collection.AsQueryable().Select(x => x.A.AsQueryable().Except(x.B.AsQueryable())) : @@ -67,23 +73,22 @@ public void Queryable_Except_should_work( results[3].Should().Equal(2); } - private IMongoCollection<C> CreateCollection() - { - var collection = GetCollection<C>("test"); - CreateCollection( - collection, - new C { Id = 0, A = new int[0], B = new int[0] }, - new C { Id = 1, A = new int[0], B = new int[] { 1 } }, - new C { Id = 2, A = new int[] { 1, 2 }, B = new int[0] }, - new C { Id = 3, A = new int[] { 1, 2 }, B = new int[] { 1 } }); - return collection; - } - - private class C + public class C { public int Id { get; set; } public int[] A { get; set; } public int[] B { get; set; } } + + public sealed class ClassFixture : MongoCollectionFixture<C> + { + protected override IEnumerable<C> InitialData => + [ + new C { Id = 0, A = new int[0], B = new int[0] }, + new C { Id = 1, A = new int[0], B = new int[] { 1 } }, + new C { Id = 2, A = new int[] { 1, 2 }, B = new int[0] }, + new C { Id = 3, A = new int[] { 1, 2 }, B = new int[] { 1 } } + ]; + } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/FirstOrLastMethodToAggregationExpressionTranslatorTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/FirstOrLastMethodToAggregationExpressionTranslatorTests.cs index cbccfd68170..b131aeadb99 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/FirstOrLastMethodToAggregationExpressionTranslatorTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/FirstOrLastMethodToAggregationExpressionTranslatorTests.cs @@ -13,22 +13,28 @@ * limitations under the License. 
*/ +using System.Collections.Generic; using System.Linq; using FluentAssertions; -using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using MongoDB.TestHelpers.XunitExtensions; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Translators.ExpressionToAggregationExpressionTranslators.MethodTranslators { - public class FirstOrLastMethodToAggregationExpressionTranslatorTests : Linq3IntegrationTest + public class FirstOrLastMethodToAggregationExpressionTranslatorTests : LinqIntegrationTest<FirstOrLastMethodToAggregationExpressionTranslatorTests.ClassFixture> { + public FirstOrLastMethodToAggregationExpressionTranslatorTests(ClassFixture fixture) + : base(fixture) + { + } + [Theory] [ParameterAttributeData] public void First_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.A.AsQueryable().First()) : @@ -46,7 +52,7 @@ public void First_should_work( public void First_with_predicate_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.A.AsQueryable().First(x => x > 1)) : @@ -64,7 +70,7 @@ public void First_with_predicate_should_work( public void FirstOrDefault_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.A.AsQueryable().FirstOrDefault()) : @@ -82,7 +88,7 @@ public void FirstOrDefault_should_work( public void FirstOrDefault_with_predicate_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? 
collection.AsQueryable().Select(x => x.A.AsQueryable().FirstOrDefault(x => x > 1)) : @@ -100,7 +106,7 @@ public void FirstOrDefault_with_predicate_should_work( public void Last_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.A.AsQueryable().Last()) : @@ -118,7 +124,7 @@ public void Last_should_work( public void Last_with_predicate_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.A.AsQueryable().Last(x => x > 1)) : @@ -136,7 +142,7 @@ public void Last_with_predicate_should_work( public void LastOrDefault_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.A.AsQueryable().LastOrDefault()) : @@ -154,7 +160,7 @@ public void LastOrDefault_should_work( public void LastOrDefault_with_predicate_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? 
collection.AsQueryable().Select(x => x.A.AsQueryable().LastOrDefault(x => x > 1)) : @@ -167,22 +173,21 @@ public void LastOrDefault_with_predicate_should_work( results.Should().Equal(0, 0, 2, 3); } - private IMongoCollection<C> CreateCollection() + public class C { - var collection = GetCollection<C>("test"); - CreateCollection( - collection, - new C { Id = 0, A = new int[0] }, - new C { Id = 1, A = new int[] { 1 } }, - new C { Id = 2, A = new int[] { 1, 2 } }, - new C { Id = 3, A = new int[] { 1, 2, 3 } }); - return collection; + public int Id { get; set; } + public int[] A { get; set; } } - private class C + public sealed class ClassFixture : MongoCollectionFixture<C> { - public int Id { get; set; } - public int[] A { get; set; } + protected override IEnumerable<C> InitialData => + [ + new C { Id = 0, A = new int[0] }, + new C { Id = 1, A = new int[] { 1 } }, + new C { Id = 2, A = new int[] { 1, 2 } }, + new C { Id = 3, A = new int[] { 1, 2, 3 } } + ]; } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/IntersectMethodToAggregationExpressionTranslatorTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/IntersectMethodToAggregationExpressionTranslatorTests.cs index 49f48d4c53d..ab2be78888d 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/IntersectMethodToAggregationExpressionTranslatorTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/IntersectMethodToAggregationExpressionTranslatorTests.cs @@ -13,22 +13,28 @@ * limitations under the License. 
*/ +using System.Collections.Generic; using System.Linq; using FluentAssertions; -using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using MongoDB.TestHelpers.XunitExtensions; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Translators.ExpressionToAggregationExpressionTranslators.MethodTranslators { - public class IntersectMethodToAggregationExpressionTranslatorTests : Linq3IntegrationTest + public class IntersectMethodToAggregationExpressionTranslatorTests : LinqIntegrationTest<IntersectMethodToAggregationExpressionTranslatorTests.ClassFixture> { + public IntersectMethodToAggregationExpressionTranslatorTests(ClassFixture fixture) + : base(fixture) + { + } + [Theory] [ParameterAttributeData] public void Enumerable_Intersect_should_work( [Values(false, true)] bool withNestedAsQueryableSource2) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryableSource2 ? collection.AsQueryable().Select(x => x.A.Intersect(x.B.AsQueryable())) : @@ -50,7 +56,7 @@ public void Enumerable_Intersect_should_work( public void Queryable_Intersect_should_work( [Values(false, true)] bool withNestedAsQueryableSource2) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryableSource2 ? 
collection.AsQueryable().Select(x => x.A.AsQueryable().Intersect(x.B.AsQueryable())) : @@ -67,23 +73,22 @@ public void Queryable_Intersect_should_work( results[3].Should().Equal(1); } - private IMongoCollection<C> CreateCollection() - { - var collection = GetCollection<C>("test"); - CreateCollection( - collection, - new C { Id = 0, A = new int[0], B = new int[0] }, - new C { Id = 1, A = new int[0], B = new int[] { 1 } }, - new C { Id = 2, A = new int[] { 1 }, B = new int[] { 1 } }, - new C { Id = 3, A = new int[] { 1 }, B = new int[] { 1, 2 } }); - return collection; - } - - private class C + public class C { public int Id { get; set; } public int[] A { get; set; } public int[] B { get; set; } } + + public sealed class ClassFixture : MongoCollectionFixture<C> + { + protected override IEnumerable<C> InitialData => + [ + new C { Id = 0, A = new int[0], B = new int[0] }, + new C { Id = 1, A = new int[0], B = new int[] { 1 } }, + new C { Id = 2, A = new int[] { 1 }, B = new int[] { 1 } }, + new C { Id = 3, A = new int[] { 1 }, B = new int[] { 1, 2 } } + ]; + } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/IsMatchMethodToAggregationExpressionTranslatorTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/IsMatchMethodToAggregationExpressionTranslatorTests.cs index 05c0ad52535..786a8989e37 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/IsMatchMethodToAggregationExpressionTranslatorTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/IsMatchMethodToAggregationExpressionTranslatorTests.cs @@ -13,24 +13,28 @@ * limitations under the License. 
*/ +using System.Collections.Generic; using System.Linq; using System.Text.RegularExpressions; using FluentAssertions; using MongoDB.Driver.Core.Misc; using MongoDB.Driver.Core.TestHelpers.XunitExtensions; -using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Translators.ExpressionToAggregationExpressionTranslators.MethodTranslators { - public class IsMatchMethodToAggregationExpressionTranslatorTests : Linq3IntegrationTest + public class IsMatchMethodToAggregationExpressionTranslatorTests : LinqIntegrationTest<IsMatchMethodToAggregationExpressionTranslatorTests.ClassFixture> { + public IsMatchMethodToAggregationExpressionTranslatorTests(ClassFixture fixture) + : base(fixture, server => server.Supports(Feature.RegexMatch)) + { + } + [Fact] public void Should_translate_instance_regex_isMatch() { - RequireServer.Check().Supports(Feature.RegexMatch); - - var collection = CreateCollection(); + var collection = Fixture.Collection; var regex = new Regex(@"\dB.*0"); var queryable = collection.AsQueryable() .Where(i => regex.IsMatch(i.A + i.B)); @@ -45,9 +49,9 @@ public void Should_translate_instance_regex_isMatch() [Fact] public void Should_translate_static_regex_isMatch() { - RequireServer.Check().Supports(Feature.RegexMatch); + RequireServer.Check(); - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .Where(i => Regex.IsMatch(i.A + i.B, @"\dB.*0")); @@ -61,9 +65,7 @@ public void Should_translate_static_regex_isMatch() [Fact] public void Should_translate_static_regex_isMatch_with_options() { - RequireServer.Check().Supports(Feature.RegexMatch); - - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .Where(i => Regex.IsMatch(i.A + i.B, @"\dB.*0", RegexOptions.IgnoreCase)); @@ -74,21 +76,20 @@ public void Should_translate_static_regex_isMatch_with_options() 
result.Id.Should().Be(2); } - private IMongoCollection<Data> CreateCollection() - { - var collection = GetCollection<Data>("test"); - CreateCollection( - collection, - new Data { Id = 1, A = "ABC", B = "1" }, - new Data { Id = 2, A = "1Br", B = "0" }); - return collection; - } - - private class Data + public class Data { public int Id { get; set; } public string A { get; set; } public string B { get; set; } } + + public sealed class ClassFixture : MongoCollectionFixture<Data> + { + protected override IEnumerable<Data> InitialData => + [ + new Data { Id = 1, A = "ABC", B = "1" }, + new Data { Id = 2, A = "1Br", B = "0" } + ]; + } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/IsMissingMethodToAggregationExpressionTranslatorTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/IsMissingMethodToAggregationExpressionTranslatorTests.cs index 017f64f66e4..dc7df901269 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/IsMissingMethodToAggregationExpressionTranslatorTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/IsMissingMethodToAggregationExpressionTranslatorTests.cs @@ -13,20 +13,26 @@ * limitations under the License. 
*/ +using System.Collections.Generic; using System.Linq; using FluentAssertions; using MongoDB.Bson; -using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Translators.ExpressionToAggregationExpressionTranslators.MethodTranslators { - public class IsMissingMethodToAggregationExpressionTranslatorTests : Linq3IntegrationTest + public class IsMissingMethodToAggregationExpressionTranslatorTests : LinqIntegrationTest<IsMissingMethodToAggregationExpressionTranslatorTests.ClassFixture> { + public IsMissingMethodToAggregationExpressionTranslatorTests(ClassFixture fixture) + : base(fixture) + { + } + [Fact] public void Select_Exists_should_work() { - var collection = GetCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .Select(x => Mql.Exists(x.S)); @@ -41,7 +47,7 @@ public void Select_Exists_should_work() [Fact] public void Select_IsMissing_should_work() { - var collection = GetCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .Select(x => Mql.IsMissing(x.S)); @@ -56,7 +62,7 @@ public void Select_IsMissing_should_work() [Fact] public void Select_IsNullOrMissing_should_work() { - var collection = GetCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .Select(x => Mql.IsNullOrMissing(x.S)); @@ -68,21 +74,20 @@ public void Select_IsNullOrMissing_should_work() results.Should().Equal(true, true, false); } - private IMongoCollection<C> GetCollection() + public class C { - var collection = GetCollection<C>("test"); - CreateCollection( - GetCollection<BsonDocument>("test"), - BsonDocument.Parse("{ _id : 1 }"), - BsonDocument.Parse("{ _id : 2, S : null }"), - BsonDocument.Parse("{ _id : 3, S : 'abc' }")); - return collection; + public int Id { get; set; } + public string S { get; set; } } - private class C + public sealed class ClassFixture : MongoCollectionFixture<C, 
BsonDocument> { - public int Id { get; set; } - public string S { get; set; } + protected override IEnumerable<BsonDocument> InitialData => + [ + BsonDocument.Parse("{ _id : 1 }"), + BsonDocument.Parse("{ _id : 2, S : null }"), + BsonDocument.Parse("{ _id : 3, S : 'abc' }") + ]; } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/IsNullOrWhiteSpaceMethodToAggregationExpressionTranslatorTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/IsNullOrWhiteSpaceMethodToAggregationExpressionTranslatorTests.cs index 2c5bc4d07e0..fb453ed5dbb 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/IsNullOrWhiteSpaceMethodToAggregationExpressionTranslatorTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/IsNullOrWhiteSpaceMethodToAggregationExpressionTranslatorTests.cs @@ -13,22 +13,28 @@ * limitations under the License. 
*/ +using System.Collections.Generic; using System.Linq; using FluentAssertions; using MongoDB.Driver.Core.Misc; using MongoDB.Driver.Core.TestHelpers.XunitExtensions; -using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Translators.ExpressionToAggregationExpressionTranslators.MethodTranslators { - public class IsNullOrWhiteSpaceMethodToAggregationExpressionTranslatorTests : Linq3IntegrationTest + public class IsNullOrWhiteSpaceMethodToAggregationExpressionTranslatorTests : LinqIntegrationTest<IsNullOrWhiteSpaceMethodToAggregationExpressionTranslatorTests.ClassFixture> { + public IsNullOrWhiteSpaceMethodToAggregationExpressionTranslatorTests(ClassFixture fixture) + : base(fixture, server => server.Supports(Feature.TrimOperator)) + { + } + [Fact] public void Project_IsNullOrWhiteSpace_using_anonymous_class_should_return_expected_results() { - RequireServer.Check().Supports(Feature.FindProjectionExpressions, Feature.TrimOperator); - var collection = CreateCollection(); + RequireServer.Check().Supports(Feature.FindProjectionExpressions); + var collection = Fixture.Collection; var find = collection.Find("{}") .Project(x => new { R = string.IsNullOrWhiteSpace(x.S) }) @@ -44,8 +50,8 @@ public void Project_IsNullOrWhiteSpace_using_anonymous_class_should_return_expec [Fact] public void Project_IsNullOrWhiteSpace_using_named_class_should_return_expected_results() { - RequireServer.Check().Supports(Feature.FindProjectionExpressions, Feature.TrimOperator); - var collection = CreateCollection(); + RequireServer.Check().Supports(Feature.FindProjectionExpressions); + var collection = Fixture.Collection; var find = collection.Find("{}") .Project(x => new Result { R = string.IsNullOrWhiteSpace(x.S) }) @@ -61,8 +67,7 @@ public void Project_IsNullOrWhiteSpace_using_named_class_should_return_expected_ [Fact] public void Select_IsNullOrWhiteSpace_using_scalar_result_should_return_expected_results() { - 
RequireServer.Check().Supports(Feature.TrimOperator); - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .OrderBy(x => x.Id) @@ -81,8 +86,7 @@ public void Select_IsNullOrWhiteSpace_using_scalar_result_should_return_expected [Fact] public void Select_IsNullOrWhiteSpace_using_anonymous_class_should_return_expected_results() { - RequireServer.Check().Supports(Feature.TrimOperator); - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .OrderBy(x => x.Id) @@ -101,8 +105,7 @@ public void Select_IsNullOrWhiteSpace_using_anonymous_class_should_return_expect [Fact] public void Select_IsNullOrWhiteSpace_using_named_class_should_return_expected_results() { - RequireServer.Check().Supports(Feature.TrimOperator); - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .OrderBy(x => x.Id) @@ -118,19 +121,6 @@ public void Select_IsNullOrWhiteSpace_using_named_class_should_return_expected_r results.Select(x => x.R).Should().Equal(true, true, true, true, false); } - private IMongoCollection<C> CreateCollection() - { - var collection = GetCollection<C>(); - CreateCollection( - collection, - new C { Id = 1, S = null }, - new C { Id = 2, S = "" }, - new C { Id = 3, S = " " }, - new C { Id = 4, S = " \t\r\n" }, - new C { Id = 5, S = "abc" }); - return collection; - } - public class C { public int Id { get; set; } @@ -141,5 +131,17 @@ public class Result { public bool R { get; set; } } + + public sealed class ClassFixture : MongoCollectionFixture<C> + { + protected override IEnumerable<C> InitialData => + [ + new C { Id = 1, S = null }, + new C { Id = 2, S = "" }, + new C { Id = 3, S = " " }, + new C { Id = 4, S = " \t\r\n" }, + new C { Id = 5, S = "abc" } + ]; + } } } diff --git 
a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/MaxOrMinMethodToAggregationExpressionTranslatorTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/MaxOrMinMethodToAggregationExpressionTranslatorTests.cs index 333d6c1687e..32c28979acb 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/MaxOrMinMethodToAggregationExpressionTranslatorTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/MaxOrMinMethodToAggregationExpressionTranslatorTests.cs @@ -13,24 +13,30 @@ * limitations under the License. */ +using System.Collections.Generic; using System.Linq; using FluentAssertions; using MongoDB.Bson; using MongoDB.Bson.Serialization.Attributes; -using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using MongoDB.TestHelpers.XunitExtensions; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Translators.ExpressionToAggregationExpressionTranslators.MethodTranslators { - public class MaxOrMinMethodToAggregationExpressionTranslatorTests : Linq3IntegrationTest + public class MaxOrMinMethodToAggregationExpressionTranslatorTests : LinqIntegrationTest<MaxOrMinMethodToAggregationExpressionTranslatorTests.ClassFixture> { + public MaxOrMinMethodToAggregationExpressionTranslatorTests(ClassFixture fixture) + : base(fixture) + { + } + [Theory] [ParameterAttributeData] public void Max_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? 
collection.AsQueryable().Select(x => x.Documents.AsQueryable().Max()) : @@ -48,7 +54,7 @@ public void Max_should_work( public void Max_with_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.Documents.AsQueryable().Max(x => x["X"])) : @@ -66,7 +72,7 @@ public void Max_with_selector_should_work( public void Max_of_decimals_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.Decimals.AsQueryable().Max()) : @@ -84,7 +90,7 @@ public void Max_of_decimals_should_work( public void Max_of_decimals_with_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.Decimals.AsQueryable().Max(x => x * 2.0M)) : @@ -102,7 +108,7 @@ public void Max_of_decimals_with_selector_should_work( public void Max_of_doubles_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.Doubles.AsQueryable().Max()) : @@ -120,7 +126,7 @@ public void Max_of_doubles_should_work( public void Max_of_doubles_with_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? 
collection.AsQueryable().Select(x => x.Doubles.AsQueryable().Max(x => x * 2.0)) : @@ -138,7 +144,7 @@ public void Max_of_doubles_with_selector_should_work( public void Max_of_floats_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.Floats.AsQueryable().Max()) : @@ -156,7 +162,7 @@ public void Max_of_floats_should_work( public void Max_of_floats_with_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.Floats.AsQueryable().Max(x => x * 2.0F)) : @@ -174,7 +180,7 @@ public void Max_of_floats_with_selector_should_work( public void Max_of_ints_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.Ints.AsQueryable().Max()) : @@ -192,7 +198,7 @@ public void Max_of_ints_should_work( public void Max_of_ints_with_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.Ints.AsQueryable().Max(x => x * 2)) : @@ -210,7 +216,7 @@ public void Max_of_ints_with_selector_should_work( public void Max_of_longs_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? 
collection.AsQueryable().Select(x => x.Longs.AsQueryable().Max()) : @@ -228,7 +234,7 @@ public void Max_of_longs_should_work( public void Max_of_longs_with_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.Longs.AsQueryable().Max(x => x * 2L)) : @@ -246,7 +252,7 @@ public void Max_of_longs_with_selector_should_work( public void Max_of_nullable_decimals_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.NullableDecimals.AsQueryable().Max()) : @@ -264,7 +270,7 @@ public void Max_of_nullable_decimals_should_work( public void Max_of_nullable_decimals_with_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.NullableDecimals.AsQueryable().Max(x => x * 2.0M)) : @@ -282,7 +288,7 @@ public void Max_of_nullable_decimals_with_selector_should_work( public void Max_of_nullable_doubles_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.NullableDoubles.AsQueryable().Max()) : @@ -300,7 +306,7 @@ public void Max_of_nullable_doubles_should_work( public void Max_of_nullable_doubles_with_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? 
collection.AsQueryable().Select(x => x.NullableDoubles.AsQueryable().Max(x => x * 2.0)) : @@ -318,7 +324,7 @@ public void Max_of_nullable_doubles_with_selector_should_work( public void Max_of_nullable_floats_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.NullableFloats.AsQueryable().Max()) : @@ -336,7 +342,7 @@ public void Max_of_nullable_floats_should_work( public void Max_of_nullable_floats_with_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.NullableFloats.AsQueryable().Max(x => x * 2.0F)) : @@ -354,7 +360,7 @@ public void Max_of_nullable_floats_with_selector_should_work( public void Max_of_nullable_ints_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.NullableInts.AsQueryable().Max()) : @@ -372,7 +378,7 @@ public void Max_of_nullable_ints_should_work( public void Max_of_nullable_ints_with_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.NullableInts.AsQueryable().Max(x => x * 2)) : @@ -390,7 +396,7 @@ public void Max_of_nullable_ints_with_selector_should_work( public void Max_of_nullable_longs_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? 
collection.AsQueryable().Select(x => x.NullableLongs.AsQueryable().Max()) : @@ -408,7 +414,7 @@ public void Max_of_nullable_longs_should_work( public void Max_of_nullable_longs_with_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.NullableLongs.AsQueryable().Max(x => x * 2L)) : @@ -426,7 +432,7 @@ public void Max_of_nullable_longs_with_selector_should_work( public void Min_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.Documents.AsQueryable().Min()) : @@ -444,7 +450,7 @@ public void Min_should_work( public void Min_with_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.Documents.AsQueryable().Min(x => x["X"])) : @@ -462,7 +468,7 @@ public void Min_with_selector_should_work( public void Min_of_decimals_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.Decimals.AsQueryable().Min()) : @@ -480,7 +486,7 @@ public void Min_of_decimals_should_work( public void Min_of_decimals_with_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? 
collection.AsQueryable().Select(x => x.Decimals.AsQueryable().Min(x => x * 2.0M)) : @@ -498,7 +504,7 @@ public void Min_of_decimals_with_selector_should_work( public void Min_of_doubles_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.Doubles.AsQueryable().Min()) : @@ -516,7 +522,7 @@ public void Min_of_doubles_should_work( public void Min_of_doubles_with_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.Doubles.AsQueryable().Min(x => x * 2.0)) : @@ -534,7 +540,7 @@ public void Min_of_doubles_with_selector_should_work( public void Min_of_floats_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.Floats.AsQueryable().Min()) : @@ -552,7 +558,7 @@ public void Min_of_floats_should_work( public void Min_of_floats_with_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.Floats.AsQueryable().Min(x => x * 2.0F)) : @@ -570,7 +576,7 @@ public void Min_of_floats_with_selector_should_work( public void Min_of_ints_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? 
collection.AsQueryable().Select(x => x.Ints.AsQueryable().Min()) : @@ -588,7 +594,7 @@ public void Min_of_ints_should_work( public void Min_of_ints_with_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.Ints.AsQueryable().Min(x => x * 2)) : @@ -605,7 +611,7 @@ public void Min_of_ints_with_selector_should_work( public void Min_of_longs_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.Longs.AsQueryable().Min()) : @@ -623,7 +629,7 @@ public void Min_of_longs_should_work( public void Min_of_longs_with_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.Longs.AsQueryable().Min(x => x * 2L)) : @@ -641,7 +647,7 @@ public void Min_of_longs_with_selector_should_work( public void Min_of_nullable_decimals_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.NullableDecimals.AsQueryable().Min()) : @@ -659,7 +665,7 @@ public void Min_of_nullable_decimals_should_work( public void Min_of_nullable_decimals_with_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? 
collection.AsQueryable().Select(x => x.NullableDecimals.AsQueryable().Min(x => x * 2.0M)) : @@ -677,7 +683,7 @@ public void Min_of_nullable_decimals_with_selector_should_work( public void Min_of_nullable_doubles_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.NullableDoubles.AsQueryable().Min()) : @@ -695,7 +701,7 @@ public void Min_of_nullable_doubles_should_work( public void Min_of_nullable_doubles_with_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.NullableDoubles.AsQueryable().Min(x => x * 2.0)) : @@ -713,7 +719,7 @@ public void Min_of_nullable_doubles_with_selector_should_work( public void Min_of_nullable_floats_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.NullableFloats.AsQueryable().Min()) : @@ -731,7 +737,7 @@ public void Min_of_nullable_floats_should_work( public void Min_of_nullable_floats_with_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.NullableFloats.AsQueryable().Min(x => x * 2.0F)) : @@ -749,7 +755,7 @@ public void Min_of_nullable_floats_with_selector_should_work( public void Min_of_nullable_ints_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? 
collection.AsQueryable().Select(x => x.NullableInts.AsQueryable().Min()) : @@ -766,7 +772,7 @@ public void Min_of_nullable_ints_should_work( public void Min_of_nullable_ints_with_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.NullableInts.AsQueryable().Min(x => x * 2)) : @@ -784,7 +790,7 @@ public void Min_of_nullable_ints_with_selector_should_work( public void Min_of_nullable_longs_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.NullableLongs.AsQueryable().Min()) : @@ -802,7 +808,7 @@ public void Min_of_nullable_longs_should_work( public void Min_of_nullable_longs_with_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? 
collection.AsQueryable().Select(x => x.NullableLongs.AsQueryable().Min(x => x * 2L)) : @@ -815,11 +821,26 @@ public void Min_of_nullable_longs_with_selector_should_work( results.Should().Equal(null, null, 6L); } - private IMongoCollection<C> CreateCollection() + public class C { - var collection = GetCollection<C>("test"); - CreateCollection( - collection, + public int Id { get; set; } + [BsonRepresentation(BsonType.Decimal128)] public decimal[] Decimals { get; set; } + public BsonDocument[] Documents { get; set; } + public double[] Doubles { get; set; } + public float[] Floats { get; set; } + public int[] Ints { get; set; } + public long[] Longs { get; set; } + [BsonRepresentation(BsonType.Decimal128)] public decimal?[] NullableDecimals { get; set; } + public double?[] NullableDoubles { get; set; } + public float?[] NullableFloats { get; set; } + public int?[] NullableInts { get; set; } + public long?[] NullableLongs { get; set; } + } + + public sealed class ClassFixture : MongoCollectionFixture<C> + { + protected override IEnumerable<C> InitialData => + [ new C { Id = 1, @@ -864,24 +885,8 @@ private IMongoCollection<C> CreateCollection() NullableFloats = new float?[] { null, 3.0F }, NullableInts = new int?[] { null, 3 }, NullableLongs = new long?[] { null, 3L } - }); - return collection; - } - - private class C - { - public int Id { get; set; } - [BsonRepresentation(BsonType.Decimal128)] public decimal[] Decimals { get; set; } - public BsonDocument[] Documents { get; set; } - public double[] Doubles { get; set; } - public float[] Floats { get; set; } - public int[] Ints { get; set; } - public long[] Longs { get; set; } - [BsonRepresentation(BsonType.Decimal128)] public decimal?[] NullableDecimals { get; set; } - public double?[] NullableDoubles { get; set; } - public float?[] NullableFloats { get; set; } - public int?[] NullableInts { get; set; } - public long?[] NullableLongs { get; set; } + } + ]; } } } diff --git 
a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/MedianMethodToAggregationExpressionTranslatorTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/MedianMethodToAggregationExpressionTranslatorTests.cs new file mode 100644 index 00000000000..2013568bb50 --- /dev/null +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/MedianMethodToAggregationExpressionTranslatorTests.cs @@ -0,0 +1,460 @@ +/* Copyright 2010-present MongoDB Inc. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*/ + +using System.Collections.Generic; +using System.Linq; +using FluentAssertions; +using MongoDB.Bson; +using MongoDB.Bson.Serialization.Attributes; +using MongoDB.Driver.Core.Misc; +using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; +using MongoDB.TestHelpers.XunitExtensions; +using Xunit; + +namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Translators.ExpressionToAggregationExpressionTranslators.MethodTranslators +{ + public class MedianMethodToAggregationExpressionTranslatorTests : LinqIntegrationTest<MedianMethodToAggregationExpressionTranslatorTests.ClassFixture> + { + public MedianMethodToAggregationExpressionTranslatorTests(ClassFixture fixture) + : base(fixture, server => server.Supports(Feature.MedianOperator)) + { + } + + [Theory] + [ParameterAttributeData] + public void Median_with_decimals_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? + collection.AsQueryable().Select(x => x.Decimals.AsQueryable().Median()) : + collection.AsQueryable().Select(x => x.Decimals.Median()); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $median : { input : '$Decimals', method : 'approximate' } }, _id : 0 } }"); + + var results = queryable.ToList(); + results.Should().Equal(1.0M, 1.0M, 2.0M); + } + + [Theory] + [ParameterAttributeData] + public void Median_with_decimals_selector_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? 
+ collection.AsQueryable().Select(x => x.Decimals.AsQueryable().Median(y => y * 2.0M)) : + collection.AsQueryable().Select(x => x.Decimals.Median(y => y * 2.0M)); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $median : { input : { $map : { input : '$Decimals', as : 'y', in : { $multiply : ['$$y', NumberDecimal(2)] } } }, method : 'approximate' } }, _id : 0 } }"); + + var results = queryable.ToList(); + results.Should().Equal(2.0M, 2.0M, 4.0M); + } + + [Theory] + [ParameterAttributeData] + public void Median_with_doubles_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? + collection.AsQueryable().Select(x => x.Doubles.AsQueryable().Median()) : + collection.AsQueryable().Select(x => x.Doubles.Median()); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $median : { input : '$Doubles', method : 'approximate' } }, _id : 0 } }"); + + var results = queryable.ToList(); + results.Should().Equal(1.0, 1.0, 2.0); + } + + [Theory] + [ParameterAttributeData] + public void Median_with_doubles_selector_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? 
+ collection.AsQueryable().Select(x => x.Doubles.AsQueryable().Median(y => y * 2.0)) : + collection.AsQueryable().Select(x => x.Doubles.Median(y => y * 2.0)); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $median : { input : { $map : { input : '$Doubles', as : 'y', in : { $multiply : ['$$y', 2.0] } } }, method : 'approximate' } }, _id : 0 } }"); + + var results = queryable.ToList(); + results.Should().Equal(2.0, 2.0, 4.0); + } + + [Theory] + [ParameterAttributeData] + public void Median_with_floats_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? + collection.AsQueryable().Select(x => x.Floats.AsQueryable().Median()) : + collection.AsQueryable().Select(x => x.Floats.Median()); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $median : { input : '$Floats', method : 'approximate' } }, _id : 0 } }"); + + var results = queryable.ToList(); + results.Should().Equal(1.0F, 1.0F, 2.0F); + } + + [Theory] + [ParameterAttributeData] + public void Median_with_floats_selector_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? 
+ collection.AsQueryable().Select(x => x.Floats.AsQueryable().Median(y => y * 2.0F)) : + collection.AsQueryable().Select(x => x.Floats.Median(y => y * 2.0F)); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $median : { input : { $map : { input : '$Floats', as : 'y', in : { $multiply : ['$$y', 2.0] } } }, method : 'approximate' } }, _id : 0 } }"); + + var results = queryable.ToList(); + results.Should().Equal(2.0F, 2.0F, 4.0F); + } + + [Theory] + [ParameterAttributeData] + public void Median_with_ints_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? + collection.AsQueryable().Select(x => x.Ints.AsQueryable().Median()) : + collection.AsQueryable().Select(x => x.Ints.Median()); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $median : { input : '$Ints', method : 'approximate' } }, _id : 0 } }"); + + var results = queryable.ToList(); + results.Should().Equal(1, 1, 2); + } + + [Theory] + [ParameterAttributeData] + public void Median_with_ints_selector_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? + collection.AsQueryable().Select(x => x.Ints.AsQueryable().Median(y => y * 2)) : + collection.AsQueryable().Select(x => x.Ints.Median(y => y * 2)); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $median : { input : { $map : { input : '$Ints', as : 'y', in : { $multiply : ['$$y', 2] } } }, method : 'approximate' } }, _id : 0 } }"); + + var results = queryable.ToList(); + results.Should().Equal(2, 2, 4); + } + + [Theory] + [ParameterAttributeData] + public void Median_with_longs_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? 
+ collection.AsQueryable().Select(x => x.Longs.AsQueryable().Median()) : + collection.AsQueryable().Select(x => x.Longs.Median()); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $median : { input : '$Longs', method : 'approximate' } }, _id : 0 } }"); + + var results = queryable.ToList(); + results.Should().Equal(1, 1, 2); + } + + [Theory] + [ParameterAttributeData] + public void Median_with_longs_selector_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? + collection.AsQueryable().Select(x => x.Longs.AsQueryable().Median(y => y * 2L)) : + collection.AsQueryable().Select(x => x.Longs.Median(y => y * 2L)); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $median : { input : { $map : { input : '$Longs', as : 'y', in : { $multiply : ['$$y', NumberLong(2)] } } }, method : 'approximate' } }, _id : 0 } }"); + + var results = queryable.ToList(); + results.Should().Equal(2, 2, 4); + } + + [Theory] + [ParameterAttributeData] + public void Median_with_nullable_decimals_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? + collection.AsQueryable().Select(x => x.NullableDecimals.AsQueryable().Median()) : + collection.AsQueryable().Select(x => x.NullableDecimals.Median()); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $median : { input : '$NullableDecimals', method : 'approximate' } }, _id : 0 } }"); + + var results = queryable.ToList(); + results.Should().Equal(null, null, 2.0M); + } + + [Theory] + [ParameterAttributeData] + public void Median_with_nullable_decimals_selector_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? 
+ collection.AsQueryable().Select(x => x.NullableDecimals.AsQueryable().Median(y => y * 2.0M)) : + collection.AsQueryable().Select(x => x.NullableDecimals.Median(y => y * 2.0M)); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $median : { input : { $map : { input : '$NullableDecimals', as : 'y', in : { $multiply : ['$$y', NumberDecimal(2)] } } }, method : 'approximate' } }, _id : 0 } }"); + + var results = queryable.ToList(); + results.Should().Equal(null, null, 4.0M); + } + + [Theory] + [ParameterAttributeData] + public void Median_with_nullable_doubles_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? + collection.AsQueryable().Select(x => x.NullableDoubles.AsQueryable().Median()) : + collection.AsQueryable().Select(x => x.NullableDoubles.Median()); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $median : { input : '$NullableDoubles', method : 'approximate' } }, _id : 0 } }"); + + var results = queryable.ToList(); + results.Should().Equal(null, null, 2.0); + } + + [Theory] + [ParameterAttributeData] + public void Median_with_nullable_doubles_selector_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? 
+ collection.AsQueryable().Select(x => x.NullableDoubles.AsQueryable().Median(y => y * 2.0)) : + collection.AsQueryable().Select(x => x.NullableDoubles.Median(y => y * 2.0)); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $median : { input : { $map : { input : '$NullableDoubles', as : 'y', in : { $multiply : ['$$y', 2.0] } } }, method : 'approximate' } }, _id : 0 } }"); + + var results = queryable.ToList(); + results.Should().Equal(null, null, 4.0); + } + + [Theory] + [ParameterAttributeData] + public void Median_with_nullable_floats_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? + collection.AsQueryable().Select(x => x.NullableFloats.AsQueryable().Median()) : + collection.AsQueryable().Select(x => x.NullableFloats.Median()); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $median : { input : '$NullableFloats', method : 'approximate' } }, _id : 0 } }"); + + var results = queryable.ToList(); + results.Should().Equal(null, null, 2.0F); + } + + [Theory] + [ParameterAttributeData] + public void Median_with_nullable_floats_selector_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? 
+ collection.AsQueryable().Select(x => x.NullableFloats.AsQueryable().Median(y => y * 2.0F)) : + collection.AsQueryable().Select(x => x.NullableFloats.Median(y => y * 2.0F)); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $median : { input : { $map : { input : '$NullableFloats', as : 'y', in : { $multiply : ['$$y', 2.0] } } }, method : 'approximate' } }, _id : 0 } }"); + + var results = queryable.ToList(); + results.Should().Equal(null, null, 4.0F); + } + + [Theory] + [ParameterAttributeData] + public void Median_with_nullable_ints_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? + collection.AsQueryable().Select(x => x.NullableInts.AsQueryable().Median()) : + collection.AsQueryable().Select(x => x.NullableInts.Median()); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $median : { input : '$NullableInts', method : 'approximate' } }, _id : 0 } }"); + + var results = queryable.ToList(); + results.Should().Equal(null, null, 2); + } + + [Theory] + [ParameterAttributeData] + public void Median_with_nullable_ints_selector_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? 
+ collection.AsQueryable().Select(x => x.NullableInts.AsQueryable().Median(y => y * 2)) : + collection.AsQueryable().Select(x => x.NullableInts.Median(y => y * 2)); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $median : { input : { $map : { input : '$NullableInts', as : 'y', in : { $multiply : ['$$y', 2] } } }, method : 'approximate' } }, _id : 0 } }"); + + var results = queryable.ToList(); + results.Should().Equal(null, null, 4); + } + + [Theory] + [ParameterAttributeData] + public void Median_with_nullable_longs_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? + collection.AsQueryable().Select(x => x.NullableLongs.AsQueryable().Median()) : + collection.AsQueryable().Select(x => x.NullableLongs.Median()); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $median : { input : '$NullableLongs', method : 'approximate' } }, _id : 0 } }"); + + var results = queryable.ToList(); + results.Should().Equal(null, null, 2); + } + + [Theory] + [ParameterAttributeData] + public void Median_with_nullable_longs_selector_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? 
+ collection.AsQueryable().Select(x => x.NullableLongs.AsQueryable().Median(y => y * 2L)) : + collection.AsQueryable().Select(x => x.NullableLongs.Median(y => y * 2L)); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $median : { input : { $map : { input : '$NullableLongs', as : 'y', in : { $multiply : ['$$y', NumberLong(2)] } } }, method : 'approximate' } }, _id : 0 } }"); + + var results = queryable.ToList(); + results.Should().Equal(null, null, 4); + } + + public class C + { + public int Id { get; set; } + [BsonRepresentation(BsonType.Decimal128)] public decimal[] Decimals { get; set; } + public double[] Doubles { get; set; } + public float[] Floats { get; set; } + public int[] Ints { get; set; } + public long[] Longs { get; set; } + [BsonRepresentation(BsonType.Decimal128)] public decimal?[] NullableDecimals { get; set; } + public double?[] NullableDoubles { get; set; } + public float?[] NullableFloats { get; set; } + public int?[] NullableInts { get; set; } + public long?[] NullableLongs { get; set; } + } + + public sealed class ClassFixture : MongoCollectionFixture<C> + { + protected override IEnumerable<C> InitialData => + [ + new() + { + Id = 1, + Decimals = [1.0M], + Doubles = [1.0], + Floats = [1.0F], + Ints = [1], + Longs = [1L], + NullableDecimals = [], + NullableDoubles = [], + NullableFloats = [], + NullableInts = [], + NullableLongs = [] + }, + new() + { + Id = 2, + Decimals = [1.0M, 2.0M], + Doubles = [1.0, 2.0], + Floats = [1.0F, 2.0F], + Ints = [1, 2], + Longs = [1L, 2L], + NullableDecimals = [null], + NullableDoubles = [null], + NullableFloats = [null], + NullableInts = [null], + NullableLongs = [null] + }, + new() + { + Id = 3, + Decimals = [1.0M, 2.0M, 3.0M], + Doubles = [1.0, 2.0, 3.0], + Floats = [1.0F, 2.0F, 3.0F], + Ints = [1, 2, 3], + Longs = [1L, 2L, 3L], + NullableDecimals = [null, 1.0M, 2.0M, 3.0M], + NullableDoubles = [null, 1.0, 2.0, 3.0], + NullableFloats = [null, 1.0F, 2.0F, 3.0F], + 
NullableInts = [null, 1, 2, 3], + NullableLongs = [null, 1L, 2L, 3L] + } + ]; + } + } +} \ No newline at end of file diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/OrderByMethodToAggregationExpressionTranslatorTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/OrderByMethodToAggregationExpressionTranslatorTests.cs index 5e07a2240ad..4cdfd5468ea 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/OrderByMethodToAggregationExpressionTranslatorTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/OrderByMethodToAggregationExpressionTranslatorTests.cs @@ -13,22 +13,26 @@ * limitations under the License. */ +using System.Collections.Generic; using System.Linq; using FluentAssertions; using MongoDB.Driver.Core.Misc; -using MongoDB.Driver.Core.TestHelpers.XunitExtensions; -using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Translators.ExpressionToAggregationExpressionTranslators.MethodTranslators { - public class OrderByMethodToAggregationExpressionTranslatorTests : Linq3IntegrationTest + public class OrderByMethodToAggregationExpressionTranslatorTests : LinqIntegrationTest<OrderByMethodToAggregationExpressionTranslatorTests.ClassFixture> { + public OrderByMethodToAggregationExpressionTranslatorTests(ClassFixture fixture) + : base(fixture, server => server.Supports(Feature.SortArrayOperator)) + { + } + [Fact] public void Enumerable_OrderBy_should_work() { - RequireServer.Check().Supports(Feature.SortArrayOperator); - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable().Select(x => 
x.A.OrderBy(x => x.X)); @@ -42,8 +46,7 @@ public void Enumerable_OrderBy_should_work() [Fact] public void Enumerable_OrderByDescending_should_work() { - RequireServer.Check().Supports(Feature.SortArrayOperator); - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable().Select(x => x.A.OrderByDescending(x => x.X)); @@ -57,8 +60,7 @@ public void Enumerable_OrderByDescending_should_work() [Fact] public void Enumerable_ThenBy_should_work() { - RequireServer.Check().Supports(Feature.SortArrayOperator); - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable().Select(x => x.A.OrderByDescending(x => x.X).ThenBy(x => x.Y)); @@ -72,8 +74,7 @@ public void Enumerable_ThenBy_should_work() [Fact] public void Enumerable_ThenByDescending_should_work() { - RequireServer.Check().Supports(Feature.SortArrayOperator); - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable().Select(x => x.A.OrderBy(x => x.X).ThenByDescending(x => x.Y)); @@ -87,8 +88,7 @@ public void Enumerable_ThenByDescending_should_work() [Fact] public void Queryable_OrderBy_should_work() { - RequireServer.Check().Supports(Feature.SortArrayOperator); - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable().Select(x => x.A.AsQueryable().OrderBy(x => x.X)); @@ -102,8 +102,7 @@ public void Queryable_OrderBy_should_work() [Fact] public void Queryable_OrderByDescending_should_work() { - RequireServer.Check().Supports(Feature.SortArrayOperator); - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable().Select(x => x.A.AsQueryable().OrderByDescending(x => x.X)); @@ -117,8 +116,7 @@ public void Queryable_OrderByDescending_should_work() [Fact] public void Queryable_ThenBy_should_work() { - 
RequireServer.Check().Supports(Feature.SortArrayOperator); - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable().Select(x => x.A.AsQueryable().OrderByDescending(x => x.X).ThenBy(x => x.Y)); @@ -132,8 +130,7 @@ public void Queryable_ThenBy_should_work() [Fact] public void Queryable_ThenByDescending_should_work() { - RequireServer.Check().Supports(Feature.SortArrayOperator); - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable().Select(x => x.A.AsQueryable().OrderBy(x => x.X).ThenByDescending(x => x.Y)); @@ -144,16 +141,7 @@ public void Queryable_ThenByDescending_should_work() result.Select(x => x.Y).Should().Equal(2, 1, 4, 3); } - private IMongoCollection<C> CreateCollection() - { - var collection = GetCollection<C>("test"); - CreateCollection( - collection, - new C { Id = 1, A = new A[] { new A(1, 1), new A(1, 2), new A(2, 3), new A(2, 4) } }); - return collection; - } - - private class C + public class C { public int Id { get; set; } public A[] A { get; set; } @@ -161,9 +149,17 @@ private class C public class A { - public A(int x, int y) { X = x; Y = y; } + public A(int x, int y) { X = x; Y = y; } public int X { get; set; } public int Y { get; set; } } + + public sealed class ClassFixture : MongoCollectionFixture<C> + { + protected override IEnumerable<C> InitialData => + [ + new C { Id = 1, A = new A[] { new A(1, 1), new A(1, 2), new A(2, 3), new A(2, 4) } } + ]; + } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/PercentileMethodToAggregationExpressionTranslatorTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/PercentileMethodToAggregationExpressionTranslatorTests.cs new file mode 100644 index 00000000000..c0f1225c245 --- /dev/null +++ 
b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/PercentileMethodToAggregationExpressionTranslatorTests.cs @@ -0,0 +1,541 @@ +/* Copyright 2010-present MongoDB Inc. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +using System.Collections.Generic; +using System.Linq; +using FluentAssertions; +using MongoDB.Bson; +using MongoDB.Bson.Serialization.Attributes; +using MongoDB.Driver.Core.Misc; +using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; +using MongoDB.TestHelpers.XunitExtensions; +using Xunit; + +namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Translators.ExpressionToAggregationExpressionTranslators.MethodTranslators +{ + public class PercentileMethodToAggregationExpressionTranslatorTests : LinqIntegrationTest<PercentileMethodToAggregationExpressionTranslatorTests.ClassFixture> + { + public PercentileMethodToAggregationExpressionTranslatorTests(ClassFixture fixture) + : base(fixture, server => server.Supports(Feature.PercentileOperator)) + { + } + + [Theory] + [ParameterAttributeData] + public void Percentile_with_decimals_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? 
+ collection.AsQueryable().Select(x => x.Decimals.AsQueryable().Percentile(new[] { 0.5 })) : + collection.AsQueryable().Select(x => x.Decimals.Percentile(new[] { 0.5 })); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $percentile : { input : '$Decimals', p : [0.5], method : 'approximate' } }, _id : 0 } }"); + + var results = queryable.ToList(); + results[0].Should().Equal(1.0M); + results[1].Should().Equal(1.0M); + results[2].Should().Equal(2.0M); + } + + [Theory] + [ParameterAttributeData] + public void Percentile_with_decimals_multiple_percentiles_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? + collection.AsQueryable().Select(x => x.Decimals.AsQueryable().Percentile(new[] { 0.25, 0.75 })) : + collection.AsQueryable().Select(x => x.Decimals.Percentile(new[] { 0.25, 0.75 })); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $percentile : { input : '$Decimals', p : [0.25, 0.75], method : 'approximate' } }, _id : 0 } }"); + + var results = queryable.ToList(); + results[0].Should().Equal(1.0M, 1.0M); + results[1].Should().Equal(1.0M, 2.0M); + results[2].Should().Equal(1.0M, 3.0M); + } + + [Theory] + [ParameterAttributeData] + public void Percentile_with_decimals_selector_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? 
+ collection.AsQueryable().Select(x => x.Decimals.AsQueryable().Percentile(y => y * 2.0M, new[] { 0.5 })) : + collection.AsQueryable().Select(x => x.Decimals.Percentile(y => y * 2.0M, new[] { 0.5 })); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $percentile : { input : { $map : { input : '$Decimals', as : 'y', in : { $multiply : ['$$y', NumberDecimal(2)] } } }, p : [0.5], method : 'approximate' } }, _id : 0 } }"); + + var results = queryable.ToList(); + results[0].Should().Equal(2.0M); + results[1].Should().Equal(2.0M); + results[2].Should().Equal(4.0M); + } + + [Theory] + [ParameterAttributeData] + public void Percentile_with_doubles_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? + collection.AsQueryable().Select(x => x.Doubles.AsQueryable().Percentile(new[] { 0.5 })) : + collection.AsQueryable().Select(x => x.Doubles.Percentile(new[] { 0.5 })); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $percentile : { input : '$Doubles', p : [0.5], method : 'approximate' } }, _id : 0 } }"); + + var results = queryable.ToList(); + results[0].Should().Equal(1.0); + results[1].Should().Equal(1.0); + results[2].Should().Equal(2.0); + } + + [Theory] + [ParameterAttributeData] + public void Percentile_with_doubles_selector_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? 
+ collection.AsQueryable().Select(x => x.Doubles.AsQueryable().Percentile(y => y * 2.0, new[] { 0.5 })) : + collection.AsQueryable().Select(x => x.Doubles.Percentile(y => y * 2.0, new[] { 0.5 })); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $percentile : { input : { $map : { input : '$Doubles', as : 'y', in : { $multiply : ['$$y', 2.0] } } }, p : [0.5], method : 'approximate' } }, _id : 0 } }"); + + var results = queryable.ToList(); + results[0].Should().Equal(2.0); + results[1].Should().Equal(2.0); + results[2].Should().Equal(4.0); + } + + [Theory] + [ParameterAttributeData] + public void Percentile_with_floats_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? + collection.AsQueryable().Select(x => x.Floats.AsQueryable().Percentile(new[] { 0.5 })) : + collection.AsQueryable().Select(x => x.Floats.Percentile(new[] { 0.5 })); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $percentile : { input : '$Floats', p : [0.5], method : 'approximate' } }, _id : 0 } }"); + + var results = queryable.ToList(); + results[0].Should().Equal(1.0F); + results[1].Should().Equal(1.0F); + results[2].Should().Equal(2.0F); + } + + [Theory] + [ParameterAttributeData] + public void Percentile_with_floats_selector_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? 
+ collection.AsQueryable().Select(x => x.Floats.AsQueryable().Percentile(y => y * 2.0F, new[] { 0.5 })) : + collection.AsQueryable().Select(x => x.Floats.Percentile(y => y * 2.0F, new[] { 0.5 })); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $percentile : { input : { $map : { input : '$Floats', as : 'y', in : { $multiply : ['$$y', 2.0] } } }, p : [0.5], method : 'approximate' } }, _id : 0 } }"); + + var results = queryable.ToList(); + results[0].Should().Equal(2.0F); + results[1].Should().Equal(2.0F); + results[2].Should().Equal(4.0F); + } + + [Theory] + [ParameterAttributeData] + public void Percentile_with_ints_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? + collection.AsQueryable().Select(x => x.Ints.AsQueryable().Percentile(new[] { 0.5 })) : + collection.AsQueryable().Select(x => x.Ints.Percentile(new[] { 0.5 })); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $percentile : { input : '$Ints', p : [0.5], method : 'approximate' } }, _id : 0 } }"); + + var results = queryable.ToList(); + results[0].Should().Equal(1.0); + results[1].Should().Equal(1.0); + results[2].Should().Equal(2.0); + } + + [Theory] + [ParameterAttributeData] + public void Percentile_with_ints_selector_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? 
+ collection.AsQueryable().Select(x => x.Ints.AsQueryable().Percentile(y => y * 2, new[] { 0.5 })) : + collection.AsQueryable().Select(x => x.Ints.Percentile(y => y * 2, new[] { 0.5 })); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $percentile : { input : { $map : { input : '$Ints', as : 'y', in : { $multiply : ['$$y', 2] } } }, p : [0.5], method : 'approximate' } }, _id : 0 } }"); + + var results = queryable.ToList(); + results[0].Should().Equal(2.0); + results[1].Should().Equal(2.0); + results[2].Should().Equal(4.0); + } + + [Theory] + [ParameterAttributeData] + public void Percentile_with_longs_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? + collection.AsQueryable().Select(x => x.Longs.AsQueryable().Percentile(new[] { 0.5 })) : + collection.AsQueryable().Select(x => x.Longs.Percentile(new[] { 0.5 })); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $percentile : { input : '$Longs', p : [0.5], method : 'approximate' } }, _id : 0 } }"); + + var results = queryable.ToList(); + results[0].Should().Equal(1.0); + results[1].Should().Equal(1.0); + results[2].Should().Equal(2.0); + } + + [Theory] + [ParameterAttributeData] + public void Percentile_with_longs_selector_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? 
+ collection.AsQueryable().Select(x => x.Longs.AsQueryable().Percentile(y => y * 2L, new[] { 0.5 })) : + collection.AsQueryable().Select(x => x.Longs.Percentile(y => y * 2L, new[] { 0.5 })); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $percentile : { input : { $map : { input : '$Longs', as : 'y', in : { $multiply : ['$$y', NumberLong(2)] } } }, p : [0.5], method : 'approximate' } }, _id : 0 } }"); + + var results = queryable.ToList(); + results[0].Should().Equal(2.0); + results[1].Should().Equal(2.0); + results[2].Should().Equal(4.0); + } + + [Theory] + [ParameterAttributeData] + public void Percentile_with_nullable_decimals_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? + collection.AsQueryable().Select(x => x.NullableDecimals.AsQueryable().Percentile(new[] { 0.5 })) : + collection.AsQueryable().Select(x => x.NullableDecimals.Percentile(new[] { 0.5 })); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $percentile : { input : '$NullableDecimals', p : [0.5], method : 'approximate' } }, _id : 0 } }"); + + var results = queryable.ToList(); + results[0].Should().Equal((decimal?)null); + results[1].Should().Equal((decimal?)null); + results[2].Should().Equal(2.0M); + } + + [Theory] + [ParameterAttributeData] + public void Percentile_with_nullable_decimals_selector_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? 
+ collection.AsQueryable().Select(x => x.NullableDecimals.AsQueryable().Percentile(y => y * 2.0M, new[] { 0.5 })) : + collection.AsQueryable().Select(x => x.NullableDecimals.Percentile(y => y * 2.0M, new[] { 0.5 })); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $percentile : { input : { $map : { input : '$NullableDecimals', as : 'y', in : { $multiply : ['$$y', NumberDecimal(2)] } } }, p : [0.5], method : 'approximate' } }, _id : 0 } }"); + + var results = queryable.ToList(); + results[0].Should().Equal((decimal?)null); + results[1].Should().Equal((decimal?)null); + results[2].Should().Equal(4.0M); + } + + [Theory] + [ParameterAttributeData] + public void Percentile_with_nullable_doubles_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? + collection.AsQueryable().Select(x => x.NullableDoubles.AsQueryable().Percentile(new[] { 0.5 })) : + collection.AsQueryable().Select(x => x.NullableDoubles.Percentile(new[] { 0.5 })); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $percentile : { input : '$NullableDoubles', p : [0.5], method : 'approximate' } }, _id : 0 } }"); + + var results = queryable.ToList(); + results[0].Should().Equal((double?)null); + results[1].Should().Equal((double?)null); + results[2].Should().Equal(2.0); + } + + [Theory] + [ParameterAttributeData] + public void Percentile_with_nullable_doubles_selector_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? 
+ collection.AsQueryable().Select(x => x.NullableDoubles.AsQueryable().Percentile(y => y * 2.0, new[] { 0.5 })) : + collection.AsQueryable().Select(x => x.NullableDoubles.Percentile(y => y * 2.0, new[] { 0.5 })); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $percentile : { input : { $map : { input : '$NullableDoubles', as : 'y', in : { $multiply : ['$$y', 2.0] } } }, p : [0.5], method : 'approximate' } }, _id : 0 } }"); + + var results = queryable.ToList(); + results[0].Should().Equal((double?)null); + results[1].Should().Equal((double?)null); + results[2].Should().Equal(4.0); + } + + [Theory] + [ParameterAttributeData] + public void Percentile_with_nullable_floats_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? + collection.AsQueryable().Select(x => x.NullableFloats.AsQueryable().Percentile(new[] { 0.5 })) : + collection.AsQueryable().Select(x => x.NullableFloats.Percentile(new[] { 0.5 })); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $percentile : { input : '$NullableFloats', p : [0.5], method : 'approximate' } }, _id : 0 } }"); + + var results = queryable.ToList(); + results[0].Should().Equal((float?)null); + results[1].Should().Equal((float?)null); + results[2].Should().Equal(2.0F); + } + + [Theory] + [ParameterAttributeData] + public void Percentile_with_nullable_floats_selector_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? 
+ collection.AsQueryable().Select(x => x.NullableFloats.AsQueryable().Percentile(y => y * 2.0F, new[] { 0.5 })) : + collection.AsQueryable().Select(x => x.NullableFloats.Percentile(y => y * 2.0F, new[] { 0.5 })); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $percentile : { input : { $map : { input : '$NullableFloats', as : 'y', in : { $multiply : ['$$y', 2.0] } } }, p : [0.5], method : 'approximate' } }, _id : 0 } }"); + + var results = queryable.ToList(); + results[0].Should().Equal((float?)null); + results[1].Should().Equal((float?)null); + results[2].Should().Equal(4.0F); + } + + [Theory] + [ParameterAttributeData] + public void Percentile_with_nullable_ints_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? + collection.AsQueryable().Select(x => x.NullableInts.AsQueryable().Percentile(new[] { 0.5 })) : + collection.AsQueryable().Select(x => x.NullableInts.Percentile(new[] { 0.5 })); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $percentile : { input : '$NullableInts', p : [0.5], method : 'approximate' } }, _id : 0 } }"); + + var results = queryable.ToList(); + results[0].Should().Equal((double?)null); + results[1].Should().Equal((double?)null); + results[2].Should().Equal(2.0); + } + + [Theory] + [ParameterAttributeData] + public void Percentile_with_nullable_ints_selector_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? 
+ collection.AsQueryable().Select(x => x.NullableInts.AsQueryable().Percentile(y => y * 2, new[] { 0.5 })) : + collection.AsQueryable().Select(x => x.NullableInts.Percentile(y => y * 2, new[] { 0.5 })); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $percentile : { input : { $map : { input : '$NullableInts', as : 'y', in : { $multiply : ['$$y', 2] } } }, p : [0.5], method : 'approximate' } }, _id : 0 } }"); + + var results = queryable.ToList(); + results[0].Should().Equal((double?)null); + results[1].Should().Equal((double?)null); + results[2].Should().Equal(4.0); + } + + [Theory] + [ParameterAttributeData] + public void Percentile_with_nullable_longs_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? + collection.AsQueryable().Select(x => x.NullableLongs.AsQueryable().Percentile(new[] { 0.5 })) : + collection.AsQueryable().Select(x => x.NullableLongs.Percentile(new[] { 0.5 })); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $percentile : { input : '$NullableLongs', p : [0.5], method : 'approximate' } }, _id : 0 } }"); + + var results = queryable.ToList(); + results[0].Should().Equal((double?)null); + results[1].Should().Equal((double?)null); + results[2].Should().Equal(2.0); + } + + [Theory] + [ParameterAttributeData] + public void Percentile_with_nullable_longs_selector_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + + var queryable = withNestedAsQueryable ? 
+ collection.AsQueryable().Select(x => x.NullableLongs.AsQueryable().Percentile(y => y * 2L, new[] { 0.5 })) : + collection.AsQueryable().Select(x => x.NullableLongs.Percentile(y => y * 2L, new[] { 0.5 })); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $percentile : { input : { $map : { input : '$NullableLongs', as : 'y', in : { $multiply : ['$$y', NumberLong(2)] } } }, p : [0.5], method : 'approximate' } }, _id : 0 } }"); + + var results = queryable.ToList(); + results[0].Should().Equal((double?)null); + results[1].Should().Equal((double?)null); + results[2].Should().Equal(4.0); + } + + [Theory] + [ParameterAttributeData] + public void Percentile_with_list_input_should_work( + [Values(false, true)] bool withNestedAsQueryable) + { + var collection = Fixture.Collection; + var percentiles = new List<double> { 0.25, 0.5, 0.75 }; + + var queryable = withNestedAsQueryable + ? collection.AsQueryable().Select(x => x.Doubles.AsQueryable().Percentile(percentiles)) + : collection.AsQueryable().Select(x => x.Doubles.Percentile(percentiles)); + + var stages = Translate(collection, queryable); + AssertStages(stages, "{ $project : { _v : { $percentile : { input : '$Doubles', p : [0.25, 0.5, 0.75], method : 'approximate' } }, _id : 0 } }"); + + var results = queryable.ToList(); + results[0].Should().Equal(1.0, 1.0, 1.0); + results[1].Should().Equal(1.0, 1.0, 2.0); + results[2].Should().Equal(1.0, 2.0, 3.0); + } + + public class C + { + public int Id { get; set; } + [BsonRepresentation(BsonType.Decimal128)] public decimal[] Decimals { get; set; } + public double[] Doubles { get; set; } + public float[] Floats { get; set; } + public int[] Ints { get; set; } + public long[] Longs { get; set; } + [BsonRepresentation(BsonType.Decimal128)] public decimal?[] NullableDecimals { get; set; } + public double?[] NullableDoubles { get; set; } + public float?[] NullableFloats { get; set; } + public int?[] NullableInts { get; set; } + public 
long?[] NullableLongs { get; set; } + } + + public sealed class ClassFixture : MongoCollectionFixture<C> + { + protected override IEnumerable<C> InitialData => + [ + new() + { + Id = 1, + Decimals = [1.0M], + Doubles = [1.0], + Floats = [1.0F], + Ints = [1], + Longs = [1L], + NullableDecimals = [], + NullableDoubles = [], + NullableFloats = [], + NullableInts = [], + NullableLongs = [] + }, + new() + { + Id = 2, + Decimals = [1.0M, 2.0M], + Doubles = [1.0, 2.0], + Floats = [1.0F, 2.0F], + Ints = [1, 2], + Longs = [1L, 2L], + NullableDecimals = [null], + NullableDoubles = [null], + NullableFloats = [null], + NullableInts = [null], + NullableLongs = [null] + }, + new() + { + Id = 3, + Decimals = [1.0M, 2.0M, 3.0M], + Doubles = [1.0, 2.0, 3.0], + Floats = [1.0F, 2.0F, 3.0F], + Ints = [1, 2, 3], + Longs = [1L, 2L, 3L], + NullableDecimals = [null, 1.0M, 2.0M, 3.0M], + NullableDoubles = [null, 1.0, 2.0, 3.0], + NullableFloats = [null, 1.0F, 2.0F, 3.0F], + NullableInts = [null, 1, 2, 3], + NullableLongs = [null, 1L, 2L, 3L] + } + ]; + } + } +} \ No newline at end of file diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/RangeMethodToAggregationExpressionTranslatorTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/RangeMethodToAggregationExpressionTranslatorTests.cs index 15473964d1f..5e0bfcbff60 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/RangeMethodToAggregationExpressionTranslatorTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/RangeMethodToAggregationExpressionTranslatorTests.cs @@ -13,19 +13,25 @@ * limitations under the License. 
*/ +using System.Collections.Generic; using System.Linq; using FluentAssertions; -using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Translators.ExpressionToAggregationExpressionTranslators.MethodTranslators { - public class RangeMethodToAggregationExpressionTranslatorTests : Linq3IntegrationTest + public class RangeMethodToAggregationExpressionTranslatorTests : LinqIntegrationTest<RangeMethodToAggregationExpressionTranslatorTests.ClassFixture> { + public RangeMethodToAggregationExpressionTranslatorTests(ClassFixture fixture) + : base(fixture) + { + } + [Fact] public void Range_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable().Select(x => Enumerable.Range(x.Start, x.Count)); @@ -38,21 +44,20 @@ public void Range_should_work() results[1].Should().Equal(3, 4, 5, 6); } - private IMongoCollection<C> CreateCollection() - { - var collection = GetCollection<C>("test"); - CreateCollection( - collection, - new C { Id = 1, Start = 1, Count = 2 }, - new C { Id = 2, Start = 3, Count = 4 }); - return collection; - } - - private class C + public class C { public int Id { get; set; } public int Start { get; set; } public int Count { get; set; } } + + public sealed class ClassFixture : MongoCollectionFixture<C> + { + protected override IEnumerable<C> InitialData => + [ + new C { Id = 1, Start = 1, Count = 2 }, + new C { Id = 2, Start = 3, Count = 4 } + ]; + } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/ReverseMethodToAggregationExpressionTranslatorTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/ReverseMethodToAggregationExpressionTranslatorTests.cs index cf938d037b5..3a59dfee43a 100644 --- 
a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/ReverseMethodToAggregationExpressionTranslatorTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/ReverseMethodToAggregationExpressionTranslatorTests.cs @@ -13,20 +13,25 @@ * limitations under the License. */ -using System; +using System.Collections.Generic; using System.Linq; using FluentAssertions; -using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Translators.ExpressionToAggregationExpressionTranslators.MethodTranslators { - public class ReverseMethodToAggregationExpressionTranslatorTests : Linq3IntegrationTest + public class ReverseMethodToAggregationExpressionTranslatorTests : LinqIntegrationTest<ReverseMethodToAggregationExpressionTranslatorTests.ClassFixture> { + public ReverseMethodToAggregationExpressionTranslatorTests(ClassFixture fixture) + : base(fixture) + { + } + [Fact] public void Enumerable_Reverse_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable().Select(x => x.A.Reverse()); @@ -40,7 +45,7 @@ public void Enumerable_Reverse_should_work() [Fact] public void Queryable_Reverse_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable().Select(x => x.A.AsQueryable().Reverse()); @@ -51,19 +56,18 @@ public void Queryable_Reverse_should_work() result.Should().Equal(3, 2, 1); } - private IMongoCollection<C> CreateCollection() + public class C { - var collection = GetCollection<C>("test"); - CreateCollection( - collection, - new C { Id = 1, A = new int[] { 1, 2, 3 } }); - return collection; + public int Id { get; set; } + public int[] A { get; set; } } - private class C + public sealed class ClassFixture : 
MongoCollectionFixture<C> { - public int Id { get; set; } - public int[] A { get; set; } + protected override IEnumerable<C> InitialData => + [ + new C { Id = 1, A = new int[] { 1, 2, 3 } } + ]; } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/RoundMethodToAggregationExpressionTranslatorTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/RoundMethodToAggregationExpressionTranslatorTests.cs index b39a8b61496..249c4d6dc88 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/RoundMethodToAggregationExpressionTranslatorTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/RoundMethodToAggregationExpressionTranslatorTests.cs @@ -14,24 +14,28 @@ */ using System; +using System.Collections.Generic; using System.Linq; using FluentAssertions; using MongoDB.Bson; using MongoDB.Bson.Serialization.Attributes; using MongoDB.Driver.Core.Misc; -using MongoDB.Driver.Core.TestHelpers.XunitExtensions; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Translators.ExpressionToAggregationExpressionTranslators.MethodTranslators { - public class RoundMethodToAggregationExpressionTranslatorTests : Linq3IntegrationTest + public class RoundMethodToAggregationExpressionTranslatorTests : LinqIntegrationTest<RoundMethodToAggregationExpressionTranslatorTests.ClassFixture> { + public RoundMethodToAggregationExpressionTranslatorTests(ClassFixture fixture) + : base(fixture, server => server.Supports(Feature.Round)) + { + } + [Fact] public void Math_round_double_should_work() { - RequireServer.Check().Supports(Feature.Round); - - var collection = CreateCollection(); + var collection = 
Fixture.Collection; var queryable = collection.AsQueryable() .Select(i => Math.Round(i.Double)); @@ -47,9 +51,7 @@ public void Math_round_double_should_work() [Fact] public void Math_round_double_with_digits_should_work() { - RequireServer.Check().Supports(Feature.Round); - - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .Select(i => Math.Round(i.Double, 1)); @@ -65,9 +67,7 @@ public void Math_round_double_with_digits_should_work() [Fact] public void Math_round_decimal_should_work() { - RequireServer.Check().Supports(Feature.Round); - - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .Select(i => Math.Round(i.Decimal)); @@ -83,9 +83,7 @@ public void Math_round_decimal_should_work() [Fact] public void Math_round_decimal_with_decimals_should_work() { - RequireServer.Check().Supports(Feature.Round); - - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .Select(i => Math.Round(i.Decimal, 1)); @@ -98,22 +96,21 @@ public void Math_round_decimal_with_decimals_should_work() results.Should().Equal(10.2m, 9.7m, 9.2m); } - private IMongoCollection<Data> CreateCollection() - { - var collection = GetCollection<Data>("test"); - CreateCollection( - collection, - new Data { Double = 10.234, Decimal = 10.234m }, - new Data { Double = 9.66, Decimal = 9.66m }, - new Data { Double = 9.2, Decimal = 9.2m }); - return collection; - } - - private class Data + public class Data { public double Double { get; set; } [BsonRepresentation(BsonType.Decimal128)] public decimal Decimal { get; set; } } + + public sealed class ClassFixture : MongoCollectionFixture<Data> + { + protected override IEnumerable<Data> InitialData => + [ + new Data { Double = 10.234, Decimal = 10.234m }, + new Data { Double = 9.66, Decimal = 9.66m }, + new Data { Double = 9.2, Decimal = 9.2m } + ]; + } } } diff --git 
a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/SelectMethodToAggregationExpressionTranslatorTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/SelectMethodToAggregationExpressionTranslatorTests.cs index c7938d63e35..e57de82ac3a 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/SelectMethodToAggregationExpressionTranslatorTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/SelectMethodToAggregationExpressionTranslatorTests.cs @@ -13,19 +13,25 @@ * limitations under the License. */ +using System.Collections.Generic; using System.Linq; using FluentAssertions; -using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Translators.ExpressionToAggregationExpressionTranslators.MethodTranslators { - public class SelectMethodToAggregationExpressionTranslatorTests : Linq3IntegrationTest + public class SelectMethodToAggregationExpressionTranslatorTests : LinqIntegrationTest<SelectMethodToAggregationExpressionTranslatorTests.ClassFixture> { + public SelectMethodToAggregationExpressionTranslatorTests(ClassFixture fixture) + : base(fixture) + { + } + [Fact] public void Enumerable_Select_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable().Select(x => x.A.Select(x => x + 1)); @@ -39,7 +45,7 @@ public void Enumerable_Select_should_work() [Fact] public void Queryable_Select_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable().Select(x => x.A.AsQueryable().Select(x => x + 1)); @@ -50,19 +56,18 @@ public void 
Queryable_Select_should_work() result.Should().Equal(2, 3, 4); } - private IMongoCollection<C> CreateCollection() + public class C { - var collection = GetCollection<C>("test"); - CreateCollection( - collection, - new C { Id = 1, A = new int[] { 1, 2, 3 } }); - return collection; + public int Id { get; set; } + public int[] A { get; set; } } - private class C + public sealed class ClassFixture : MongoCollectionFixture<C> { - public int Id { get; set; } - public int[] A { get; set; } + protected override IEnumerable<C> InitialData => + [ + new C { Id = 1, A = new int[] { 1, 2, 3 } } + ]; } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/SkipOrTakeMethodToAggregationExpressionTranslatorTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/SkipOrTakeMethodToAggregationExpressionTranslatorTests.cs index e16b9b36820..5f1d92b51e6 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/SkipOrTakeMethodToAggregationExpressionTranslatorTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/SkipOrTakeMethodToAggregationExpressionTranslatorTests.cs @@ -13,19 +13,25 @@ * limitations under the License. 
*/ +using System.Collections.Generic; using System.Linq; using FluentAssertions; -using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Translators.ExpressionToAggregationExpressionTranslators.MethodTranslators { - public class SkipOrTakeMethodToAggregationExpressionTranslatorTests : Linq3IntegrationTest + public class SkipOrTakeMethodToAggregationExpressionTranslatorTests : LinqIntegrationTest<SkipOrTakeMethodToAggregationExpressionTranslatorTests.ClassFixture> { + public SkipOrTakeMethodToAggregationExpressionTranslatorTests(ClassFixture fixture) + : base(fixture) + { + } + [Fact] public void Enumerable_Take_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable().Select(x => x.A.Take(2)); @@ -43,7 +49,7 @@ public void Enumerable_Take_should_work() [Fact] public void Queryable_Take_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable().Select(x => x.A.AsQueryable().Take(2)); @@ -58,22 +64,21 @@ public void Queryable_Take_should_work() results[3].Should().Equal(1, 2); } - private IMongoCollection<C> CreateCollection() + public class C { - var collection = GetCollection<C>("test"); - CreateCollection( - collection, - new C { Id = 0, A = new int[0] }, - new C { Id = 1, A = new int[] { 1 } }, - new C { Id = 2, A = new int[] { 1, 2 } }, - new C { Id = 3, A = new int[] { 1, 2, 3 } }); - return collection; + public int Id { get; set; } + public int[] A { get; set; } } - private class C + public sealed class ClassFixture : MongoCollectionFixture<C> { - public int Id { get; set; } - public int[] A { get; set; } + protected override IEnumerable<C> InitialData => + [ + new C { Id = 0, A = new int[0] }, + new C { Id = 1, A = new int[] { 1 } }, + new C { Id = 2, A = new int[] { 1, 2 } }, + new C { Id = 3, A = new int[] { 1, 2, 3 } } + ]; } } } 
diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/StringConcatMethodToAggregationExpressionTranslatorTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/StringConcatMethodToAggregationExpressionTranslatorTests.cs index 0995c2218ac..fb94b8f9b83 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/StringConcatMethodToAggregationExpressionTranslatorTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/StringConcatMethodToAggregationExpressionTranslatorTests.cs @@ -13,18 +13,25 @@ * limitations under the License. */ +using System.Collections.Generic; using FluentAssertions; using System.Linq; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Translators.ExpressionToAggregationExpressionTranslators.MethodTranslators { - public class StringConcatMethodToAggregationExpressionTranslatorTests : Linq3IntegrationTest + public class StringConcatMethodToAggregationExpressionTranslatorTests : LinqIntegrationTest<StringConcatMethodToAggregationExpressionTranslatorTests.ClassFixture> { + public StringConcatMethodToAggregationExpressionTranslatorTests(ClassFixture fixture) + : base(fixture) + { + } + [Fact] public void Filter_using_string_concat_with_two_strings_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .Where(i => string.Concat(i.A, ";") == "A1;"); @@ -41,7 +48,7 @@ public void Filter_using_string_concat_with_two_strings_should_work() [Fact] public void Projection_using_string_concat_with_two_strings_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; 
var queryable = collection.AsQueryable() .Where(i => i.Id == 1) @@ -60,7 +67,7 @@ public void Projection_using_string_concat_with_two_strings_should_work() [Fact] public void Filter_using_string_concat_with_three_strings_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .Where(i => string.Concat(i.A, ";", i.B) == "A1;B1"); @@ -77,7 +84,7 @@ public void Filter_using_string_concat_with_three_strings_should_work() [Fact] public void Projection_using_string_concat_with_three_strings_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .Where(i => i.Id == 1) @@ -96,7 +103,7 @@ public void Projection_using_string_concat_with_three_strings_should_work() [Fact] public void Filter_using_string_concat_with_four_strings_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .Where(i => string.Concat(i.A, ";", i.B, i.C) == "A1;B1C1"); @@ -113,7 +120,7 @@ public void Filter_using_string_concat_with_four_strings_should_work() [Fact] public void Projection_using_string_concat_with_four_strings_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .Where(i => i.Id == 1) @@ -132,7 +139,7 @@ public void Projection_using_string_concat_with_four_strings_should_work() [Fact] public void Filter_using_string_concat_with_params_array_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .Where(i => string.Concat(i.A, ";", i.B, ";", i.C) == "A1;B1;C1"); @@ -148,7 +155,7 @@ public void Filter_using_string_concat_with_params_array_should_work() [Fact] public void Projection_using_string_concat_with_params_array_should_work() { - var collection = CreateCollection(); + var collection = 
Fixture.Collection; var queryable = collection.AsQueryable() .Where(i => i.Id == 1) @@ -164,17 +171,7 @@ public void Projection_using_string_concat_with_params_array_should_work() result.T.Should().Be("A1;B1;C1"); } - private IMongoCollection<Data> CreateCollection() - { - var collection = GetCollection<Data>("test"); - CreateCollection( - collection, - new Data { Id = 1, A = "A1", B = "B1", C = "C1", D="D1" }, - new Data { Id = 2, A = "A2", B = "B2", C = "C2", D="D2" }); - return collection; - } - - private class Data + public class Data { public int Id { get; set; } public string A { get; set; } @@ -182,5 +179,14 @@ private class Data public string C { get; set; } public string D { get; set; } } + + public sealed class ClassFixture : MongoCollectionFixture<Data> + { + protected override IEnumerable<Data> InitialData => + [ + new Data { Id = 1, A = "A1", B = "B1", C = "C1", D="D1" }, + new Data { Id = 2, A = "A2", B = "B2", C = "C2", D="D2" } + ]; + } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/SumMethodToAggregationExpressionTranslatorTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/SumMethodToAggregationExpressionTranslatorTests.cs index 9ba27b13084..8f65d8aa349 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/SumMethodToAggregationExpressionTranslatorTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/SumMethodToAggregationExpressionTranslatorTests.cs @@ -13,24 +13,30 @@ * limitations under the License. 
*/ +using System.Collections.Generic; using System.Linq; using FluentAssertions; using MongoDB.Bson; using MongoDB.Bson.Serialization.Attributes; -using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using MongoDB.TestHelpers.XunitExtensions; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Translators.ExpressionToAggregationExpressionTranslators.MethodTranslators { - public class SumMethodToAggregationExpressionTranslatorTests : Linq3IntegrationTest + public class SumMethodToAggregationExpressionTranslatorTests : LinqIntegrationTest<SumMethodToAggregationExpressionTranslatorTests.ClassFixture> { + public SumMethodToAggregationExpressionTranslatorTests(ClassFixture fixture) + : base(fixture) + { + } + [Theory] [ParameterAttributeData] public void Sum_with_decimals_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.Decimals.AsQueryable().Sum()) : @@ -48,7 +54,7 @@ public void Sum_with_decimals_should_work( public void Sum_with_decimals_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.Decimals.AsQueryable().Sum(x => x * 2.0M)) : @@ -66,7 +72,7 @@ public void Sum_with_decimals_selector_should_work( public void Sum_with_doubles_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? 
collection.AsQueryable().Select(x => x.Doubles.AsQueryable().Sum()) : @@ -84,7 +90,7 @@ public void Sum_with_doubles_should_work( public void Sum_with_doubles_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.Doubles.AsQueryable().Sum(x => x * 2.0)) : @@ -102,7 +108,7 @@ public void Sum_with_doubles_selector_should_work( public void Sum_with_floats_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.Floats.AsQueryable().Sum()) : @@ -120,7 +126,7 @@ public void Sum_with_floats_should_work( public void Sum_with_floats_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.Floats.AsQueryable().Sum(x => x * 2.0F)) : @@ -138,7 +144,7 @@ public void Sum_with_floats_selector_should_work( public void Sum_with_ints_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.Ints.AsQueryable().Sum()) : @@ -156,7 +162,7 @@ public void Sum_with_ints_should_work( public void Sum_with_ints_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? 
collection.AsQueryable().Select(x => x.Ints.AsQueryable().Sum(x => x * 2)) : @@ -174,7 +180,7 @@ public void Sum_with_ints_selector_should_work( public void Sum_with_longs_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.Longs.AsQueryable().Sum()) : @@ -192,7 +198,7 @@ public void Sum_with_longs_should_work( public void Sum_with_longs_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.Longs.AsQueryable().Sum(x => x * 2L)) : @@ -210,7 +216,7 @@ public void Sum_with_longs_selector_should_work( public void Sum_with_nullable_decimals_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.NullableDecimals.AsQueryable().Sum()) : @@ -228,7 +234,7 @@ public void Sum_with_nullable_decimals_should_work( public void Sum_with_nullable_decimals_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.NullableDecimals.AsQueryable().Sum(x => x * 2.0M)) : @@ -246,7 +252,7 @@ public void Sum_with_nullable_decimals_selector_should_work( public void Sum_with_nullable_doubles_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? 
collection.AsQueryable().Select(x => x.NullableDoubles.AsQueryable().Sum()) : @@ -264,7 +270,7 @@ public void Sum_with_nullable_doubles_should_work( public void Sum_with_nullable_doubles_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.NullableDoubles.AsQueryable().Sum(x => x * 2.0)) : @@ -282,7 +288,7 @@ public void Sum_with_nullable_doubles_selector_should_work( public void Sum_with_nullable_floats_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.NullableFloats.AsQueryable().Sum()) : @@ -300,7 +306,7 @@ public void Sum_with_nullable_floats_should_work( public void Sum_with_nullable_floats_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.NullableFloats.AsQueryable().Sum(x => x * 2.0F)) : @@ -318,7 +324,7 @@ public void Sum_with_nullable_floats_selector_should_work( public void Sum_with_nullable_ints_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.NullableInts.AsQueryable().Sum()) : @@ -336,7 +342,7 @@ public void Sum_with_nullable_ints_should_work( public void Sum_with_nullable_ints_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? 
collection.AsQueryable().Select(x => x.NullableInts.AsQueryable().Sum(x => x * 2)) : @@ -354,7 +360,7 @@ public void Sum_with_nullable_ints_selector_should_work( public void Sum_with_nullable_longs_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.NullableLongs.AsQueryable().Sum()) : @@ -372,7 +378,7 @@ public void Sum_with_nullable_longs_should_work( public void Sum_with_nullable_longs_selector_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.NullableLongs.AsQueryable().Sum(x => x * 2L)) : @@ -385,11 +391,25 @@ public void Sum_with_nullable_longs_selector_should_work( results.Should().Equal(0, 0, 12L); } - private IMongoCollection<C> CreateCollection() + public class C { - var collection = GetCollection<C>("test"); - CreateCollection( - collection, + public int Id { get; set; } + [BsonRepresentation(BsonType.Decimal128)] public decimal[] Decimals { get; set; } + public double[] Doubles { get; set; } + public float[] Floats { get; set; } + public int[] Ints { get; set; } + public long[] Longs { get; set; } + [BsonRepresentation(BsonType.Decimal128)] public decimal?[] NullableDecimals { get; set; } + public double?[] NullableDoubles { get; set; } + public float?[] NullableFloats { get; set; } + public int?[] NullableInts { get; set; } + public long?[] NullableLongs { get; set; } + } + + public sealed class ClassFixture : MongoCollectionFixture<C> + { + protected override IEnumerable<C> InitialData => + [ new C { Id = 1, @@ -431,23 +451,8 @@ private IMongoCollection<C> CreateCollection() NullableFloats = new float?[] { null, 1.0F, 2.0F, 3.0F }, NullableInts = new int?[] { null, 1, 2, 3 }, NullableLongs = new long?[] { null, 1L, 
2L, 3L } - }); - return collection; - } - - private class C - { - public int Id { get; set; } - [BsonRepresentation(BsonType.Decimal128)] public decimal[] Decimals { get; set; } - public double[] Doubles { get; set; } - public float[] Floats { get; set; } - public int[] Ints { get; set; } - public long[] Longs { get; set; } - [BsonRepresentation(BsonType.Decimal128)] public decimal?[] NullableDecimals { get; set; } - public double?[] NullableDoubles { get; set; } - public float?[] NullableFloats { get; set; } - public int?[] NullableInts { get; set; } - public long?[] NullableLongs { get; set; } + } + ]; } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/ToArrayMethodToAggregationExpressionTranslatorTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/ToArrayMethodToAggregationExpressionTranslatorTests.cs index 8255a187159..d04618a3a89 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/ToArrayMethodToAggregationExpressionTranslatorTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/ToArrayMethodToAggregationExpressionTranslatorTests.cs @@ -16,20 +16,25 @@ using System.Collections.Generic; using System.Linq; using FluentAssertions; -using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using MongoDB.TestHelpers.XunitExtensions; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Translators.ExpressionToAggregationExpressionTranslators.MethodTranslators { - public class ToArrayMethodToAggregationExpressionTranslatorTests : Linq3IntegrationTest + public class ToArrayMethodToAggregationExpressionTranslatorTests : 
LinqIntegrationTest<ToArrayMethodToAggregationExpressionTranslatorTests.ClassFixture> { + public ToArrayMethodToAggregationExpressionTranslatorTests(ClassFixture fixture) + : base(fixture) + { + } + [Theory] [ParameterAttributeData] public void Array_ToArray_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.Array.AsQueryable().ToArray()) : @@ -47,7 +52,7 @@ public void Array_ToArray_should_work( public void IEnumerable_ToArray_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.IEnumerable.AsQueryable().ToArray()) : @@ -65,7 +70,7 @@ public void IEnumerable_ToArray_should_work( public void List_ToArray_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? 
collection.AsQueryable().Select(x => x.List.AsQueryable().ToArray()) : @@ -78,27 +83,26 @@ public void List_ToArray_should_work( result.Should().Equal(1, 2, 3); } - private IMongoCollection<C> CreateCollection() + public class C { - var collection = GetCollection<C>("test"); - CreateCollection( - collection, + public int Id { get; set; } + public int[] Array { get; set; } + public IEnumerable<int> IEnumerable { get; set; } + public List<int> List { get; set; } + } + + public sealed class ClassFixture : MongoCollectionFixture<C> + { + protected override IEnumerable<C> InitialData => + [ new C { Id = 1, Array = new int[] { 1, 2, 3 }, IEnumerable = new List<int> { 1, 2, 3 }, List = new List<int> { 1, 2, 3 } - }); - return collection; - } - - private class C - { - public int Id { get; set; } - public int[] Array { get; set; } - public IEnumerable<int> IEnumerable { get; set; } - public List<int> List { get; set; } + } + ]; } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/ToListMethodToAggregationTranslatorTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/ToListMethodToAggregationTranslatorTests.cs index 66b405c05c8..9f6e1383914 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/ToListMethodToAggregationTranslatorTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/ToListMethodToAggregationTranslatorTests.cs @@ -16,20 +16,25 @@ using System.Collections.Generic; using System.Linq; using FluentAssertions; -using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using MongoDB.TestHelpers.XunitExtensions; using Xunit; namespace 
MongoDB.Driver.Tests.Linq.Linq3Implementation.Translators.ExpressionToAggregationExpressionTranslators.MethodTranslators { - public class ToListMethodToAggregationExpressionTranslatorTests : Linq3IntegrationTest + public class ToListMethodToAggregationExpressionTranslatorTests : LinqIntegrationTest<ToListMethodToAggregationExpressionTranslatorTests.ClassFixture> { + public ToListMethodToAggregationExpressionTranslatorTests(ClassFixture fixture) + : base(fixture) + { + } + [Theory] [ParameterAttributeData] public void Array_ToList_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.Array.AsQueryable().ToList()) : @@ -47,7 +52,7 @@ public void Array_ToList_should_work( public void IEnumerable_ToList_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? collection.AsQueryable().Select(x => x.IEnumerable.AsQueryable().ToList()) : @@ -65,7 +70,7 @@ public void IEnumerable_ToList_should_work( public void List_ToList_should_work( [Values(false, true)] bool withNestedAsQueryable) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryable ? 
collection.AsQueryable().Select(x => x.List.AsQueryable().ToList()) : @@ -78,27 +83,26 @@ public void List_ToList_should_work( result.Should().Equal(1, 2, 3); } - private IMongoCollection<C> CreateCollection() + public class C { - var collection = GetCollection<C>("test"); - CreateCollection( - collection, + public int Id { get; set; } + public int[] Array { get; set; } + public IEnumerable<int> IEnumerable { get; set; } + public List<int> List { get; set; } + } + + public sealed class ClassFixture : MongoCollectionFixture<C> + { + protected override IEnumerable<C> InitialData => + [ new C { Id = 1, Array = new int[] { 1, 2, 3 }, IEnumerable = new List<int> { 1, 2, 3 }, List = new List<int> { 1, 2, 3 } - }); - return collection; - } - - private class C - { - public int Id { get; set; } - public int[] Array { get; set; } - public IEnumerable<int> IEnumerable { get; set; } - public List<int> List { get; set; } + } + ]; } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/UnionMethodToAggregationExpressionTranslatorTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/UnionMethodToAggregationExpressionTranslatorTests.cs index 688e746e652..3bc0e6b1640 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/UnionMethodToAggregationExpressionTranslatorTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/UnionMethodToAggregationExpressionTranslatorTests.cs @@ -13,22 +13,28 @@ * limitations under the License. 
*/ +using System.Collections.Generic; using System.Linq; using FluentAssertions; -using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using MongoDB.TestHelpers.XunitExtensions; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Translators.ExpressionToAggregationExpressionTranslators.MethodTranslators { - public class UnionMethodToAggregationExpressionTranslatorTests : Linq3IntegrationTest + public class UnionMethodToAggregationExpressionTranslatorTests : LinqIntegrationTest<UnionMethodToAggregationExpressionTranslatorTests.ClassFixture> { + public UnionMethodToAggregationExpressionTranslatorTests(ClassFixture fixture) + : base(fixture) + { + } + [Theory] [ParameterAttributeData] public void Enumerable_Union_should_work( [Values(false, true)] bool withNestedAsQueryableSource2) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryableSource2 ? collection.AsQueryable().Select(x => x.A.Union(x.B.AsQueryable())) : @@ -50,7 +56,7 @@ public void Enumerable_Union_should_work( public void Queryable_Union_should_work( [Values(false, true)] bool withNestedAsQueryableSource2) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryableSource2 ? 
collection.AsQueryable().Select(x => x.A.AsQueryable().Union(x.B.AsQueryable())) : @@ -67,23 +73,22 @@ public void Queryable_Union_should_work( results[3].Should().BeEquivalentTo(1, 2, 3); } - private IMongoCollection<C> CreateCollection() - { - var collection = GetCollection<C>("test"); - CreateCollection( - collection, - new C { Id = 0, A = new int[0], B = new int[0] }, - new C { Id = 1, A = new int[0], B = new int[] { 1 } }, - new C { Id = 2, A = new int[] { 1 }, B = new int[0] }, - new C { Id = 3, A = new int[] { 1, 2 }, B = new int[] { 2, 3 } }); - return collection; - } - - private class C + public class C { public int Id { get; set; } public int[] A { get; set; } public int[] B { get; set; } } + + public sealed class ClassFixture : MongoCollectionFixture<C> + { + protected override IEnumerable<C> InitialData => + [ + new C { Id = 0, A = new int[0], B = new int[0] }, + new C { Id = 1, A = new int[0], B = new int[] { 1 } }, + new C { Id = 2, A = new int[] { 1 }, B = new int[0] }, + new C { Id = 3, A = new int[] { 1, 2 }, B = new int[] { 2, 3 } } + ]; + } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/WhereMethodToAggregationExpressionTranslatorTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/WhereMethodToAggregationExpressionTranslatorTests.cs index 78305b10af0..a4175b4aede 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/WhereMethodToAggregationExpressionTranslatorTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/WhereMethodToAggregationExpressionTranslatorTests.cs @@ -13,19 +13,25 @@ * limitations under the License. 
*/ +using System.Collections.Generic; using System.Linq; using FluentAssertions; -using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Translators.ExpressionToAggregationExpressionTranslators.MethodTranslators { - public class WhereMethodToAggregationExpressionTranslatorTests : Linq3IntegrationTest + public class WhereMethodToAggregationExpressionTranslatorTests : LinqIntegrationTest<WhereMethodToAggregationExpressionTranslatorTests.ClassFixture> { + public WhereMethodToAggregationExpressionTranslatorTests(ClassFixture fixture) + : base(fixture) + { + } + [Fact] public void Enumerable_Where_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable().Select(x => x.A.Where(x => x > 1)); @@ -43,7 +49,7 @@ public void Enumerable_Where_should_work() [Fact] public void Enumerable_Where_Count_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable().Select(x => x.A.Where(x => x > 1).Count()); @@ -61,7 +67,7 @@ public void Enumerable_Where_Count_should_work() [Fact] public void Queryable_Where_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable().Select(x => x.A.AsQueryable().Where(x => x > 1)); @@ -79,7 +85,7 @@ public void Queryable_Where_should_work() [Fact] public void Queryable_Where_Count_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable().Select(x => x.A.AsQueryable().Where(x => x > 1).Count()); @@ -94,22 +100,21 @@ public void Queryable_Where_Count_should_work() results[3].Should().Be(2); } - private IMongoCollection<C> CreateCollection() + public class C { - var collection = GetCollection<C>("test"); - CreateCollection( - collection, - new C { Id = 0, A = new int[0] }, - new C 
{ Id = 1, A = new int[] { 1 } }, - new C { Id = 2, A = new int[] { 1, 2 } }, - new C { Id = 3, A = new int[] { 1, 2, 3 } }); - return collection; + public int Id { get; set; } + public int[] A { get; set; } } - private class C + public sealed class ClassFixture : MongoCollectionFixture<C> { - public int Id { get; set; } - public int[] A { get; set; } + protected override IEnumerable<C> InitialData => + [ + new C { Id = 0, A = new int[0] }, + new C { Id = 1, A = new int[] { 1 } }, + new C { Id = 2, A = new int[] { 1, 2 } }, + new C { Id = 3, A = new int[] { 1, 2, 3 } } + ]; } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/WindowMethodToAggregationExpressionTranslatorTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/WindowMethodToAggregationExpressionTranslatorTests.cs index 2fb89b2a56f..b34c3f77e3b 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/WindowMethodToAggregationExpressionTranslatorTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/WindowMethodToAggregationExpressionTranslatorTests.cs @@ -22,17 +22,22 @@ using MongoDB.Driver.Core.Misc; using MongoDB.Driver.Core.TestHelpers.XunitExtensions; using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Translators.ExpressionToAggregationExpressionTranslators.MethodTranslators { - public class WindowMethodToAggregationExpressionTranslatorTests : Linq3IntegrationTest + public class WindowMethodToAggregationExpressionTranslatorTests : LinqIntegrationTest<WindowMethodToAggregationExpressionTranslatorTests.ClassFixture> { + public 
WindowMethodToAggregationExpressionTranslatorTests(ClassFixture fixture) + : base(fixture, server => server.Supports(Feature.SetWindowFields)) + { + } + [Fact] public void Translate_should_return_expected_result_for_AddToSet() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.AddToSet(x => x.Int32Field, null) }); @@ -52,8 +57,7 @@ public void Translate_should_return_expected_result_for_AddToSet() [Fact] public void Translate_should_return_expected_result_for_Average_with_Decimal() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.Average(x => x.DecimalField, null) }); @@ -72,8 +76,7 @@ public void Translate_should_return_expected_result_for_Average_with_Decimal() [Fact] public void Translate_should_return_expected_result_for_Average_with_Double() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.Average(x => x.DoubleField, null) }); @@ -92,8 +95,7 @@ public void Translate_should_return_expected_result_for_Average_with_Double() [Fact] public void Translate_should_return_expected_result_for_Average_with_Int32() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.Average(x => x.Int32Field, null) }); @@ -112,8 +114,7 @@ public void Translate_should_return_expected_result_for_Average_with_Int32() [Fact] public void Translate_should_return_expected_result_for_Average_with_Int64() { - 
RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.Average(x => x.Int64Field, null) }); @@ -132,8 +133,7 @@ public void Translate_should_return_expected_result_for_Average_with_Int64() [Fact] public void Translate_should_return_expected_result_for_Average_with_nullable_Decimal() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.Average(x => x.NullableDecimalField, null) }); @@ -152,8 +152,7 @@ public void Translate_should_return_expected_result_for_Average_with_nullable_De [Fact] public void Translate_should_return_expected_result_for_Average_with_nullable_Double() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.Average(x => x.NullableDoubleField, null) }); @@ -172,8 +171,7 @@ public void Translate_should_return_expected_result_for_Average_with_nullable_Do [Fact] public void Translate_should_return_expected_result_for_Average_with_nullable_Int32() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.Average(x => x.NullableInt32Field, null) }); @@ -192,8 +190,7 @@ public void Translate_should_return_expected_result_for_Average_with_nullable_In [Fact] public void Translate_should_return_expected_result_for_Average_with_nullable_Int64() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate 
= collection.Aggregate() .SetWindowFields(output: p => new { Result = p.Average(x => x.NullableInt64Field, null) }); @@ -212,8 +209,7 @@ public void Translate_should_return_expected_result_for_Average_with_nullable_In [Fact] public void Translate_should_return_expected_result_for_Average_with_nullable_Single() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.Average(x => x.NullableSingleField, null) }); @@ -232,8 +228,7 @@ public void Translate_should_return_expected_result_for_Average_with_nullable_Si [Fact] public void Translate_should_return_expected_result_for_Average_with_Single() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.Average(x => x.SingleField, null) }); @@ -252,8 +247,7 @@ public void Translate_should_return_expected_result_for_Average_with_Single() [Fact] public void Translate_should_return_expected_result_for_Count() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.Count(null) }); @@ -272,8 +266,7 @@ public void Translate_should_return_expected_result_for_Count() [Fact] public void Translate_should_return_expected_result_for_CovariancePopulation_with_Decimal() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.CovariancePopulation(x => x.DecimalField1, x => x.DecimalField2, null) }); @@ -292,8 +285,7 @@ public void 
Translate_should_return_expected_result_for_CovariancePopulation_wit [Fact] public void Translate_should_return_expected_result_for_CovariancePopulation_with_Double() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.CovariancePopulation(x => x.DoubleField1, x => x.DoubleField2, null) }); @@ -312,8 +304,7 @@ public void Translate_should_return_expected_result_for_CovariancePopulation_wit [Fact] public void Translate_should_return_expected_result_for_CovariancePopulation_with_Int32() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.CovariancePopulation(x => x.Int32Field1, x => x.Int32Field2, null) }); @@ -332,8 +323,7 @@ public void Translate_should_return_expected_result_for_CovariancePopulation_wit [Fact] public void Translate_should_return_expected_result_for_CovariancePopulation_with_Int64() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.CovariancePopulation(x => x.Int64Field1, x => x.Int64Field2, null) }); @@ -352,8 +342,7 @@ public void Translate_should_return_expected_result_for_CovariancePopulation_wit [Fact] public void Translate_should_return_expected_result_for_CovariancePopulation_with_nullable_Decimal() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.CovariancePopulation(x => x.NullableDecimalField1, x => x.NullableDecimalField2, null) }); @@ -372,8 +361,7 @@ 
public void Translate_should_return_expected_result_for_CovariancePopulation_wit [Fact] public void Translate_should_return_expected_result_for_CovariancePopulation_with_nullable_Double() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.CovariancePopulation(x => x.NullableDoubleField1, x => x.NullableDoubleField2, null) }); @@ -392,8 +380,7 @@ public void Translate_should_return_expected_result_for_CovariancePopulation_wit [Fact] public void Translate_should_return_expected_result_for_CovariancePopulation_with_nullable_Int32() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.CovariancePopulation(x => x.NullableInt32Field1, x => x.NullableInt32Field2, null) }); @@ -412,8 +399,7 @@ public void Translate_should_return_expected_result_for_CovariancePopulation_wit [Fact] public void Translate_should_return_expected_result_for_CovariancePopulation_with_nullable_Int64() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.CovariancePopulation(x => x.NullableInt64Field1, x => x.NullableInt64Field2, null) }); @@ -432,8 +418,7 @@ public void Translate_should_return_expected_result_for_CovariancePopulation_wit [Fact] public void Translate_should_return_expected_result_for_CovariancePopulation_with_nullable_Single() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.CovariancePopulation(x => 
x.NullableSingleField1, x => x.NullableSingleField2, null) }); @@ -452,8 +437,7 @@ public void Translate_should_return_expected_result_for_CovariancePopulation_wit [Fact] public void Translate_should_return_expected_result_for_CovariancePopulation_with_Single() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.CovariancePopulation(x => x.SingleField1, x => x.SingleField2, null) }); @@ -472,8 +456,7 @@ public void Translate_should_return_expected_result_for_CovariancePopulation_wit [Fact] public void Translate_should_return_expected_result_for_CovarianceSample_with_Decimal() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.CovarianceSample(x => x.DecimalField1, x => x.DecimalField2, null) }); @@ -492,8 +475,7 @@ public void Translate_should_return_expected_result_for_CovarianceSample_with_De [Fact] public void Translate_should_return_expected_result_for_CovarianceSample_with_Double() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.CovarianceSample(x => x.DoubleField1, x => x.DoubleField2, null) }); @@ -512,8 +494,7 @@ public void Translate_should_return_expected_result_for_CovarianceSample_with_Do [Fact] public void Translate_should_return_expected_result_for_CovarianceSample_with_Int32() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.CovarianceSample(x => x.Int32Field1, x 
=> x.Int32Field2, null) }); @@ -532,8 +513,7 @@ public void Translate_should_return_expected_result_for_CovarianceSample_with_In [Fact] public void Translate_should_return_expected_result_for_CovarianceSample_with_Int64() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.CovarianceSample(x => x.Int64Field1, x => x.Int64Field2, null) }); @@ -552,8 +532,7 @@ public void Translate_should_return_expected_result_for_CovarianceSample_with_In [Fact] public void Translate_should_return_expected_result_for_CovarianceSample_with_nullable_Decimal() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.CovarianceSample(x => x.NullableDecimalField1, x => x.NullableDecimalField2, null) }); @@ -572,8 +551,7 @@ public void Translate_should_return_expected_result_for_CovarianceSample_with_nu [Fact] public void Translate_should_return_expected_result_for_CovarianceSample_with_nullable_Double() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.CovarianceSample(x => x.NullableDoubleField1, x => x.NullableDoubleField2, null) }); @@ -592,8 +570,7 @@ public void Translate_should_return_expected_result_for_CovarianceSample_with_nu [Fact] public void Translate_should_return_expected_result_for_CovarianceSample_with_nullable_Int32() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.CovarianceSample(x => 
x.NullableInt32Field1, x => x.NullableInt32Field2, null) }); @@ -612,8 +589,7 @@ public void Translate_should_return_expected_result_for_CovarianceSample_with_nu [Fact] public void Translate_should_return_expected_result_for_CovarianceSample_with_nullable_Int64() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.CovarianceSample(x => x.NullableInt64Field1, x => x.NullableInt64Field2, null) }); @@ -632,8 +608,7 @@ public void Translate_should_return_expected_result_for_CovarianceSample_with_nu [Fact] public void Translate_should_return_expected_result_for_CovarianceSample_with_nullable_Single() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.CovarianceSample(x => x.NullableSingleField1, x => x.NullableSingleField2, null) }); @@ -652,8 +627,7 @@ public void Translate_should_return_expected_result_for_CovarianceSample_with_nu [Fact] public void Translate_should_return_expected_result_for_CovarianceSample_with_Single() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.CovarianceSample(x => x.SingleField1, x => x.SingleField2, null) }); @@ -672,8 +646,7 @@ public void Translate_should_return_expected_result_for_CovarianceSample_with_Si [Fact] public void Translate_should_return_expected_result_for_DenseRank() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields( @@ -696,8 +669,7 @@ public void 
Translate_should_return_expected_result_for_DenseRank() [Fact] public void Translate_should_return_expected_result_for_Derivative_with_Decimal() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields( @@ -720,8 +692,7 @@ public void Translate_should_return_expected_result_for_Derivative_with_Decimal( [Fact] public void Translate_should_return_expected_result_for_Derivative_with_Decimal_and_unit() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields( @@ -751,8 +722,7 @@ public void Translate_should_return_expected_result_for_Derivative_with_Decimal_ [Fact] public void Translate_should_return_expected_result_for_Derivative_with_Double() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields( @@ -775,8 +745,7 @@ public void Translate_should_return_expected_result_for_Derivative_with_Double() [Fact] public void Translate_should_return_expected_result_for_Derivative_with_Double_and_unit() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields( @@ -806,8 +775,7 @@ public void Translate_should_return_expected_result_for_Derivative_with_Double_a [Fact] public void Translate_should_return_expected_result_for_Derivative_with_Int32() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields( @@ -830,8 +798,7 @@ public void Translate_should_return_expected_result_for_Derivative_with_Int32() 
[Fact] public void Translate_should_return_expected_result_for_Derivative_with_Int32_and_unit() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields( @@ -861,8 +828,7 @@ public void Translate_should_return_expected_result_for_Derivative_with_Int32_an [Fact] public void Translate_should_return_expected_result_for_Derivative_with_Int64() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields( @@ -885,8 +851,7 @@ public void Translate_should_return_expected_result_for_Derivative_with_Int64() [Fact] public void Translate_should_return_expected_result_for_Derivative_with_Int64_and_unit() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields( @@ -916,8 +881,7 @@ public void Translate_should_return_expected_result_for_Derivative_with_Int64_an [Fact] public void Translate_should_return_expected_result_for_Derivative_with_Single() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields( @@ -940,8 +904,7 @@ public void Translate_should_return_expected_result_for_Derivative_with_Single() [Fact] public void Translate_should_return_expected_result_for_Derivative_with_Single_and_unit() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields( @@ -971,8 +934,7 @@ public void Translate_should_return_expected_result_for_Derivative_with_Single_a [Fact] public void 
Translate_should_return_expected_result_for_DocumentNumber() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields( @@ -995,8 +957,7 @@ public void Translate_should_return_expected_result_for_DocumentNumber() [Fact] public void Translate_should_return_expected_result_for_ExponentialMovingAverage_with_Decimal_and_alpha() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields( @@ -1019,8 +980,7 @@ public void Translate_should_return_expected_result_for_ExponentialMovingAverage [Fact] public void Translate_should_return_expected_result_for_ExponentialMovingAverage_with_Decimal_and_n() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields( @@ -1043,8 +1003,7 @@ public void Translate_should_return_expected_result_for_ExponentialMovingAverage [Fact] public void Translate_should_return_expected_result_for_ExponentialMovingAverage_with_Double_and_alpha() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields( @@ -1067,8 +1026,7 @@ public void Translate_should_return_expected_result_for_ExponentialMovingAverage [Fact] public void Translate_should_return_expected_result_for_ExponentialMovingAverage_with_Double_and_n() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields( @@ -1091,8 +1049,7 @@ public void Translate_should_return_expected_result_for_ExponentialMovingAverage [Fact] 
public void Translate_should_return_expected_result_for_ExponentialMovingAverage_with_Int32_and_alpha() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields( @@ -1115,8 +1072,7 @@ public void Translate_should_return_expected_result_for_ExponentialMovingAverage [Fact] public void Translate_should_return_expected_result_for_ExponentialMovingAverage_with_Int32_and_n() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields( @@ -1139,8 +1095,7 @@ public void Translate_should_return_expected_result_for_ExponentialMovingAverage [Fact] public void Translate_should_return_expected_result_for_ExponentialMovingAverage_with_Int64_and_alpha() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields( @@ -1163,8 +1118,7 @@ public void Translate_should_return_expected_result_for_ExponentialMovingAverage [Fact] public void Translate_should_return_expected_result_for_ExponentialMovingAverage_with_Int64_and_n() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields( @@ -1187,8 +1141,7 @@ public void Translate_should_return_expected_result_for_ExponentialMovingAverage [Fact] public void Translate_should_return_expected_result_for_ExponentialMovingAverage_with_Single_and_alpha() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields( @@ -1211,8 +1164,7 @@ public void 
Translate_should_return_expected_result_for_ExponentialMovingAverage [Fact] public void Translate_should_return_expected_result_for_ExponentialMovingAverage_with_Single_and_n() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields( @@ -1235,8 +1187,7 @@ public void Translate_should_return_expected_result_for_ExponentialMovingAverage [Fact] public void Translate_should_return_expected_result_for_First() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.First(x => x.Int32Field, null) }); @@ -1255,8 +1206,7 @@ public void Translate_should_return_expected_result_for_First() [Fact] public void Translate_should_return_expected_result_for_Integral_with_Decimal() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields( @@ -1279,8 +1229,7 @@ public void Translate_should_return_expected_result_for_Integral_with_Decimal() [Fact] public void Translate_should_return_expected_result_for_Integral_with_Decimal_and_unit() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields( @@ -1303,8 +1252,7 @@ public void Translate_should_return_expected_result_for_Integral_with_Decimal_an [Fact] public void Translate_should_return_expected_result_for_Integral_with_Double() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields( @@ -1327,8 +1275,7 @@ public void 
Translate_should_return_expected_result_for_Integral_with_Double() [Fact] public void Translate_should_return_expected_result_for_Integral_with_Double_and_unit() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields( @@ -1351,8 +1298,7 @@ public void Translate_should_return_expected_result_for_Integral_with_Double_and [Fact] public void Translate_should_return_expected_result_for_Integral_with_Int32() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields( @@ -1375,8 +1321,7 @@ public void Translate_should_return_expected_result_for_Integral_with_Int32() [Fact] public void Translate_should_return_expected_result_for_Integral_with_Int32_and_unit() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields( @@ -1399,8 +1344,7 @@ public void Translate_should_return_expected_result_for_Integral_with_Int32_and_ [Fact] public void Translate_should_return_expected_result_for_Integral_with_Int64() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields( @@ -1423,8 +1367,7 @@ public void Translate_should_return_expected_result_for_Integral_with_Int64() [Fact] public void Translate_should_return_expected_result_for_Integral_with_Int64_and_unit() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields( @@ -1447,8 +1390,7 @@ public void 
Translate_should_return_expected_result_for_Integral_with_Int64_and_ [Fact] public void Translate_should_return_expected_result_for_Integral_with_Single() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields( @@ -1471,8 +1413,7 @@ public void Translate_should_return_expected_result_for_Integral_with_Single() [Fact] public void Translate_should_return_expected_result_for_Integral_with_Single_and_unit() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields( @@ -1495,8 +1436,7 @@ public void Translate_should_return_expected_result_for_Integral_with_Single_and [Fact] public void Translate_should_return_expected_result_for_Last() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.Last(x => x.Int32Field, null) }); @@ -1516,7 +1456,7 @@ public void Translate_should_return_expected_result_for_Last() public void Translate_should_return_expected_result_for_Locf() { RequireServer.Check().Supports(Feature.SetWindowFieldsLocf); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.Locf(x => x.Int32Field, null) }); @@ -1532,8 +1472,7 @@ public void Translate_should_return_expected_result_for_Locf() [Fact] public void Translate_should_return_expected_result_for_Max() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.Max(x => x.Int32Field, null) }); @@ 
-1549,11 +1488,248 @@ public void Translate_should_return_expected_result_for_Max() } } + [Fact] + public void Translate_should_return_expected_result_for_Median_with_Decimal() + { + RequireServer.Check().Supports(Feature.MedianOperator); + + var collection = Fixture.Collection; + + var aggregate = collection.Aggregate() + .SetWindowFields(output: p => new { Result = p.Median(x => x.DecimalField, null) }); + + var stages = Translate(collection, aggregate); + var expectedStages = new[] { "{ $setWindowFields : { output : { Result : { $median : { input : '$DecimalField', method : 'approximate' } } } } }" }; + AssertStages(stages, expectedStages); + + var results = aggregate.ToList(); + foreach (var result in results) + { + result["Result"].ToDecimal().Should().Be(2.0M); + } + } + + [Fact] + public void Translate_should_return_expected_result_for_Median_with_Double() + { + RequireServer.Check().Supports(Feature.MedianOperator); + + var collection = Fixture.Collection; + + var aggregate = collection.Aggregate() + .SetWindowFields(output: p => new { Result = p.Median(x => x.DoubleField, null) }); + + var stages = Translate(collection, aggregate); + var expectedStages = new[] { "{ $setWindowFields : { output : { Result : { $median : { input : '$DoubleField', method : 'approximate' } } } } }" }; + AssertStages(stages, expectedStages); + + var results = aggregate.ToList(); + foreach (var result in results) + { + result["Result"].AsDouble.Should().Be(2.0); + } + } + + [Fact] + public void Translate_should_return_expected_result_for_Median_with_Int32() + { + RequireServer.Check().Supports(Feature.MedianOperator); + + var collection = Fixture.Collection; + + var aggregate = collection.Aggregate() + .SetWindowFields(output: p => new { Result = p.Median(x => x.Int32Field, null) }); + + var stages = Translate(collection, aggregate); + var expectedStages = new[] { "{ $setWindowFields : { output : { Result : { $median : { input : '$Int32Field', method : 'approximate' } } } } }" }; 
+ AssertStages(stages, expectedStages); + + var results = aggregate.ToList(); + foreach (var result in results) + { + result["Result"].AsDouble.Should().Be(2.0); + } + } + + [Fact] + public void Translate_should_return_expected_result_for_Median_with_Int64() + { + RequireServer.Check().Supports(Feature.MedianOperator); + + var collection = Fixture.Collection; + + var aggregate = collection.Aggregate() + .SetWindowFields(output: p => new { Result = p.Median(x => x.Int64Field, null) }); + + var stages = Translate(collection, aggregate); + var expectedStages = new[] { "{ $setWindowFields : { output : { Result : { $median : { input : '$Int64Field', method : 'approximate' } } } } }" }; + AssertStages(stages, expectedStages); + + var results = aggregate.ToList(); + foreach (var result in results) + { + result["Result"].AsDouble.Should().Be(2.0); + } + } + + [Fact] + public void Translate_should_return_expected_result_for_Median_with_nullable_Decimal() + { + RequireServer.Check().Supports(Feature.MedianOperator); + + var collection = Fixture.Collection; + + var aggregate = collection.Aggregate() + .SetWindowFields(output: p => new { Result = p.Median(x => x.NullableDecimalField, null) }); + + var stages = Translate(collection, aggregate); + var expectedStages = new[] { "{ $setWindowFields : { output : { Result : { $median : { input : '$NullableDecimalField', method : 'approximate' } } } } }" }; + AssertStages(stages, expectedStages); + + var results = aggregate.ToList(); + foreach (var result in results) + { + result["Result"].ToDecimal().Should().Be(1.0M); + } + } + + [Fact] + public void Translate_should_return_expected_result_for_Median_with_nullable_Double() + { + RequireServer.Check().Supports(Feature.MedianOperator); + + var collection = Fixture.Collection; + + var aggregate = collection.Aggregate() + .SetWindowFields(output: p => new { Result = p.Median(x => x.NullableDoubleField, null) }); + + var stages = Translate(collection, aggregate); + var expectedStages = 
new[] { "{ $setWindowFields : { output : { Result : { $median : { input : '$NullableDoubleField', method : 'approximate' } } } } }" }; + AssertStages(stages, expectedStages); + + var results = aggregate.ToList(); + foreach (var result in results) + { + result["Result"].AsDouble.Should().Be(1.0); + } + } + + [Fact] + public void Translate_should_return_expected_result_for_Median_with_nullable_Int32() + { + RequireServer.Check().Supports(Feature.MedianOperator); + + var collection = Fixture.Collection; + + var aggregate = collection.Aggregate() + .SetWindowFields(output: p => new { Result = p.Median(x => x.NullableInt32Field, null) }); + + var stages = Translate(collection, aggregate); + var expectedStages = new[] { "{ $setWindowFields : { output : { Result : { $median : { input : '$NullableInt32Field', method : 'approximate' } } } } }" }; + AssertStages(stages, expectedStages); + + var results = aggregate.ToList(); + foreach (var result in results) + { + result["Result"].AsDouble.Should().Be(1.0); + } + } + + [Fact] + public void Translate_should_return_expected_result_for_Median_with_nullable_Int64() + { + RequireServer.Check().Supports(Feature.MedianOperator); + + var collection = Fixture.Collection; + + var aggregate = collection.Aggregate() + .SetWindowFields(output: p => new { Result = p.Median(x => x.NullableInt64Field, null) }); + + var stages = Translate(collection, aggregate); + var expectedStages = new[] { "{ $setWindowFields : { output : { Result : { $median : { input : '$NullableInt64Field', method : 'approximate' } } } } }" }; + AssertStages(stages, expectedStages); + + var results = aggregate.ToList(); + foreach (var result in results) + { + result["Result"].AsDouble.Should().Be(1.0); + } + } + + [Fact] + public void Translate_should_return_expected_result_for_Median_with_nullable_Single() + { + RequireServer.Check().Supports(Feature.MedianOperator); + + var collection = Fixture.Collection; + + var aggregate = collection.Aggregate() + 
.SetWindowFields(output: p => new { Result = p.Median(x => x.NullableSingleField, null) }); + + var stages = Translate(collection, aggregate); + var expectedStages = new[] { "{ $setWindowFields : { output : { Result : { $median : { input : '$NullableSingleField', method : 'approximate' } } } } }" }; + AssertStages(stages, expectedStages); + + var results = aggregate.ToList(); + foreach (var result in results) + { + result["Result"].AsDouble.Should().Be(1.0); + } + } + + [Fact] + public void Translate_should_return_expected_result_for_Median_with_Single() + { + RequireServer.Check().Supports(Feature.MedianOperator); + + var collection = Fixture.Collection; + + var aggregate = collection.Aggregate() + .SetWindowFields(output: p => new { Result = p.Median(x => x.SingleField, null) }); + + var stages = Translate(collection, aggregate); + var expectedStages = new[] { "{ $setWindowFields : { output : { Result : { $median : { input : '$SingleField', method : 'approximate' } } } } }" }; + AssertStages(stages, expectedStages); + + var results = aggregate.ToList(); + foreach (var result in results) + { + result["Result"].AsDouble.Should().Be(2.0); + } + } + + [Fact] + public void Translate_should_return_expected_result_for_Median_with_window() + { + RequireServer.Check().Supports(Feature.MedianOperator); + + var collection = Fixture.Collection; + + var aggregate = collection.Aggregate() + .SetWindowFields( + partitionBy: x => 1, + sortBy: Builders<C>.Sort.Ascending(x => x.Id), + output: p => new { + Result = p.Median(x => x.Int32Field, DocumentsWindow.Create(-1, 1)) + }); + + var stages = Translate(collection, aggregate); + var expectedStages = new[] + { + "{ $setWindowFields : { partitionBy : 1, sortBy : { _id : 1 }, output : { Result : { $median : { input : '$Int32Field', method : 'approximate' }, window : { documents : [-1, 1] } } } } }" + }; + AssertStages(stages, expectedStages); + + var results = aggregate.ToList(); + results[0]["Result"].AsDouble.Should().Be(1.0); + 
results[1]["Result"].AsDouble.Should().Be(2.0); + results[2]["Result"].AsDouble.Should().Be(2.0); + } + [Fact] public void Translate_should_return_expected_result_for_Min() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.Min(x => x.Int32Field, null) }); @@ -1569,11 +1745,296 @@ public void Translate_should_return_expected_result_for_Min() } } + [Fact] + public void Translate_should_return_expected_result_for_Percentile_with_Decimal() + { + RequireServer.Check().Supports(Feature.PercentileOperator); + + var collection = Fixture.Collection; + + var aggregate = collection.Aggregate() + .SetWindowFields(output: p => new { Result = p.Percentile(x => x.DecimalField, new[] { 0.5 }, null) }); + + var stages = Translate(collection, aggregate); + var expectedStages = new[] { "{ $setWindowFields : { output : { Result : { $percentile : { input : '$DecimalField', p : [0.5], method : 'approximate' } } } } }" }; + AssertStages(stages, expectedStages); + + var results = aggregate.ToList(); + foreach (var result in results) + { + result["Result"].AsBsonArray[0].ToDecimal().Should().Be(2.0M); + } + } + + [Fact] + public void Translate_should_return_expected_result_for_Percentile_with_Double() + { + RequireServer.Check().Supports(Feature.PercentileOperator); + + var collection = Fixture.Collection; + + var aggregate = collection.Aggregate() + .SetWindowFields(output: p => new { Result = p.Percentile(x => x.DoubleField, new[] { 0.5 }, null) }); + + var stages = Translate(collection, aggregate); + var expectedStages = new[] { "{ $setWindowFields : { output : { Result : { $percentile : { input : '$DoubleField', p : [0.5], method : 'approximate' } } } } }" }; + AssertStages(stages, expectedStages); + + var results = aggregate.ToList(); + foreach (var result in results) + { + 
result["Result"].AsBsonArray[0].AsDouble.Should().Be(2.0); + } + } + + [Fact] + public void Translate_should_return_expected_result_for_Percentile_with_Int32() + { + RequireServer.Check().Supports(Feature.PercentileOperator); + + var collection = Fixture.Collection; + + var aggregate = collection.Aggregate() + .SetWindowFields(output: p => new { Result = p.Percentile(x => x.Int32Field, new[] { 0.5 }, null) }); + + var stages = Translate(collection, aggregate); + var expectedStages = new[] { "{ $setWindowFields : { output : { Result : { $percentile : { input : '$Int32Field', p : [0.5], method : 'approximate' } } } } }" }; + AssertStages(stages, expectedStages); + + var results = aggregate.ToList(); + foreach (var result in results) + { + result["Result"].AsBsonArray[0].AsDouble.Should().Be(2.0); + } + } + + [Fact] + public void Translate_should_return_expected_result_for_Percentile_with_Int64() + { + RequireServer.Check().Supports(Feature.PercentileOperator); + + var collection = Fixture.Collection; + + var aggregate = collection.Aggregate() + .SetWindowFields(output: p => new { Result = p.Percentile(x => x.Int64Field, new[] { 0.5 }, null) }); + + var stages = Translate(collection, aggregate); + var expectedStages = new[] { "{ $setWindowFields : { output : { Result : { $percentile : { input : '$Int64Field', p : [0.5], method : 'approximate' } } } } }" }; + AssertStages(stages, expectedStages); + + var results = aggregate.ToList(); + foreach (var result in results) + { + result["Result"].AsBsonArray[0].AsDouble.Should().Be(2.0); + } + } + + [Fact] + public void Translate_should_return_expected_result_for_Percentile_with_nullable_Decimal() + { + RequireServer.Check().Supports(Feature.PercentileOperator); + + var collection = Fixture.Collection; + + var aggregate = collection.Aggregate() + .SetWindowFields(output: p => new { Result = p.Percentile(x => x.NullableDecimalField, new[] { 0.5 }, null) }); + + var stages = Translate(collection, aggregate); + var 
expectedStages = new[] { "{ $setWindowFields : { output : { Result : { $percentile : { input : '$NullableDecimalField', p : [0.5], method : 'approximate' } } } } }" }; + AssertStages(stages, expectedStages); + + var results = aggregate.ToList(); + foreach (var result in results) + { + result["Result"].AsBsonArray[0].ToDecimal().Should().Be(1.0M); + } + } + + [Fact] + public void Translate_should_return_expected_result_for_Percentile_with_nullable_Double() + { + RequireServer.Check().Supports(Feature.PercentileOperator); + + var collection = Fixture.Collection; + + var aggregate = collection.Aggregate() + .SetWindowFields(output: p => new { Result = p.Percentile(x => x.NullableDoubleField, new[] { 0.5 }, null) }); + + var stages = Translate(collection, aggregate); + var expectedStages = new[] { "{ $setWindowFields : { output : { Result : { $percentile : { input : '$NullableDoubleField', p : [0.5], method : 'approximate' } } } } }" }; + AssertStages(stages, expectedStages); + + var results = aggregate.ToList(); + foreach (var result in results) + { + result["Result"].AsBsonArray[0].AsDouble.Should().Be(1.0); + } + } + + [Fact] + public void Translate_should_return_expected_result_for_Percentile_with_nullable_Int32() + { + RequireServer.Check().Supports(Feature.PercentileOperator); + + var collection = Fixture.Collection; + + var aggregate = collection.Aggregate() + .SetWindowFields(output: p => new { Result = p.Percentile(x => x.NullableInt32Field, new[] { 0.5 }, null) }); + + var stages = Translate(collection, aggregate); + var expectedStages = new[] { "{ $setWindowFields : { output : { Result : { $percentile : { input : '$NullableInt32Field', p : [0.5], method : 'approximate' } } } } }" }; + AssertStages(stages, expectedStages); + + var results = aggregate.ToList(); + foreach (var result in results) + { + result["Result"].AsBsonArray[0].AsDouble.Should().Be(1.0); + } + } + + [Fact] + public void 
Translate_should_return_expected_result_for_Percentile_with_nullable_Int64() + { + RequireServer.Check().Supports(Feature.PercentileOperator); + + var collection = Fixture.Collection; + + var aggregate = collection.Aggregate() + .SetWindowFields(output: p => new { Result = p.Percentile(x => x.NullableInt64Field, new[] { 0.5 }, null) }); + + var stages = Translate(collection, aggregate); + var expectedStages = new[] { "{ $setWindowFields : { output : { Result : { $percentile : { input : '$NullableInt64Field', p : [0.5], method : 'approximate' } } } } }" }; + AssertStages(stages, expectedStages); + + var results = aggregate.ToList(); + foreach (var result in results) + { + result["Result"].AsBsonArray[0].AsDouble.Should().Be(1.0); + } + } + + [Fact] + public void Translate_should_return_expected_result_for_Percentile_with_nullable_Single() + { + RequireServer.Check().Supports(Feature.PercentileOperator); + + var collection = Fixture.Collection; + + var aggregate = collection.Aggregate() + .SetWindowFields(output: p => new { Result = p.Percentile(x => x.NullableSingleField, new[] { 0.5 }, null) }); + + var stages = Translate(collection, aggregate); + var expectedStages = new[] { "{ $setWindowFields : { output : { Result : { $percentile : { input : '$NullableSingleField', p : [0.5], method : 'approximate' } } } } }" }; + AssertStages(stages, expectedStages); + + var results = aggregate.ToList(); + foreach (var result in results) + { + result["Result"].AsBsonArray[0].AsDouble.Should().Be(1.0); + } + } + + [Fact] + public void Translate_should_return_expected_result_for_Percentile_with_multiple_percentiles() + { + RequireServer.Check().Supports(Feature.PercentileOperator); + + var collection = Fixture.Collection; + + var aggregate = collection.Aggregate() + .SetWindowFields(output: p => new { Result = p.Percentile(x => x.Int32Field, new[] { 0.25, 0.75 }, null) }); + + var stages = Translate(collection, aggregate); + var expectedStages = new[] { "{ $setWindowFields : { 
output : { Result : { $percentile : { input : '$Int32Field', p : [0.25, 0.75], method : 'approximate' } } } } }" }; + AssertStages(stages, expectedStages); + + var results = aggregate.ToList(); + foreach (var result in results) + { + var array = result["Result"].AsBsonArray; + array[0].AsDouble.Should().Be(1.0); + array[1].AsDouble.Should().Be(3.0); + } + } + + [Fact] + public void Translate_should_return_expected_result_for_Percentile_with_Single() + { + RequireServer.Check().Supports(Feature.PercentileOperator); + + var collection = Fixture.Collection; + + var aggregate = collection.Aggregate() + .SetWindowFields(output: p => new { Result = p.Percentile(x => x.SingleField, new[] { 0.5 }, null) }); + + var stages = Translate(collection, aggregate); + var expectedStages = new[] { "{ $setWindowFields : { output : { Result : { $percentile : { input : '$SingleField', p : [0.5], method : 'approximate' } } } } }" }; + AssertStages(stages, expectedStages); + + var results = aggregate.ToList(); + foreach (var result in results) + { + result["Result"].AsBsonArray[0].AsDouble.Should().Be(2.0); + } + } + + [Fact] + public void Translate_should_return_expected_result_for_Percentile_with_window() + { + RequireServer.Check().Supports(Feature.PercentileOperator); + + var collection = Fixture.Collection; + + var aggregate = collection.Aggregate() + .SetWindowFields( + partitionBy: x => 1, + sortBy: Builders<C>.Sort.Ascending(x => x.Id), + output: p => new { + Result = p.Percentile(x => x.Int32Field, new[] { 0.5 }, DocumentsWindow.Create(-1, 1)) + }); + + var stages = Translate(collection, aggregate); + var expectedStages = new[] + { + "{ $setWindowFields : { partitionBy : 1, sortBy : { _id : 1 }, output : { Result : { $percentile : { input : '$Int32Field', p : [0.5], method : 'approximate' }, window : { documents : [-1, 1] } } } } }" + }; + AssertStages(stages, expectedStages); + + var results = aggregate.ToList(); + results[0]["Result"].AsBsonArray[0].AsDouble.Should().Be(1.0); 
+ results[1]["Result"].AsBsonArray[0].AsDouble.Should().Be(2.0); + results[2]["Result"].AsBsonArray[0].AsDouble.Should().Be(2.0); + } + + [Fact] + public void Translate_should_return_expected_result_for_Percentile_with_List_input() + { + RequireServer.Check().Supports(Feature.PercentileOperator); + + var collection = Fixture.Collection; + var percentiles = new List<double> { 0.25, 0.5, 0.75 }; + + var aggregate = collection.Aggregate() + .SetWindowFields(output: p => new { Result = p.Percentile(x => x.Int32Field, percentiles, null) }); + + var stages = Translate(collection, aggregate); + var expectedStages = new[] { "{ $setWindowFields : { output : { Result : { $percentile : { input : '$Int32Field', p : [0.25, 0.5, 0.75], method : 'approximate' } } } } }" }; + AssertStages(stages, expectedStages); + + var results = aggregate.ToList(); + foreach (var result in results) + { + var array = result["Result"].AsBsonArray; + array[0].AsDouble.Should().Be(1.0); + array[1].AsDouble.Should().Be(2.0); + array[2].AsDouble.Should().Be(3.0); + } + } + [Fact] public void Translate_should_return_expected_result_for_Push() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.Push(x => x.Int32Field, null) }); @@ -1592,8 +2053,7 @@ public void Translate_should_return_expected_result_for_Push() [Fact] public void Translate_should_return_expected_result_for_Rank() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields( @@ -1616,8 +2076,7 @@ public void Translate_should_return_expected_result_for_Rank() [Fact] public void Translate_should_return_expected_result_for_Shift() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var 
collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields( @@ -1640,8 +2099,7 @@ public void Translate_should_return_expected_result_for_Shift() [Fact] public void Translate_should_return_expected_result_for_Shift_with_defaultValue() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields( @@ -1664,8 +2122,7 @@ public void Translate_should_return_expected_result_for_Shift_with_defaultValue( [Fact] public void Translate_should_return_expected_result_for_StandardDeviationPopulation_with_Decimal() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.StandardDeviationPopulation(x => x.DecimalField, null) }); @@ -1684,8 +2141,7 @@ public void Translate_should_return_expected_result_for_StandardDeviationPopulat [Fact] public void Translate_should_return_expected_result_for_StandardDeviationPopulation_with_Double() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.StandardDeviationPopulation(x => x.DoubleField, null) }); @@ -1704,8 +2160,7 @@ public void Translate_should_return_expected_result_for_StandardDeviationPopulat [Fact] public void Translate_should_return_expected_result_for_StandardDeviationPopulation_with_Int32() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.StandardDeviationPopulation(x => x.Int32Field, null) }); @@ -1724,8 +2179,7 @@ public void 
Translate_should_return_expected_result_for_StandardDeviationPopulat [Fact] public void Translate_should_return_expected_result_for_StandardDeviationPopulation_with_Int64() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.StandardDeviationPopulation(x => x.Int64Field, null) }); @@ -1744,8 +2198,7 @@ public void Translate_should_return_expected_result_for_StandardDeviationPopulat [Fact] public void Translate_should_return_expected_result_for_StandardDeviationPopulation_with_nullable_Decimal() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.StandardDeviationPopulation(x => x.NullableDecimalField, null) }); @@ -1764,8 +2217,7 @@ public void Translate_should_return_expected_result_for_StandardDeviationPopulat [Fact] public void Translate_should_return_expected_result_for_StandardDeviationPopulation_with_nullable_Double() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.StandardDeviationPopulation(x => x.NullableDoubleField, null) }); @@ -1784,8 +2236,7 @@ public void Translate_should_return_expected_result_for_StandardDeviationPopulat [Fact] public void Translate_should_return_expected_result_for_StandardDeviationPopulation_with_nullable_Int32() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.StandardDeviationPopulation(x => x.NullableInt32Field, null) }); @@ -1804,8 +2255,7 
@@ public void Translate_should_return_expected_result_for_StandardDeviationPopulat [Fact] public void Translate_should_return_expected_result_for_StandardDeviationPopulation_with_nullable_Int64() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.StandardDeviationPopulation(x => x.NullableInt64Field, null) }); @@ -1824,8 +2274,7 @@ public void Translate_should_return_expected_result_for_StandardDeviationPopulat [Fact] public void Translate_should_return_expected_result_for_StandardDeviationPopulation_with_nullable_Single() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.StandardDeviationPopulation(x => x.NullableSingleField, null) }); @@ -1844,8 +2293,7 @@ public void Translate_should_return_expected_result_for_StandardDeviationPopulat [Fact] public void Translate_should_return_expected_result_for_StandardDeviationPopulation_with_Single() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.StandardDeviationPopulation(x => x.SingleField, null) }); @@ -1864,8 +2312,7 @@ public void Translate_should_return_expected_result_for_StandardDeviationPopulat [Fact] public void Translate_should_return_expected_result_for_StandardDeviationSample_with_Decimal() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.StandardDeviationSample(x => x.DecimalField, null) }); @@ -1884,8 +2331,7 @@ public 
void Translate_should_return_expected_result_for_StandardDeviationSample_ [Fact] public void Translate_should_return_expected_result_for_StandardDeviationSample_with_Double() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.StandardDeviationSample(x => x.DoubleField, null) }); @@ -1904,8 +2350,7 @@ public void Translate_should_return_expected_result_for_StandardDeviationSample_ [Fact] public void Translate_should_return_expected_result_for_StandardDeviationSample_with_Int32() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.StandardDeviationSample(x => x.Int32Field, null) }); @@ -1924,8 +2369,7 @@ public void Translate_should_return_expected_result_for_StandardDeviationSample_ [Fact] public void Translate_should_return_expected_result_for_StandardDeviationSample_with_Int64() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.StandardDeviationSample(x => x.Int64Field, null) }); @@ -1944,8 +2388,7 @@ public void Translate_should_return_expected_result_for_StandardDeviationSample_ [Fact] public void Translate_should_return_expected_result_for_StandardDeviationSample_with_nullable_Decimal() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.StandardDeviationSample(x => x.NullableDecimalField, null) }); @@ -1964,8 +2407,7 @@ public void 
Translate_should_return_expected_result_for_StandardDeviationSample_ [Fact] public void Translate_should_return_expected_result_for_StandardDeviationSample_with_nullable_Double() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.StandardDeviationSample(x => x.NullableDoubleField, null) }); @@ -1984,8 +2426,7 @@ public void Translate_should_return_expected_result_for_StandardDeviationSample_ [Fact] public void Translate_should_return_expected_result_for_StandardDeviationSample_with_nullable_Int32() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.StandardDeviationSample(x => x.NullableInt32Field, null) }); @@ -2004,8 +2445,7 @@ public void Translate_should_return_expected_result_for_StandardDeviationSample_ [Fact] public void Translate_should_return_expected_result_for_StandardDeviationSample_with_nullable_Int64() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.StandardDeviationSample(x => x.NullableInt64Field, null) }); @@ -2024,8 +2464,7 @@ public void Translate_should_return_expected_result_for_StandardDeviationSample_ [Fact] public void Translate_should_return_expected_result_for_StandardDeviationSample_with_nullable_Single() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.StandardDeviationSample(x => x.NullableSingleField, null) }); @@ -2044,8 +2483,7 @@ public void 
Translate_should_return_expected_result_for_StandardDeviationSample_ [Fact] public void Translate_should_return_expected_result_for_StandardDeviationSample_with_Single() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.StandardDeviationSample(x => x.SingleField, null) }); @@ -2064,8 +2502,7 @@ public void Translate_should_return_expected_result_for_StandardDeviationSample_ [Fact] public void Translate_should_return_expected_result_for_Sum_with_Decimal() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.Sum(x => x.DecimalField, null) }); @@ -2084,8 +2521,7 @@ public void Translate_should_return_expected_result_for_Sum_with_Decimal() [Fact] public void Translate_should_return_expected_result_for_Sum_with_Double() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.Sum(x => x.DoubleField, null) }); @@ -2104,8 +2540,7 @@ public void Translate_should_return_expected_result_for_Sum_with_Double() [Fact] public void Translate_should_return_expected_result_for_Sum_with_Int32() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.Sum(x => x.Int32Field, null) }); @@ -2124,8 +2559,7 @@ public void Translate_should_return_expected_result_for_Sum_with_Int32() [Fact] public void Translate_should_return_expected_result_for_Sum_with_Int64() { - 
RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.Sum(x => x.Int64Field, null) }); @@ -2144,8 +2578,7 @@ public void Translate_should_return_expected_result_for_Sum_with_Int64() [Fact] public void Translate_should_return_expected_result_for_Sum_with_nullable_Decimal() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.Sum(x => x.NullableDecimalField, null) }); @@ -2164,8 +2597,7 @@ public void Translate_should_return_expected_result_for_Sum_with_nullable_Decima [Fact] public void Translate_should_return_expected_result_for_Sum_with_nullable_Double() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.Sum(x => x.NullableDoubleField, null) }); @@ -2184,8 +2616,7 @@ public void Translate_should_return_expected_result_for_Sum_with_nullable_Double [Fact] public void Translate_should_return_expected_result_for_Sum_with_nullable_Int32() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.Sum(x => x.NullableInt32Field, null) }); @@ -2204,8 +2635,7 @@ public void Translate_should_return_expected_result_for_Sum_with_nullable_Int32( [Fact] public void Translate_should_return_expected_result_for_Sum_with_nullable_Int64() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() 
.SetWindowFields(output: p => new { Result = p.Sum(x => x.NullableInt64Field, null) }); @@ -2224,8 +2654,7 @@ public void Translate_should_return_expected_result_for_Sum_with_nullable_Int64( [Fact] public void Translate_should_return_expected_result_for_Sum_with_nullable_Single() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.Sum(x => x.NullableSingleField, null) }); @@ -2244,8 +2673,7 @@ public void Translate_should_return_expected_result_for_Sum_with_nullable_Single [Fact] public void Translate_should_return_expected_result_for_Sum_with_Single() { - RequireServer.Check().Supports(Feature.SetWindowFields); - var collection = CreateCollection(); + var collection = Fixture.Collection; var aggregate = collection.Aggregate() .SetWindowFields(output: p => new { Result = p.Sum(x => x.SingleField, null) }); @@ -2261,61 +2689,6 @@ public void Translate_should_return_expected_result_for_Sum_with_Single() } } - private IMongoCollection<C> CreateCollection() - { - var collection = GetCollection<C>(); - var documents = CreateTestDocuments(); - CreateCollection(collection, documents); - return collection; - } - - private IEnumerable<C> CreateTestDocuments() - { - var documents = new C[3]; - for (var n = 1; n <= 3; n++) - { - var document = new C - { - Id = n, - DayField = new DateTime(2022, 01, 01, 0, 0, 0, DateTimeKind.Utc).AddDays(n - 1), - DecimalField = n, - DecimalField1 = n, - DecimalField2 = n, - DoubleField = n, - DoubleField1 = n, - DoubleField2 = n, - Int32Field = n, - Int32Field1 = n, - Int32Field2 = n, - Int64Field = n, - Int64Field1 = n, - Int64Field2 = n, - NullableDecimalField = n < 3 ? n : null, - NullableDecimalField1 = n < 3 ? n : null, - NullableDecimalField2 = n < 3 ? n : null, - NullableDoubleField = n < 3 ? n : null, - NullableDoubleField1 = n < 3 ? 
n : null, - NullableDoubleField2 = n < 3 ? n : null, - NullableInt32Field = n < 3 ? n : null, - NullableInt32Field1 = n < 3 ? n : null, - NullableInt32Field2 = n < 3 ? n : null, - NullableInt64Field = n < 3 ? n : null, - NullableInt64Field1 = n < 3 ? n : null, - NullableInt64Field2 = n < 3 ? n : null, - NullableSingleField = n < 3 ? n : null, - NullableSingleField1 = n < 3 ? n : null, - NullableSingleField2 = n < 3 ? n : null, - SingleField = n, - SingleField1 = n, - SingleField2 = n, - }; - - documents[n - 1] = document; - } - - return documents; - } - public class C { public int Id { get; set; } @@ -2357,5 +2730,53 @@ public class C public float SingleField1 { get; set; } public float SingleField2 { get; set; } } + + public sealed class ClassFixture : MongoCollectionFixture<C> + { + protected override IEnumerable<C> InitialData + { + get + { + for (var n = 1; n <= 3; n++) + { + yield return new C + { + Id = n, + DayField = new DateTime(2022, 01, 01, 0, 0, 0, DateTimeKind.Utc).AddDays(n - 1), + DecimalField = n, + DecimalField1 = n, + DecimalField2 = n, + DoubleField = n, + DoubleField1 = n, + DoubleField2 = n, + Int32Field = n, + Int32Field1 = n, + Int32Field2 = n, + Int64Field = n, + Int64Field1 = n, + Int64Field2 = n, + NullableDecimalField = n < 3 ? n : null, + NullableDecimalField1 = n < 3 ? n : null, + NullableDecimalField2 = n < 3 ? n : null, + NullableDoubleField = n < 3 ? n : null, + NullableDoubleField1 = n < 3 ? n : null, + NullableDoubleField2 = n < 3 ? n : null, + NullableInt32Field = n < 3 ? n : null, + NullableInt32Field1 = n < 3 ? n : null, + NullableInt32Field2 = n < 3 ? n : null, + NullableInt64Field = n < 3 ? n : null, + NullableInt64Field1 = n < 3 ? n : null, + NullableInt64Field2 = n < 3 ? n : null, + NullableSingleField = n < 3 ? n : null, + NullableSingleField1 = n < 3 ? n : null, + NullableSingleField2 = n < 3 ? 
n : null, + SingleField = n, + SingleField1 = n, + SingleField2 = n, + }; + } + } + } + } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/ZipMethodToAggregationExpressionTranslatorTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/ZipMethodToAggregationExpressionTranslatorTests.cs index e579711e810..f4dcb953dca 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/ZipMethodToAggregationExpressionTranslatorTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/MethodTranslators/ZipMethodToAggregationExpressionTranslatorTests.cs @@ -13,22 +13,28 @@ * limitations under the License. */ +using System.Collections.Generic; using System.Linq; using FluentAssertions; -using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using MongoDB.TestHelpers.XunitExtensions; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Translators.ExpressionToAggregationExpressionTranslators.MethodTranslators { - public class ZipMethodToAggregationExpressionTranslatorTests : Linq3IntegrationTest + public class ZipMethodToAggregationExpressionTranslatorTests : LinqIntegrationTest<ZipMethodToAggregationExpressionTranslatorTests.ClassFixture> { + public ZipMethodToAggregationExpressionTranslatorTests(ClassFixture fixture) + : base(fixture) + { + } + [Theory] [ParameterAttributeData] public void Enumerable_Zip_should_work( [Values(false, true)] bool withNestedAsQueryableSource2) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryableSource2 ? 
collection.AsQueryable().Select(x => x.A.Zip(x.B.AsQueryable(), (x, y) => x * y)) : @@ -50,7 +56,7 @@ public void Enumerable_Zip_should_work( public void Queryable_Zip_should_work( [Values(false, true)] bool withNestedAsQueryableSource2) { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = withNestedAsQueryableSource2 ? collection.AsQueryable().Select(x => x.A.AsQueryable().Zip(x.B.AsQueryable(), (x, y) => x * y)) : @@ -67,23 +73,22 @@ public void Queryable_Zip_should_work( results[3].Should().Equal(3, 8); } - private IMongoCollection<C> CreateCollection() - { - var collection = GetCollection<C>("test"); - CreateCollection( - collection, - new C { Id = 0, A = new int[0], B = new int[0] }, - new C { Id = 1, A = new int[0], B = new int[] { 1 } }, - new C { Id = 2, A = new int[] { 1 }, B = new int[0] }, - new C { Id = 3, A = new int[] { 1, 2 }, B = new int[] { 3, 4, 5 } }); - return collection; - } - - private class C + public class C { public int Id { get; set; } public int[] A { get; set; } public int[] B { get; set; } } + + public sealed class ClassFixture : MongoCollectionFixture<C> + { + protected override IEnumerable<C> InitialData => + [ + new C { Id = 0, A = new int[0], B = new int[0] }, + new C { Id = 1, A = new int[0], B = new int[] { 1 } }, + new C { Id = 2, A = new int[] { 1 }, B = new int[0] }, + new C { Id = 3, A = new int[] { 1, 2 }, B = new int[] { 3, 4, 5 } } + ]; + } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/NegateExpressionToAggregationExpressionTranslatorTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/NegateExpressionToAggregationExpressionTranslatorTests.cs index b7a7683eba7..1c85f959d37 100644 --- 
a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/NegateExpressionToAggregationExpressionTranslatorTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/NegateExpressionToAggregationExpressionTranslatorTests.cs @@ -13,7 +13,7 @@ * limitations under the License. */ -using System; +using System.Collections.Generic; using System.Linq; using FluentAssertions; using MongoDB.Bson; @@ -21,17 +21,23 @@ using MongoDB.Driver.Core.Misc; using MongoDB.Driver.Core.TestHelpers.XunitExtensions; using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using MongoDB.TestHelpers.XunitExtensions; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Translators.ExpressionToAggregationExpressionTranslators { - public class NegateExpressionToAggregationExpressionTranslatorTests: Linq3IntegrationTest + public class NegateExpressionToAggregationExpressionTranslatorTests: LinqIntegrationTest<NegateExpressionToAggregationExpressionTranslatorTests.ClassFixture> { + public NegateExpressionToAggregationExpressionTranslatorTests(ClassFixture fixture) + : base(fixture) + { + } + [Fact] public void Negate_int_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = Queryable.Select(collection.AsQueryable(), i => -i.Int); var stages = Translate(collection, queryable); @@ -46,7 +52,7 @@ public void Negate_int_should_work() [Fact] public void Negate_long_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = Queryable.Select(collection.AsQueryable(), i => -i.Long); var stages = Translate(collection, queryable); @@ -61,7 +67,7 @@ public void Negate_long_should_work() [Fact] public void Negate_single_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = 
Queryable.Select(collection.AsQueryable(), i => -i.Single); var stages = Translate(collection, queryable); @@ -76,7 +82,7 @@ public void Negate_single_should_work() [Fact] public void Negate_double_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = Queryable.Select(collection.AsQueryable(), i => -i.Double); var stages = Translate(collection, queryable); @@ -91,7 +97,7 @@ public void Negate_double_should_work() [Fact] public void Negate_decimal128_should_work() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = Queryable.Select(collection.AsQueryable(), i => -i.DecimalAsDecimal128); var stages = Translate(collection, queryable); @@ -109,7 +115,7 @@ public void Negate_decimal_as_string_should_throw( [Values(false, true)] bool enableClientSideProjections) { RequireServer.Check().Supports(Feature.FindProjectionExpressions); - var collection = CreateCollection(); + var collection = Fixture.Collection; var translationOptions = new ExpressionTranslationOptions { EnableClientSideProjections = enableClientSideProjections }; var queryable = Queryable.Select(collection.AsQueryable(translationOptions), i => -i.DecimalAsString); @@ -130,20 +136,7 @@ public void Negate_decimal_as_string_should_throw( } } - private IMongoCollection<Data> CreateCollection() - { - var collection = GetCollection<Data>("test"); - CreateCollection( - collection, - new Data { Id = 1, Int = -10, Double = -10, Single = -10, Long = -10, DecimalAsString = -10, DecimalAsDecimal128 = -10}, - new Data { Id = 2, Int = 5, Double = 5, Single = 5, Long = 5, DecimalAsString = 5, DecimalAsDecimal128 = 5}, - new Data { Id = 3, Int = 0, Double = 0, Single = 0, Long = 0, DecimalAsString = 0, DecimalAsDecimal128 = 0}, - new Data { Id = 4, Int = -int.MaxValue, Double = -double.MaxValue, Single = -float.MaxValue, Long = -long.MaxValue, DecimalAsString = -decimal.MaxValue, DecimalAsDecimal128 = -decimal.MaxValue}, - new 
Data { Id = 5, Int = int.MaxValue, Double = double.MaxValue, Single = float.MaxValue, Long = long.MaxValue, DecimalAsString = decimal.MaxValue, DecimalAsDecimal128 = decimal.MaxValue}); - return collection; - } - - private class Data + public class Data { public int Id { get; set; } public int Int { get; set; } @@ -155,5 +148,17 @@ private class Data [BsonRepresentation(BsonType.Decimal128)] public decimal DecimalAsDecimal128 { get; set; } } + + public sealed class ClassFixture : MongoCollectionFixture<Data> + { + protected override IEnumerable<Data> InitialData => + [ + new Data { Id = 1, Int = -10, Double = -10, Single = -10, Long = -10, DecimalAsString = -10, DecimalAsDecimal128 = -10}, + new Data { Id = 2, Int = 5, Double = 5, Single = 5, Long = 5, DecimalAsString = 5, DecimalAsDecimal128 = 5}, + new Data { Id = 3, Int = 0, Double = 0, Single = 0, Long = 0, DecimalAsString = 0, DecimalAsDecimal128 = 0}, + new Data { Id = 4, Int = -int.MaxValue, Double = -double.MaxValue, Single = -float.MaxValue, Long = -long.MaxValue, DecimalAsString = -decimal.MaxValue, DecimalAsDecimal128 = -decimal.MaxValue}, + new Data { Id = 5, Int = int.MaxValue, Double = double.MaxValue, Single = float.MaxValue, Long = long.MaxValue, DecimalAsString = decimal.MaxValue, DecimalAsDecimal128 = decimal.MaxValue} + ]; + } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/NewKeyValuePairExpressionToAggregationExpressionTranslatorTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/NewKeyValuePairExpressionToAggregationExpressionTranslatorTests.cs index 765f031469f..68d571ab9b0 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/NewKeyValuePairExpressionToAggregationExpressionTranslatorTests.cs +++ 
b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToAggregationExpressionTranslators/NewKeyValuePairExpressionToAggregationExpressionTranslatorTests.cs @@ -21,7 +21,6 @@ namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Translators.ExpressionToAggregationExpressionTranslators; -#if NET6_0_OR_GREATER || NETCOREAPP3_1_OR_GREATER public class NewKeyValuePairExpressionToAggregationExpressionTranslatorTests : LinqIntegrationTest<NewKeyValuePairExpressionToAggregationExpressionTranslatorTests.ClassFixture> { public NewKeyValuePairExpressionToAggregationExpressionTranslatorTests(ClassFixture fixture) @@ -45,6 +44,7 @@ public void NewKeyValuePair_should_translate() result.Value.Should().Be(42); } +#if NET6_0_OR_GREATER || NETCOREAPP3_1_OR_GREATER [Fact] public void KeyValuePair_Create_should_translate() { @@ -60,6 +60,7 @@ public void KeyValuePair_Create_should_translate() result.Key.Should().Be("X"); result.Value.Should().Be(42); } +#endif public class C { @@ -74,5 +75,3 @@ public sealed class ClassFixture : MongoCollectionFixture<C> ]; } } - -#endif diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToExecutableQueryTranslators/ExecutableQueryTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToExecutableQueryTranslators/ExecutableQueryTests.cs index bfafb2b05ca..b5af795df82 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToExecutableQueryTranslators/ExecutableQueryTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToExecutableQueryTranslators/ExecutableQueryTests.cs @@ -14,19 +14,25 @@ */ using System; +using System.Collections.Generic; using System.Linq; using FluentAssertions; -using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Translators.ExpressionToExecutableQueryTranslators { - public class 
ExecutableQueryTests : Linq3IntegrationTest + public class ExecutableQueryTests : LinqIntegrationTest<ExecutableQueryTests.ClassFixture> { + public ExecutableQueryTests(ClassFixture fixture) + : base(fixture) + { + } + [Fact] public void Cast_to_object_should_work() { - var collection = GetCollection(); + var collection = Fixture.Collection; var queryable1 = collection.AsQueryable(); var queryable2 = queryable1.Provider.CreateQuery<object>(queryable1.Expression); @@ -38,7 +44,7 @@ public void Cast_to_object_should_work() [Fact] public void Cast_aggregation_to_object_should_work() { - var collection = GetCollection(); + var collection = Fixture.Collection; var queryable1 = collection.AsQueryable().GroupBy( p => p.Type, (k, p) => new ProductAggregation {Type = k, MaxPrice = p.Select(i => i.Price).Max()}); @@ -52,7 +58,7 @@ public void Cast_aggregation_to_object_should_work() [Fact] public void Cast_int_to_object_should_work() { - var collection = GetCollection(); + var collection = Fixture.Collection; var queryable1 = collection.AsQueryable().Select(p => p.Id); var queryable2 = queryable1.Provider.CreateQuery<object>(queryable1.Expression); @@ -64,7 +70,7 @@ public void Cast_int_to_object_should_work() [Fact] public void Cast_to_nullable_should_work() { - var collection = GetCollection(); + var collection = Fixture.Collection; var queryable1 = collection.AsQueryable().Select(p => p.Id); var queryable2 = queryable1.Provider.CreateQuery<int?>(queryable1.Expression); @@ -76,7 +82,7 @@ public void Cast_to_nullable_should_work() [Fact] public void Cast_to_incompatible_type_should_throw() { - var collection = GetCollection(); + var collection = Fixture.Collection; var queryable1 = collection.AsQueryable(); var queryable2 = queryable1.Provider.CreateQuery<ProductAggregation>(queryable1.Expression); @@ -89,7 +95,7 @@ public void Cast_to_incompatible_type_should_throw() [Fact] public void Cast_to_interface_should_work() { - var collection = GetCollection(); + var collection = 
Fixture.Collection; var queryable1 = collection.AsQueryable(); var queryable2 = queryable1.Provider.CreateQuery<IProduct>(queryable1.Expression); @@ -101,7 +107,7 @@ public void Cast_to_interface_should_work() [Fact] public void Cast_to_base_class_should_work() { - var collection = GetCollection(); + var collection = Fixture.Collection; var queryable1 = collection.AsQueryable(); var queryable2 = queryable1.Provider.CreateQuery<ProductBase>(queryable1.Expression); @@ -110,33 +116,20 @@ public void Cast_to_base_class_should_work() results.Should().HaveCount(5); } - private IMongoCollection<DerivedProduct> GetCollection() - { - var collection = GetCollection<DerivedProduct>("test"); - CreateCollection( - collection, - new DerivedProduct { Id = 1, Type = "a", Price = 1 }, - new DerivedProduct { Id = 2, Type = "a", Price = 5 }, - new DerivedProduct { Id = 3, Type = "a", Price = 12 }, - new DerivedProduct { Id = 4, Type = "b", Price = 2 }, - new DerivedProduct { Id = 5, Type = "b", Price = 7 }); - return collection; - } - - private interface IProduct + public interface IProduct { string Type { get; set; } decimal Price { get; set; } } - private class ProductBase : IProduct + public class ProductBase : IProduct { public int Id { get; set; } public string Type { get; set; } public decimal Price { get; set; } } - private class DerivedProduct : ProductBase + public class DerivedProduct : ProductBase { } @@ -145,5 +138,17 @@ private class ProductAggregation public string Type { get; set; } public decimal MaxPrice { get; set; } } + + public sealed class ClassFixture : MongoCollectionFixture<DerivedProduct> + { + protected override IEnumerable<DerivedProduct> InitialData => + [ + new DerivedProduct { Id = 1, Type = "a", Price = 1 }, + new DerivedProduct { Id = 2, Type = "a", Price = 5 }, + new DerivedProduct { Id = 3, Type = "a", Price = 12 }, + new DerivedProduct { Id = 4, Type = "b", Price = 2 }, + new DerivedProduct { Id = 5, Type = "b", Price = 7 } + ]; + } } } diff --git 
a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToFilterTranslators/MethodTranslators/IsNullOrWhiteSpaceMethodToFilterTranslatorTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToFilterTranslators/MethodTranslators/IsNullOrWhiteSpaceMethodToFilterTranslatorTests.cs index 66e3a886bef..22b4de1a9c3 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToFilterTranslators/MethodTranslators/IsNullOrWhiteSpaceMethodToFilterTranslatorTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToFilterTranslators/MethodTranslators/IsNullOrWhiteSpaceMethodToFilterTranslatorTests.cs @@ -13,19 +13,24 @@ * limitations under the License. */ +using System.Collections.Generic; using System.Linq; using FluentAssertions; -using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Translators.ExpressionToFilterTranslators.MethodTranslators { - public class IsNullOrWhiteSpaceMethodToFilterTranslatorTests : Linq3IntegrationTest + public class IsNullOrWhiteSpaceMethodToFilterTranslatorTests : LinqIntegrationTest<IsNullOrWhiteSpaceMethodToFilterTranslatorTests.ClassFixture> { + public IsNullOrWhiteSpaceMethodToFilterTranslatorTests(ClassFixture fixture) : base(fixture) + { + } + [Fact] public void Find_using_IsNullOrWhiteSpace_should_return_expected_results() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var find = collection.Find(x => string.IsNullOrWhiteSpace(x.S)); @@ -39,7 +44,7 @@ public void Find_using_IsNullOrWhiteSpace_should_return_expected_results() [Fact] public void Where_using_IsNullOrWhiteSpace_should_return_expected_results() { - var collection = CreateCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .Where(x => string.IsNullOrWhiteSpace(x.S)); @@ -51,23 +56,22 @@ public void 
Where_using_IsNullOrWhiteSpace_should_return_expected_results() results.Select(x => x.Id).Should().BeEquivalentTo(1, 2, 3, 4); } - private IMongoCollection<C> CreateCollection() + public class C { - var collection = GetCollection<C>(); - CreateCollection( - collection, + public int Id { get; set; } + public string S { get; set; } + } + + public sealed class ClassFixture : MongoCollectionFixture<C> + { + protected override IEnumerable<C> InitialData => + [ new C { Id = 1, S = null }, new C { Id = 2, S = "" }, new C { Id = 3, S = " " }, new C { Id = 4, S = " \t\r\n" }, - new C { Id = 5, S = "abc" }); - return collection; - } - - public class C - { - public int Id { get; set; } - public string S { get; set; } + new C { Id = 5, S = "abc" } + ]; } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToPipelineTranslators/AsMethodToPipelineTranslatorTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToPipelineTranslators/AsMethodToPipelineTranslatorTests.cs index eb133344a20..456e64c723b 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToPipelineTranslators/AsMethodToPipelineTranslatorTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToPipelineTranslators/AsMethodToPipelineTranslatorTests.cs @@ -13,22 +13,28 @@ * limitations under the License. 
*/ +using System.Collections.Generic; using System.Linq; using FluentAssertions; using MongoDB.Bson; using MongoDB.Bson.Serialization.Serializers; -using MongoDB.Driver; using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Translators.ExpressionToPipelineTranslators { - public class AsMethodToPipelineTranslatorTests : Linq3IntegrationTest + public class AsMethodToPipelineTranslatorTests : LinqIntegrationTest<AsMethodToPipelineTranslatorTests.ClassFixture> { + public AsMethodToPipelineTranslatorTests(ClassFixture fixture) + : base(fixture) + { + } + [Fact] public void As_should_work() { - var collection = GetCollection(); + var collection = Fixture.Collection; var queryable = collection.AsQueryable() .Where(x => x.Name == "John") @@ -45,20 +51,19 @@ public void As_should_work() result.Should().Be("{ _id : 1, Name : 'John' }"); } - private IMongoCollection<C> GetCollection() + public class C { - var collection = GetCollection<C>("test"); - CreateCollection( - collection, - new C { Id = 1, Name = "John" }, - new C { Id = 2, Name = "Jane" }); - return collection; + public int Id { get; set; } + public string Name { get; set; } } - private class C + public sealed class ClassFixture : MongoCollectionFixture<C> { - public int Id { get; set; } - public string Name { get; set; } + protected override IEnumerable<C> InitialData => + [ + new C { Id = 1, Name = "John" }, + new C { Id = 2, Name = "Jane" } + ]; } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToPipelineTranslators/OfTypeMethodToPipelineTranslatorTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToPipelineTranslators/OfTypeMethodToPipelineTranslatorTests.cs index dd7fd95b533..1d5d22f2b20 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToPipelineTranslators/OfTypeMethodToPipelineTranslatorTests.cs +++ 
b/tests/MongoDB.Driver.Tests/Linq/Linq3Implementation/Translators/ExpressionToPipelineTranslators/OfTypeMethodToPipelineTranslatorTests.cs @@ -13,20 +13,29 @@ * limitations under the License. */ +using System; +using System.Collections.Generic; using System.Linq; using FluentAssertions; using MongoDB.Bson.Serialization.Attributes; +using MongoDB.Driver.Core.TestHelpers.XunitExtensions; using MongoDB.Driver.Linq; +using MongoDB.Driver.TestHelpers; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3Implementation.Translators.ExpressionToPipelineTranslators { - public class OfTypeMethodToPipelineTranslatorTests: Linq3IntegrationTest + public class OfTypeMethodToPipelineTranslatorTests: LinqIntegrationTest<OfTypeMethodToPipelineTranslatorTests.ClassFixture> { + public OfTypeMethodToPipelineTranslatorTests(ClassFixture fixture) + : base(fixture) + { + } + [Fact] public void OfType_should_return_expected_results() { - var collection = CreateCollection(); + var collection = Fixture.Collection; AssertTypeOf<Account>(collection, "{ $match: { _t : 'Account' } }", 1, 2, 3); AssertTypeOf<Company>(collection, "{ $match: { _t : 'Company' } }", 1, 2); @@ -47,18 +56,6 @@ private void AssertTypeOf<TDocument>(IMongoCollection<Entity> collection, string results.Select(x => x.Id).Should().BeEquivalentTo(expectedIds); } - private IMongoCollection<Entity> CreateCollection() - { - var collection = GetCollection<Entity>("test"); - CreateCollection( - collection, - new Company { Id = 1 }, - new Company { Id = 2 }, - new Contact { Id = 3 }); - - return collection; - } - [BsonDiscriminator(RootClass = true)] [BsonKnownTypes(typeof(Account), typeof(Contact), typeof(Company))] public abstract class Entity @@ -77,5 +74,15 @@ public class Contact : Account public class Company : Account { } + + public sealed class ClassFixture : MongoCollectionFixture<Entity> + { + protected override IEnumerable<Entity> InitialData => + [ + new Company { Id = 1 }, + new Company { Id = 2 }, + new Contact { Id 
= 3 } + ]; + } } } diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/IntegrationTestBase.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/IntegrationTestBase.cs index e422523ff9d..a499da2bc24 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/IntegrationTestBase.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/IntegrationTestBase.cs @@ -18,9 +18,11 @@ using System.Collections.Generic; using MongoDB.Bson; using MongoDB.Bson.Serialization.Attributes; +using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3ImplementationWithLinq2Tests { + [Trait("Category", "Integration")] public abstract class IntegrationTestBase { protected static IMongoCollection<Root> __collection; @@ -251,7 +253,11 @@ private void InsertSecond() O = new List<long> { 100, 200, 300 }, P = 1.1, U = -1.234565723762724332233489m, - Z = 10 + Z = 10, + NullableW = 8, + NullableX = 9, + NullableY = 10, + NullableZ = 11 }; __collection.InsertOne(root); } @@ -331,6 +337,14 @@ public class Root : IRoot public int Y { get; set; } public decimal Z { get; set; } + + public double? NullableW { get; set; } + + public long? NullableX { get; set; } + + public int? NullableY { get; set; } + + public decimal? 
NullableZ { get; set; } } public class RootDescended : Root diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableEnumComparedToEnumTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableEnumComparedToEnumTests.cs index 0020e737169..1c3ff6f6226 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableEnumComparedToEnumTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableEnumComparedToEnumTests.cs @@ -22,6 +22,7 @@ namespace MongoDB.Driver.Tests.Linq.Linq3ImplementationWithLinq2Tests { + [Trait("Category", "Integration")] public class MongoQueryableEnumComparedToEnumTests { private static readonly IMongoClient __client; diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableEnumComparedToEnumWithStringRepresentationTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableEnumComparedToEnumWithStringRepresentationTests.cs index cad6a9933d2..caf157ce7d3 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableEnumComparedToEnumWithStringRepresentationTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableEnumComparedToEnumWithStringRepresentationTests.cs @@ -23,6 +23,7 @@ namespace MongoDB.Driver.Tests.Linq.Linq3ImplementationWithLinq2Tests { + [Trait("Category", "Integration")] public class MongoQueryableEnumComparedToEnumWithStringRepresentationTests { private static readonly IMongoClient __client; diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableIntArrayComparedToEnumerableIntTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableIntArrayComparedToEnumerableIntTests.cs index fae8b42b343..c8280dc4756 100644 --- 
a/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableIntArrayComparedToEnumerableIntTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableIntArrayComparedToEnumerableIntTests.cs @@ -23,6 +23,7 @@ namespace MongoDB.Driver.Tests.Linq.Linq3ImplementationWithLinq2Tests { + [Trait("Category", "Integration")] public class MongoQueryableIntArrayComparedToEnumerableIntTests { private static readonly IMongoClient __client; diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableIntComparedToDoubleTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableIntComparedToDoubleTests.cs index cc9d25626e5..6cf5a074a35 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableIntComparedToDoubleTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableIntComparedToDoubleTests.cs @@ -22,6 +22,7 @@ namespace MongoDB.Driver.Tests.Linq.Linq3ImplementationWithLinq2Tests { + [Trait("Category", "Integration")] public class MongoQueryableIntComparedToDoubleTests { private static readonly IMongoClient __client; diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableIntComparedToDoubleWithStringRepresentationTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableIntComparedToDoubleWithStringRepresentationTests.cs index ac02c947b84..511d103038b 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableIntComparedToDoubleWithStringRepresentationTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableIntComparedToDoubleWithStringRepresentationTests.cs @@ -23,6 +23,7 @@ namespace MongoDB.Driver.Tests.Linq.Linq3ImplementationWithLinq2Tests { + [Trait("Category", "Integration")] public class 
MongoQueryableIntComparedToDoubleWithStringRepresentationTests { private static readonly IMongoClient __client; diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableIntComparedToNullableIntTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableIntComparedToNullableIntTests.cs index 69d9d98ac0f..c98b57fb299 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableIntComparedToNullableIntTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableIntComparedToNullableIntTests.cs @@ -22,6 +22,7 @@ namespace MongoDB.Driver.Tests.Linq.Linq3ImplementationWithLinq2Tests { + [Trait("Category", "Integration")] public class MongoQueryableIntComparedToNullableIntTests { private static readonly IMongoClient __client; diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableIntComparedToNullableIntWithStringRepresentationTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableIntComparedToNullableIntWithStringRepresentationTests.cs index d9367c2df9f..96a07f29d6a 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableIntComparedToNullableIntWithStringRepresentationTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableIntComparedToNullableIntWithStringRepresentationTests.cs @@ -23,6 +23,7 @@ namespace MongoDB.Driver.Tests.Linq.Linq3ImplementationWithLinq2Tests { + [Trait("Category", "Integration")] public class MongoQueryableIntComparedToNullableIntWithStringRepresentationTests { private static readonly IMongoClient __client; diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableNullableEnumComparedToNullableEnumTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableNullableEnumComparedToNullableEnumTests.cs index 
0d28da7db01..945210f690c 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableNullableEnumComparedToNullableEnumTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableNullableEnumComparedToNullableEnumTests.cs @@ -22,6 +22,7 @@ namespace MongoDB.Driver.Tests.Linq.Linq3ImplementationWithLinq2Tests { + [Trait("Category", "Integration")] public class MongoQueryableNullableEnumComparedToNullableEnumTests { private static readonly IMongoClient __client; diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableNullableEnumComparedToNullableEnumWithStringRepresentationTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableNullableEnumComparedToNullableEnumWithStringRepresentationTests.cs index e8d4310a731..dd8ccf9e5d8 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableNullableEnumComparedToNullableEnumWithStringRepresentationTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableNullableEnumComparedToNullableEnumWithStringRepresentationTests.cs @@ -23,6 +23,7 @@ namespace MongoDB.Driver.Tests.Linq.Linq3ImplementationWithLinq2Tests { + [Trait("Category", "Integration")] public class MongoQueryableNullableEnumComparedToNullableEnumWithStringRepresentationTests { private static readonly IMongoClient __client; diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableTests.cs index d68f43e1fe7..8e161fb3a7b 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableTests.cs @@ -19,7 +19,6 @@ using System.Threading.Tasks; using FluentAssertions; using MongoDB.Bson; -using MongoDB.Driver; using 
MongoDB.Driver.Core.Clusters; using MongoDB.Driver.Core.Misc; using MongoDB.Driver.Core.TestHelpers.XunitExtensions; @@ -78,6 +77,21 @@ public async Task AnyAsync_with_predicate() result.Should().BeTrue(); } + [Fact] + public async Task ToAsyncEnumerable() + { + var query = CreateQuery().Select(x => x.A); + var expectedResults = query.ToList(); + + var asyncResults = new List<string>(); + await foreach (var item in query.ToAsyncEnumerable()) + { + asyncResults.Add(item); + } + + asyncResults.Should().Equal(expectedResults); + } + [Fact] public void Average() { @@ -121,6 +135,106 @@ public async Task AverageAsync_with_selector() result.Should().Be(61); } + [Fact] + public void Average_on_empty_set() + { + Action action = () => CreateQuery().Where(x => x.A == "__dummy__").Select(x => x.W).Average(); + + action.ShouldThrow<InvalidOperationException>().WithMessage("Sequence contains no elements"); + } + + [Fact] + public void Average_on_empty_set_with_selector() + { + Action action = () => CreateQuery().Where(x => x.A == "__dummy__").Average(x => x.X); + + action.ShouldThrow<InvalidOperationException>().WithMessage("Sequence contains no elements"); + } + + [Fact] + public void AverageAsync_on_empty_set() + { + var subject = CreateQuery().Where(x => x.A == "__dummy__").Select(x => x.Y).AverageAsync(); + + subject.Awaiting(async q => await q) + .ShouldThrow<InvalidOperationException>() + .WithMessage("Sequence contains no elements"); + } + + [Fact] + public void AverageAsync_on_empty_set_with_selector() + { + var subject = CreateQuery().Where(x => x.A == "__dummy__").AverageAsync(x => x.Z); + + subject.Awaiting(async q => await q) + .ShouldThrow<InvalidOperationException>() + .WithMessage("Sequence contains no elements"); + } + + [Fact] + public void Average_on_nullable_empty_set() + { + var result = CreateQuery().Where(x => x.A == "__dummy__").Select(x => x.NullableW).Average(); + + result.Should().Be(null); + } + + [Fact] + public void 
Average_on_nullable_empty_set_with_selector() + { + var result = CreateQuery().Where(x => x.A == "__dummy__").Average(x => x.NullableX); + + result.Should().Be(null); + } + + [Fact] + public async Task AverageAsync_on_nullable_empty_set() + { + var result = await CreateQuery().Where(x => x.A == "__dummy__").Select(x => x.NullableY).AverageAsync(); + + result.Should().Be(null); + } + + [Fact] + public async Task AverageAsync_on_nullable_empty_set_with_selector() + { + var result = await CreateQuery().Where(x => x.A == "__dummy__").AverageAsync(x => x.NullableZ); + + result.Should().Be(null); + } + + [Fact] + public void Average_on_empty_set_cast_to_nullable() + { + var result = CreateQuery().Where(x => x.A == "__dummy__").Select(x => (double?)x.W).Average(); + + result.Should().Be(null); + } + + [Fact] + public void Average_on_empty_set_cast_to_nullable_with_selector() + { + var result = CreateQuery().Where(x => x.A == "__dummy__").Average(x => (long?)x.X); + + result.Should().Be(null); + } + + [Fact] + public async Task AverageAsync_on_empty_set_cast_to_nullable() + { + var result = await CreateQuery().Where(x => x.A == "__dummy__").Select(x => (int?)x.Y).AverageAsync(); + + result.Should().Be(null); + } + + [Fact] + public async Task AverageAsync_on_empty_set_cast_to_nullable_with_selector() + { + var result = await CreateQuery().Where(x => x.A == "__dummy__").AverageAsync(x => (decimal?)x.Z); + + result.Should().Be(null); + } + [Fact] public void GroupBy_combined_with_a_previous_embedded_pipeline() { diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableWithDotNotationTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableWithDotNotationTests.cs index 0e4e4af8c0f..f43d71146fe 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableWithDotNotationTests.cs +++ 
b/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/MongoQueryableWithDotNotationTests.cs @@ -24,6 +24,7 @@ namespace MongoDB.Driver.Tests.Linq.Linq3ImplementationWithLinq2Tests { + [Trait("Category", "Integration")] public class MongoQueryableWithDotNotationTests { [Fact] diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/Translators/AggregateGroupTranslatorTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/Translators/AggregateGroupTranslatorTests.cs index 45bbf7067af..a8f7428079b 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/Translators/AggregateGroupTranslatorTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/Translators/AggregateGroupTranslatorTests.cs @@ -20,6 +20,7 @@ using FluentAssertions; using MongoDB.Bson; using MongoDB.Bson.Serialization; +using MongoDB.Driver.Core.Misc; using MongoDB.Driver.Core.TestHelpers.XunitExtensions; using MongoDB.Driver.Linq; using MongoDB.Driver.Linq.Linq3Implementation.Ast; @@ -164,6 +165,66 @@ public void Should_translate_count_with_a_predicate() result.Value.Result.Should().Be(1); } + [Fact] + public void Should_translate_median_with_embedded_projector() + { + RequireServer.Check().Supports(Feature.MedianOperator); + + var result = Group(x => x.A, g => new { Result = g.Median(x=> x.C.E.F) }); + + AssertStages( + result.Stages, + "{ $group : { _id : '$A', __agg0 : { $median : { input : '$C.E.F', method : 'approximate' } } } }", + "{ $project : { Result : '$__agg0', _id : 0 } }"); + + result.Value.Result.Should().Be(111); + } + + [Fact] + public void Should_translate_median_with_selected_projector() + { + RequireServer.Check().Supports(Feature.MedianOperator); + + var result = Group(x => x.A, g => new { Result = g.Select(x => x.C.E.F).Median() }); + + AssertStages( + result.Stages, + "{ $group : { _id : '$A', __agg0 : { $median : { input : '$C.E.F', method : 'approximate' } } } }", + "{ 
$project : { Result : '$__agg0', _id : 0 } }"); + + result.Value.Result.Should().Be(111); + } + + [Fact] + public void Should_translate_percentile_with_embedded_projector() + { + RequireServer.Check().Supports(Feature.PercentileOperator); + + var result = Group(x => x.A, g => new { Result = g.Percentile(x => x.C.E.F, new[] { 0.5 }) }); + + AssertStages( + result.Stages, + "{ $group : { _id : '$A', __agg0 : { $percentile : { input : '$C.E.F', p : [0.5], method : 'approximate' } } } }", + "{ $project : { Result : '$__agg0', _id : 0 } }"); + + result.Value.Result.Should().Equal(111.0); + } + + [Fact] + public void Should_translate_percentile_with_selected_projector() + { + RequireServer.Check().Supports(Feature.PercentileOperator); + + var result = Group(x => x.A, g => new { Result = g.Select(x => x.C.E.F).Percentile(new[] { 0.5 }) }); + + AssertStages( + result.Stages, + "{ $group : { _id : '$A', __agg0 : { $percentile : { input : '$C.E.F', p : [0.5], method : 'approximate' } } } }", + "{ $project : { Result : '$__agg0', _id : 0 } }"); + + result.Value.Result.Should().Equal(111.0); + } + [Fact] public void Should_translate_where_with_a_predicate_and_count() { @@ -519,6 +580,40 @@ public void Should_translate_complex_selector() result.Value.Max.Should().Be(333); } + [Fact] + public void Should_translate_complex_selector_with_quantile_methods() + { + RequireServer.Check().Supports(Feature.PercentileOperator); + + var result = Group(x => x.A, g => new + { + Median = g.Median(x => x.C.E.F + x.C.E.H), + Percentile = g.Percentile(x => x.C.E.F + x.C.E.H, new[] { 0.95 }) + }); + + AssertStages( + result.Stages, + @" + { + $group : { + _id : '$A', + __agg0 : { $median : { input : { $add : ['$C.E.F', '$C.E.H'] }, method : 'approximate' } }, + __agg1 : { $percentile : { input : { $add : ['$C.E.F', '$C.E.H'] }, p : [0.95], method : 'approximate' } } + } + }", + @" + { + $project : { + Median : '$__agg0', + Percentile : '$__agg1', + _id : 0 + } + }"); + + 
result.Value.Median.Should().Be(333); + result.Value.Percentile.Should().Equal(333); + } + [Fact] public void Should_translate_aggregate_expressions_with_user_provided_serializer_if_possible() { diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/Translators/LegacyPredicateTranslatorTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/Translators/LegacyPredicateTranslatorTests.cs index 361855cc07a..cd49af1955f 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/Translators/LegacyPredicateTranslatorTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/Translators/LegacyPredicateTranslatorTests.cs @@ -22,7 +22,6 @@ using MongoDB.Bson; using MongoDB.Bson.Serialization; using MongoDB.Bson.Serialization.Attributes; -using MongoDB.Driver; using MongoDB.Driver.Linq.Linq3Implementation.Ast.Optimizers; using MongoDB.Driver.Linq.Linq3Implementation.Misc; using MongoDB.Driver.Linq.Linq3Implementation.Translators; @@ -31,6 +30,7 @@ namespace MongoDB.Driver.Tests.Linq.Linq3ImplementationWithLinq2Tests.Translators { + [Trait("Category", "Integration")] public class LegacyPredicateTranslatorTests { private static IMongoDatabase __database; diff --git a/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/Translators/PredicateTranslatorValidationTests.cs b/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/Translators/PredicateTranslatorValidationTests.cs index 7bdb46a2219..12d51f85ce8 100644 --- a/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/Translators/PredicateTranslatorValidationTests.cs +++ b/tests/MongoDB.Driver.Tests/Linq/Linq3ImplementationWithLinq2Tests/Translators/PredicateTranslatorValidationTests.cs @@ -19,13 +19,13 @@ using System.Linq.Expressions; using FluentAssertions; using MongoDB.Bson; -using MongoDB.Driver; using MongoDB.Driver.Linq.Linq3Implementation; using 
MongoDB.Driver.Linq.Linq3Implementation.Translators.ExpressionToExecutableQueryTranslators; using Xunit; namespace MongoDB.Driver.Tests.Linq.Linq3ImplementationWithLinq2Tests.Translators { + [Trait("Category", "Integration")] public class PredicateTranslatorValidationTests { private IMongoCollection<TestObject> _collection; diff --git a/tests/MongoDB.Driver.Tests/ListDatabasesTests.cs b/tests/MongoDB.Driver.Tests/ListDatabasesTests.cs index 9840f441ec0..c5bf7d955a1 100644 --- a/tests/MongoDB.Driver.Tests/ListDatabasesTests.cs +++ b/tests/MongoDB.Driver.Tests/ListDatabasesTests.cs @@ -23,6 +23,7 @@ namespace MongoDB.Driver.Tests { + [Trait("Category", "Integration")] public class ListDatabasesTests { private string _databaseName = $"authorizedDatabases{Guid.NewGuid()}"; diff --git a/tests/MongoDB.Driver.Tests/LoggingTests.cs b/tests/MongoDB.Driver.Tests/LoggingTests.cs index 04f83dbca0e..118d2404f98 100644 --- a/tests/MongoDB.Driver.Tests/LoggingTests.cs +++ b/tests/MongoDB.Driver.Tests/LoggingTests.cs @@ -21,12 +21,12 @@ using MongoDB.Driver.Core.Configuration; using MongoDB.Driver.Core.Logging; using MongoDB.Driver.Core.TestHelpers.Logging; -using MongoDB.Driver.Linq; using Xunit; using Xunit.Abstractions; namespace MongoDB.Driver.Tests { + [Trait("Category", "Integration")] public class LoggingTests : LoggableTestClass { public LoggingTests(ITestOutputHelper output) : base(output, includeAllCategories: true) @@ -102,7 +102,7 @@ public void Prose_tests_truncation_limit_1(int? 
maxDocumentSize) : new LoggingSettings(LoggerFactory, maxDocumentSize.Value); using (var client = DriverTestConfiguration.CreateMongoClient(loggingSettings)) { - + var db = client.GetDatabase(DriverTestConfiguration.DatabaseNamespace.DatabaseName); try diff --git a/tests/MongoDB.Driver.Tests/MockOperationExecutor.cs b/tests/MongoDB.Driver.Tests/MockOperationExecutor.cs index 6c9c18b6410..ee6e0d015f3 100644 --- a/tests/MongoDB.Driver.Tests/MockOperationExecutor.cs +++ b/tests/MongoDB.Driver.Tests/MockOperationExecutor.cs @@ -48,6 +48,10 @@ public int QueuedCallCount get { return _calls.Count; } } + public void Dispose() + { + } + public void EnqueueResult<TResult>(TResult result) { _results.Enqueue(result); @@ -58,15 +62,21 @@ public void EnqueueException<TResult>(Exception exception) _results.Enqueue(exception); } - public TResult ExecuteReadOperation<TResult>(IReadBinding binding, IReadOperation<TResult> operation, CancellationToken cancellationToken) + public TResult ExecuteReadOperation<TResult>( + OperationContext operationContext, + IClientSessionHandle session, + IReadOperation<TResult> operation, + ReadPreference readPreference, + bool allowChannelPinning) { _calls.Enqueue(new ReadCall<TResult> { - Binding = binding, Operation = operation, - CancellationToken = cancellationToken, - SessionId = binding.Session.Id, - UsedImplicitSession = binding.Session.IsImplicit + CancellationToken = operationContext.CancellationToken, + ReadPreference = readPreference, + SessionId = session?.WrappedCoreSession.Id, + Timeout = operationContext.Timeout, + UsedImplicitSession = session == null || session.IsImplicit }); if (_results.Count > 0) @@ -85,11 +95,16 @@ public TResult ExecuteReadOperation<TResult>(IReadBinding binding, IReadOperatio return default(TResult); } - public Task<TResult> ExecuteReadOperationAsync<TResult>(IReadBinding binding, IReadOperation<TResult> operation, CancellationToken cancellationToken) + public Task<TResult> ExecuteReadOperationAsync<TResult>( 
+ OperationContext operationContext, + IClientSessionHandle session, + IReadOperation<TResult> operation, + ReadPreference readPreference, + bool allowChannelPinning) { try { - var result = ExecuteReadOperation<TResult>(binding, operation, cancellationToken); + var result = ExecuteReadOperation(operationContext, session, operation, readPreference, allowChannelPinning); return Task.FromResult(result); } catch (Exception ex) @@ -100,15 +115,19 @@ public Task<TResult> ExecuteReadOperationAsync<TResult>(IReadBinding binding, IR } } - public TResult ExecuteWriteOperation<TResult>(IWriteBinding binding, IWriteOperation<TResult> operation, CancellationToken cancellationToken) + public TResult ExecuteWriteOperation<TResult>( + OperationContext operationContext, + IClientSessionHandle session, + IWriteOperation<TResult> operation, + bool allowChannelPinning) { _calls.Enqueue(new WriteCall<TResult> { - Binding = binding, Operation = operation, - CancellationToken = cancellationToken, - SessionId = binding.Session.Id, - UsedImplicitSession = binding.Session.IsImplicit + CancellationToken = operationContext.CancellationToken, + SessionId = session?.WrappedCoreSession.Id, + Timeout = operationContext.Timeout, + UsedImplicitSession = session == null || session.IsImplicit }); if (_results.Count > 0) @@ -127,11 +146,15 @@ public TResult ExecuteWriteOperation<TResult>(IWriteBinding binding, IWriteOpera return default(TResult); } - public Task<TResult> ExecuteWriteOperationAsync<TResult>(IWriteBinding binding, IWriteOperation<TResult> operation, CancellationToken cancellationToken) + public Task<TResult> ExecuteWriteOperationAsync<TResult>( + OperationContext operationContext, + IClientSessionHandle session, + IWriteOperation<TResult> operation, + bool allowChannelPinning) { try { - var result = ExecuteWriteOperation<TResult>(binding, operation, cancellationToken); + var result = ExecuteWriteOperation(operationContext, session, operation, allowChannelPinning); return 
Task.FromResult(result); } catch (Exception ex) @@ -176,7 +199,7 @@ public WriteCall<TResult> GetWriteCall<TResult>() return writeCall; } - public IClientSessionHandle StartImplicitSession(CancellationToken cancellationToken) + public IClientSessionHandle StartImplicitSession() { var cluster = Mock.Of<IClusterInternal>(); var options = new ClientSessionOptions(); @@ -186,26 +209,22 @@ public IClientSessionHandle StartImplicitSession(CancellationToken cancellationT return new ClientSessionHandle(_client, options, coreSessionHandle); } - public Task<IClientSessionHandle> StartImplicitSessionAsync(CancellationToken cancellationToken) - { - return Task.FromResult(StartImplicitSession(cancellationToken)); - } - public class ReadCall<TResult> { - public IReadBinding Binding { get; set; } public IReadOperation<TResult> Operation { get; set; } public CancellationToken CancellationToken { get; set; } + public ReadPreference ReadPreference { get; set; } public BsonDocument SessionId { get; set; } + public TimeSpan? Timeout { get; set; } public bool UsedImplicitSession { get; set; } } public class WriteCall<TResult> { - public IWriteBinding Binding { get; set; } public IWriteOperation<TResult> Operation { get; set; } public CancellationToken CancellationToken { get; set; } public BsonDocument SessionId { get; set; } + public TimeSpan? 
Timeout { get; set; } public bool UsedImplicitSession { get; set; } } } diff --git a/tests/MongoDB.Driver.Tests/MongoClientSettingsTests.cs b/tests/MongoDB.Driver.Tests/MongoClientSettingsTests.cs index ddcffdadf19..50a040e2516 100644 --- a/tests/MongoDB.Driver.Tests/MongoClientSettingsTests.cs +++ b/tests/MongoDB.Driver.Tests/MongoClientSettingsTests.cs @@ -25,9 +25,9 @@ using MongoDB.Driver.Core.Clusters; using MongoDB.Driver.Core.Compression; using MongoDB.Driver.Core.Configuration; +using MongoDB.Driver.Core.Connections; using MongoDB.Driver.Core.Servers; using MongoDB.Driver.Core.TestHelpers.XunitExtensions; -using MongoDB.Driver.Encryption; using MongoDB.TestHelpers.XunitExtensions; using Moq; using Xunit; @@ -266,6 +266,7 @@ public void TestDefaults() Assert.Equal(ServerMonitoringMode.Auto, settings.ServerMonitoringMode); Assert.Equal(MongoDefaults.ServerSelectionTimeout, settings.ServerSelectionTimeout); Assert.Equal(MongoDefaults.SocketTimeout, settings.SocketTimeout); + Assert.Equal(null, settings.Socks5ProxySettings); Assert.Null(settings.SslSettings); #pragma warning disable 618 Assert.Equal(false, settings.UseSsl); @@ -435,6 +436,10 @@ public void TestEquals() clone.SocketTimeout = new TimeSpan(1, 2, 3); Assert.False(clone.Equals(settings)); + clone = settings.Clone(); + clone.Socks5ProxySettings = Socks5ProxySettings.Create("host.com", null, null, null); + Assert.False(clone.Equals(settings)); + clone = settings.Clone(); clone.SslSettings = new SslSettings { CheckCertificateRevocation = false }; Assert.False(clone.Equals(settings)); @@ -475,6 +480,7 @@ public void TestEquals() settings.ReadConcern = ReadConcern.Majority; settings.ReadEncoding = new UTF8Encoding(false, false); settings.ServerApi = new ServerApi(ServerApiVersion.V1); + settings.Socks5ProxySettings = Socks5ProxySettings.Create("host.com", 8080, null, null); settings.WriteConcern = WriteConcern.W2; settings.WriteEncoding = new UTF8Encoding(false, false); @@ -485,6 +491,7 @@ public void 
TestEquals() clone.ReadEncoding = new UTF8Encoding(false, false); clone.ReadPreference = clone.ReadPreference.With(settings.ReadPreference.ReadPreferenceMode); clone.ServerApi = new ServerApi(settings.ServerApi.Version); + clone.Socks5ProxySettings = Socks5ProxySettings.Create("host.com", 8080, null, null); clone.WriteConcern = WriteConcern.FromBsonDocument(settings.WriteConcern.ToBsonDocument()); clone.WriteEncoding = new UTF8Encoding(false, false); @@ -582,7 +589,8 @@ public void TestFromUrl() "maxConnecting=3;maxIdleTime=124;maxLifeTime=125;maxPoolSize=126;minPoolSize=127;readConcernLevel=majority;" + "readPreference=secondary;readPreferenceTags=a:1,b:2;readPreferenceTags=c:3,d:4;retryReads=false;retryWrites=true;socketTimeout=129;" + "serverMonitoringMode=Stream;serverSelectionTimeout=20s;tls=true;sslVerifyCertificate=false;waitqueuesize=130;waitQueueTimeout=131;" + - "w=1;fsync=true;journal=true;w=2;wtimeout=131;gssapiServiceName=other"; + "w=1;fsync=true;journal=true;w=2;wtimeout=131;gssapiServiceName=other" + + "&proxyHost=host.com&proxyPort=2020&proxyUsername=user&proxyPassword=passw"; var builder = new MongoUrlBuilder(connectionString); var url = builder.ToMongoUrl(); @@ -620,6 +628,10 @@ public void TestFromUrl() Assert.Equal(ServerMonitoringMode.Stream, settings.ServerMonitoringMode); Assert.Equal(url.ServerSelectionTimeout, settings.ServerSelectionTimeout); Assert.Equal(url.SocketTimeout, settings.SocketTimeout); + Assert.Equal(url.ProxyHost, settings.Socks5ProxySettings.Host); + Assert.Equal(url.ProxyPort, settings.Socks5ProxySettings.Port); + Assert.Equal(url.ProxyUsername, ((Socks5AuthenticationSettings.UsernamePasswordAuthenticationSettings)settings.Socks5ProxySettings.Authentication).Username); + Assert.Equal(url.ProxyPassword, ((Socks5AuthenticationSettings.UsernamePasswordAuthenticationSettings)settings.Socks5ProxySettings.Authentication).Password); #pragma warning disable 618 Assert.Equal(url.TlsDisableCertificateRevocationCheck, 
!settings.SslSettings.CheckCertificateRevocation); Assert.Equal(url.UseSsl, settings.UseSsl); @@ -1110,6 +1122,7 @@ public void TestServersWithSrvMaxHosts() } [Fact] + [Trait("Category", "Integration")] public void TestSocketConfigurator() { RequireServer.Check(); @@ -1174,6 +1187,21 @@ public void TestSocketTimeout() Assert.Throws<InvalidOperationException>(() => { settings.SocketTimeout = socketTimeout; }); } + [Fact] + public void TestSocks5ProxySettings() + { + var settings = new MongoClientSettings(); + Assert.Equal(null, settings.Socks5ProxySettings); + + var newProxySettings = Socks5ProxySettings.Create("host.com", 280, "test", "test"); + settings.Socks5ProxySettings = newProxySettings; + Assert.Equal(newProxySettings, settings.Socks5ProxySettings); + + settings.Freeze(); + Assert.Equal(newProxySettings, settings.Socks5ProxySettings); + Assert.Throws<InvalidOperationException>(() => { settings.Socks5ProxySettings = newProxySettings; }); + } + [Fact] public void TestSslSettings() { @@ -1325,6 +1353,7 @@ public void ToClusterKey_should_copy_relevant_values() ServerMonitoringMode = ServerMonitoringMode.Poll, ServerSelectionTimeout = TimeSpan.FromSeconds(6), SocketTimeout = TimeSpan.FromSeconds(4), + Socks5ProxySettings = Socks5ProxySettings.Create("host", 2020, null, null), SslSettings = sslSettings, UseTls = true, #pragma warning disable 618 @@ -1361,6 +1390,7 @@ public void ToClusterKey_should_copy_relevant_values() result.ServerMonitoringMode.Should().Be(ServerMonitoringMode.Poll); result.ServerSelectionTimeout.Should().Be(subject.ServerSelectionTimeout); result.SocketTimeout.Should().Be(subject.SocketTimeout); + result.Socks5ProxySettings.Should().Be(subject.Socks5ProxySettings); result.SslSettings.Should().Be(subject.SslSettings); result.UseTls.Should().Be(subject.UseTls); #pragma warning disable 618 diff --git a/tests/MongoDB.Driver.Tests/MongoClientTests.cs b/tests/MongoDB.Driver.Tests/MongoClientTests.cs index d1f06516a2f..026f5b83cd4 100644 --- 
a/tests/MongoDB.Driver.Tests/MongoClientTests.cs +++ b/tests/MongoDB.Driver.Tests/MongoClientTests.cs @@ -28,6 +28,7 @@ namespace MongoDB.Driver.Tests { + [Trait("Category", "Integration")] public class MongoClientTests { [Fact] @@ -118,9 +119,6 @@ public void Disposed_client_should_throw_on_member_access() var exception = Record.Exception(() => client.Cluster); exception.Should().BeOfType<ObjectDisposedException>(); - - exception = Record.Exception(() => client.StartImplicitSession(default)); - exception.Should().BeOfType<ObjectDisposedException>(); } [Theory] @@ -131,7 +129,7 @@ public void DropDatabase_should_invoke_the_correct_operation( { var operationExecutor = new MockOperationExecutor(); var writeConcern = new WriteConcern(1); - var subject = new MongoClient(operationExecutor, DriverTestConfiguration.GetClientSettings()).WithWriteConcern(writeConcern); + var subject = new MongoClient(DriverTestConfiguration.GetClientSettings(), _ => operationExecutor).WithWriteConcern(writeConcern); var session = CreateClientSession(); using var cancellationTokenSource = new CancellationTokenSource(); var cancellationToken = cancellationTokenSource.Token; @@ -182,7 +180,7 @@ public void ListDatabaseNames_should_invoke_the_correct_operation( [Values(false, true)] bool async) { var operationExecutor = new MockOperationExecutor(); - var subject = new MongoClient(operationExecutor, DriverTestConfiguration.GetClientSettings()); + var subject = new MongoClient(DriverTestConfiguration.GetClientSettings(), _ => operationExecutor); var session = CreateClientSession(); using var cancellationTokenSource = new CancellationTokenSource(); var cancellationToken = cancellationTokenSource.Token; @@ -276,7 +274,7 @@ public void ListDatabases_should_invoke_the_correct_operation( [Values(false, true)] bool async) { var operationExecutor = new MockOperationExecutor(); - var subject = new MongoClient(operationExecutor, DriverTestConfiguration.GetClientSettings()); + var subject = new 
MongoClient(DriverTestConfiguration.GetClientSettings(), _ => operationExecutor); var session = CreateClientSession(); using var cancellationTokenSource = new CancellationTokenSource(); var cancellationToken = cancellationTokenSource.Token; @@ -336,7 +334,7 @@ public void Watch_should_invoke_the_correct_operation( { var operationExecutor = new MockOperationExecutor(); var clientSettings = DriverTestConfiguration.GetClientSettings(); - var subject = new MongoClient(operationExecutor, clientSettings); + var subject = new MongoClient(clientSettings, _ => operationExecutor); var session = usingSession ? CreateClientSession() : null; var pipeline = new EmptyPipelineDefinition<ChangeStreamDocument<BsonDocument>>().Limit(1); var options = new ChangeStreamOptions diff --git a/tests/MongoDB.Driver.Tests/MongoCollectionImplTests.cs b/tests/MongoDB.Driver.Tests/MongoCollectionImplTests.cs index 818c29ade41..d89660af55e 100644 --- a/tests/MongoDB.Driver.Tests/MongoCollectionImplTests.cs +++ b/tests/MongoDB.Driver.Tests/MongoCollectionImplTests.cs @@ -3805,6 +3805,7 @@ public void Watch_should_throw_when_pipeline_is_null( } [Fact] + [Trait("Category", "Integration")] public void Watch_should_support_full_document_with_duplicate_elements() { RequireServer.Check().ClusterTypes(ClusterType.ReplicaSet, ClusterType.Sharded); diff --git a/tests/MongoDB.Driver.Tests/MongoCredentialTests.cs b/tests/MongoDB.Driver.Tests/MongoCredentialTests.cs index 8b1c38ee667..e44f90e0cdc 100644 --- a/tests/MongoDB.Driver.Tests/MongoCredentialTests.cs +++ b/tests/MongoDB.Driver.Tests/MongoCredentialTests.cs @@ -20,6 +20,7 @@ namespace MongoDB.Driver.Tests { + [Trait("Category", "Integration")] public class MongoCredentialTests { [Fact] diff --git a/tests/MongoDB.Driver.Tests/MongoDB.Driver.Tests.csproj b/tests/MongoDB.Driver.Tests/MongoDB.Driver.Tests.csproj index e7a41e2e09b..316086d21ec 100644 --- a/tests/MongoDB.Driver.Tests/MongoDB.Driver.Tests.csproj +++ 
b/tests/MongoDB.Driver.Tests/MongoDB.Driver.Tests.csproj @@ -28,10 +28,6 @@ </NoWarn> </PropertyGroup> - <ItemGroup> - <PackageReference Include="Microsoft.NETFramework.ReferenceAssemblies" PrivateAssets="All" Version="1.0.0" /> - </ItemGroup> - <ItemGroup> <PackageReference Include="Microsoft.NET.Test.Sdk" Version="17.13.0" /> <PackageReference Include="xunit.runner.visualstudio" Version="2.4.0" /> diff --git a/tests/MongoDB.Driver.Tests/MongoDatabasTests.cs b/tests/MongoDB.Driver.Tests/MongoDatabaseTests.cs similarity index 98% rename from tests/MongoDB.Driver.Tests/MongoDatabasTests.cs rename to tests/MongoDB.Driver.Tests/MongoDatabaseTests.cs index 9b35886d6b9..b474f0a3e5a 100644 --- a/tests/MongoDB.Driver.Tests/MongoDatabasTests.cs +++ b/tests/MongoDB.Driver.Tests/MongoDatabaseTests.cs @@ -380,6 +380,7 @@ public void AggregateToCollection_should_throw_when_last_stage_is_not_an_output_ [Theory] [ParameterAttributeData] + [Trait("Category", "Integration")] public void CreateCollection_should_execute_a_CreateCollectionOperation_when_options_is_generic( [Values(false, true)] bool usingSession, [Values(false, true)] bool clustered, @@ -408,9 +409,7 @@ public void CreateCollection_should_execute_a_CreateCollectionOperation_when_opt IndexOptionDefaults = new IndexOptionDefaults { StorageEngine = new BsonDocument("x", 1) }, MaxDocuments = 10, MaxSize = 11, - NoPadding = true, StorageEngine = storageEngine, - UsePowerOf2Sizes = true, ValidationAction = DocumentValidationAction.Warn, ValidationLevel = DocumentValidationLevel.Off, Validator = validatorDefinition @@ -460,9 +459,7 @@ public void CreateCollection_should_execute_a_CreateCollectionOperation_when_opt op.IndexOptionDefaults.ToBsonDocument().Should().Be(options.IndexOptionDefaults.ToBsonDocument()); op.MaxDocuments.Should().Be(options.MaxDocuments); op.MaxSize.Should().Be(options.MaxSize); - op.NoPadding.Should().Be(options.NoPadding); op.StorageEngine.Should().Be(storageEngine); - 
op.UsePowerOf2Sizes.Should().Be(options.UsePowerOf2Sizes); op.ValidationAction.Should().Be(options.ValidationAction); op.ValidationLevel.Should().Be(options.ValidationLevel); op.Validator.Should().Be(validatorDocument); @@ -487,9 +484,7 @@ public void CreateCollection_should_execute_a_CreateCollectionOperation_when_opt IndexOptionDefaults = new IndexOptionDefaults { StorageEngine = new BsonDocument("x", 1) }, MaxDocuments = 10, MaxSize = 11, - NoPadding = true, StorageEngine = storageEngine, - UsePowerOf2Sizes = true, ValidationAction = DocumentValidationAction.Warn, ValidationLevel = DocumentValidationLevel.Off }; @@ -531,9 +526,7 @@ public void CreateCollection_should_execute_a_CreateCollectionOperation_when_opt op.IndexOptionDefaults.ToBsonDocument().Should().Be(options.IndexOptionDefaults.ToBsonDocument()); op.MaxDocuments.Should().Be(options.MaxDocuments); op.MaxSize.Should().Be(options.MaxSize); - op.NoPadding.Should().Be(options.NoPadding); op.StorageEngine.Should().Be(storageEngine); - op.UsePowerOf2Sizes.Should().Be(options.UsePowerOf2Sizes); op.ValidationAction.Should().Be(options.ValidationAction); op.ValidationLevel.Should().Be(options.ValidationLevel); op.Validator.Should().BeNull(); @@ -585,9 +578,7 @@ public void CreateCollection_should_execute_a_CreateCollectionOperation_when_opt op.IndexOptionDefaults.Should().BeNull(); op.MaxDocuments.Should().NotHaveValue(); op.MaxSize.Should().NotHaveValue(); - op.NoPadding.Should().NotHaveValue(); op.StorageEngine.Should().BeNull(); - op.UsePowerOf2Sizes.Should().NotHaveValue(); op.ValidationAction.Should().BeNull(); op.ValidationLevel.Should().BeNull(); op.Validator.Should().BeNull(); @@ -881,6 +872,7 @@ public void ListCollectionNames_should_execute_a_ListCollectionsOperation( [Theory] [ParameterAttributeData] + [Trait("Category", "Integration")] public void ListCollectionNames_should_return_expected_result( [Values(0, 1, 2, 10)] int numberOfCollections, [Values(null, false, true)] bool? 
usingAuthorizedCollections, @@ -1095,8 +1087,7 @@ public void RunCommand_should_default_to_ReadPreference_primary( var call = _operationExecutor.GetReadCall<BsonDocument>(); VerifySessionAndCancellationToken(call, session, cancellationToken); - var binding = call.Binding.Should().BeOfType<ReadBindingHandle>().Subject; - binding.ReadPreference.Should().Be(ReadPreference.Primary); + call.ReadPreference.Should().Be(ReadPreference.Primary); var op = call.Operation.Should().BeOfType<ReadCommandOperation<BsonDocument>>().Subject; op.DatabaseNamespace.Should().Be(_subject.DatabaseNamespace); @@ -1142,8 +1133,7 @@ public void RunCommand_should_use_the_provided_ReadPreference( var call = _operationExecutor.GetReadCall<BsonDocument>(); VerifySessionAndCancellationToken(call, session, cancellationToken); - var binding = call.Binding.Should().BeOfType<ReadBindingHandle>().Subject; - binding.ReadPreference.Should().Be(readPreference); + call.ReadPreference.Should().Be(readPreference); var op = call.Operation.Should().BeOfType<ReadCommandOperation<BsonDocument>>().Subject; op.DatabaseNamespace.Should().Be(_subject.DatabaseNamespace); @@ -1188,8 +1178,7 @@ public void RunCommand_should_run_a_non_read_command( var call = _operationExecutor.GetReadCall<BsonDocument>(); VerifySessionAndCancellationToken(call, session, cancellationToken); - var binding = call.Binding.Should().BeOfType<ReadBindingHandle>().Subject; - binding.ReadPreference.Should().Be(ReadPreference.Primary); + call.ReadPreference.Should().Be(ReadPreference.Primary); var op = call.Operation.Should().BeOfType<ReadCommandOperation<BsonDocument>>().Subject; op.DatabaseNamespace.Should().Be(_subject.DatabaseNamespace); @@ -1234,8 +1223,7 @@ public void RunCommand_should_run_a_json_command( var call = _operationExecutor.GetReadCall<BsonDocument>(); VerifySessionAndCancellationToken(call, session, cancellationToken); - var binding = call.Binding.Should().BeOfType<ReadBindingHandle>().Subject; - 
binding.ReadPreference.Should().Be(ReadPreference.Primary); + call.ReadPreference.Should().Be(ReadPreference.Primary); var op = call.Operation.Should().BeOfType<ReadCommandOperation<BsonDocument>>().Subject; op.DatabaseNamespace.Should().Be(_subject.DatabaseNamespace); @@ -1280,8 +1268,7 @@ public void RunCommand_should_run_a_serialized_command( var call = _operationExecutor.GetReadCall<BsonDocument>(); VerifySessionAndCancellationToken(call, session, cancellationToken); - var binding = call.Binding.Should().BeOfType<ReadBindingHandle>().Subject; - binding.ReadPreference.Should().Be(ReadPreference.Primary); + call.ReadPreference.Should().Be(ReadPreference.Primary); var op = call.Operation.Should().BeOfType<ReadCommandOperation<BsonDocument>>().Subject; op.DatabaseNamespace.Should().Be(_subject.DatabaseNamespace); diff --git a/tests/MongoDB.Driver.Tests/MongoIndexManagerTests.cs b/tests/MongoDB.Driver.Tests/MongoIndexManagerTests.cs index ac5fbef4289..1c0228246a1 100644 --- a/tests/MongoDB.Driver.Tests/MongoIndexManagerTests.cs +++ b/tests/MongoDB.Driver.Tests/MongoIndexManagerTests.cs @@ -22,6 +22,7 @@ namespace MongoDB.Driver.Tests { + [Trait("Category", "Integration")] public class MongoIndexManagerTests { [Theory] diff --git a/tests/MongoDB.Driver.Tests/MongoUrlBuilderTests.cs b/tests/MongoDB.Driver.Tests/MongoUrlBuilderTests.cs index 0ed8719d185..f45282623dc 100644 --- a/tests/MongoDB.Driver.Tests/MongoUrlBuilderTests.cs +++ b/tests/MongoDB.Driver.Tests/MongoUrlBuilderTests.cs @@ -94,6 +94,7 @@ public void TestAll() ServerMonitoringMode = ServerMonitoringMode.Poll, ServerSelectionTimeout = TimeSpan.FromSeconds(10), SocketTimeout = TimeSpan.FromSeconds(7), + Timeout = TimeSpan.FromSeconds(13), Username = "username", #pragma warning disable 618 UseSsl = true, @@ -140,6 +141,7 @@ public void TestAll() "serverMonitoringMode=Poll", "serverSelectionTimeout=10s", "socketTimeout=7s", + "timeout=13s", "waitQueueSize=123", "waitQueueTimeout=8s", "retryReads=false", @@ 
-185,6 +187,7 @@ public void TestAll() Assert.Equal(ServerMonitoringMode.Poll, builder.ServerMonitoringMode); Assert.Equal(TimeSpan.FromSeconds(10), builder.ServerSelectionTimeout); Assert.Equal(TimeSpan.FromSeconds(7), builder.SocketTimeout); + Assert.Equal(TimeSpan.FromSeconds(13), builder.Timeout); Assert.Equal("username", builder.Username); #pragma warning disable 618 Assert.Equal(true, builder.UseSsl); @@ -443,6 +446,7 @@ public void TestDefaults() Assert.Equal(MongoDefaults.LocalThreshold, builder.LocalThreshold); Assert.Equal(MongoDefaults.ServerSelectionTimeout, builder.ServerSelectionTimeout); Assert.Equal(MongoDefaults.SocketTimeout, builder.SocketTimeout); + Assert.Equal(null, builder.Timeout); Assert.Equal(MongoInternalDefaults.MongoClientSettings.SrvServiceName, builder.SrvServiceName); Assert.Equal(null, builder.Username); #pragma warning disable 618 @@ -1136,6 +1140,28 @@ public void TestSocketTimeout_Range() builder.SocketTimeout = TimeSpan.FromSeconds(1); } + [Theory] + [InlineData(null, "mongodb://localhost", new[] { "" })] + [InlineData(-1, "mongodb://localhost/?timeout{0}", new[] { "=0", "MS=0" })] + [InlineData(500, "mongodb://localhost/?timeout{0}", new[] { "=500ms", "=0.5", "=0.5s", "=00:00:00.5", "MS=500" })] + [InlineData(30000, "mongodb://localhost/?timeout{0}", new[] { "=30s", "=30000ms", "=30", "=0.5m", "=00:00:30", "MS=30000" })] + [InlineData(1800000, "mongodb://localhost/?timeout{0}", new[] { "=30m", "=1800000ms", "=1800", "=1800s", "=0.5h", "=00:30:00", "MS=1800000" })] + [InlineData(3600000, "mongodb://localhost/?timeout{0}", new[] { "=1h", "=3600000ms", "=3600", "=3600s", "=60m", "=01:00:00", "MS=3600000" })] + [InlineData(3723000, "mongodb://localhost/?timeout{0}", new[] { "=01:02:03", "=3723000ms", "=3723", "=3723s", "MS=3723000" })] + public void TestTimeout(int? ms, string formatString, string[] values) + { + var timeout = (ms == null) ? 
(TimeSpan?)null : TimeSpan.FromMilliseconds(ms.Value); + var built = new MongoUrlBuilder { Server = _localhost }; + if (timeout != null) { built.Timeout = timeout.Value; } + + var canonicalConnectionString = string.Format(formatString, values[0]); + foreach (var builder in EnumerateBuiltAndParsedBuilders(built, formatString, values)) + { + Assert.Equal(timeout, builder.Timeout); + Assert.Equal(canonicalConnectionString, builder.ToString()); + } + } + [Fact] public void TestSrvServiceName() { diff --git a/tests/MongoDB.Driver.Tests/MongoUrlTests.cs b/tests/MongoDB.Driver.Tests/MongoUrlTests.cs index d06704219fe..e63873964ce 100644 --- a/tests/MongoDB.Driver.Tests/MongoUrlTests.cs +++ b/tests/MongoDB.Driver.Tests/MongoUrlTests.cs @@ -186,6 +186,7 @@ public void TestAll() ServerMonitoringMode = ServerMonitoringMode.Poll, ServerSelectionTimeout = TimeSpan.FromSeconds(10), SocketTimeout = TimeSpan.FromSeconds(7), + Timeout = TimeSpan.FromSeconds(13), Username = "username", UseTls = true, W = 2, @@ -225,6 +226,7 @@ public void TestAll() "serverMonitoringMode=Poll", "serverSelectionTimeout=10s", "socketTimeout=7s", + "timeout=13s", "waitQueueSize=123", "waitQueueTimeout=8s", "retryReads=false", @@ -268,6 +270,7 @@ public void TestAll() Assert.Equal(ServerMonitoringMode.Poll, url.ServerMonitoringMode); Assert.Equal(TimeSpan.FromSeconds(10), url.ServerSelectionTimeout); Assert.Equal(TimeSpan.FromSeconds(7), url.SocketTimeout); + Assert.Equal(TimeSpan.FromSeconds(13), url.Timeout); Assert.Equal(true, url.TlsDisableCertificateRevocationCheck); Assert.Equal("username", url.Username); #pragma warning disable 618 diff --git a/tests/MongoDB.Driver.Tests/OfTypeMongoCollectionTests.cs b/tests/MongoDB.Driver.Tests/OfTypeMongoCollectionTests.cs index 04de648d449..a1adc1f8713 100644 --- a/tests/MongoDB.Driver.Tests/OfTypeMongoCollectionTests.cs +++ b/tests/MongoDB.Driver.Tests/OfTypeMongoCollectionTests.cs @@ -954,6 +954,7 @@ public class UpdateTestCases : IValueGenerator } } + 
[Trait("Category", "Integration")] public class OfTypeCollectionIntegrationTests : IDisposable { private readonly IMongoCollection<BsonDocument> _docsCollection; diff --git a/tests/MongoDB.Driver.Tests/OperationContextExtensionsTests.cs b/tests/MongoDB.Driver.Tests/OperationContextExtensionsTests.cs new file mode 100644 index 00000000000..bf7d8345b77 --- /dev/null +++ b/tests/MongoDB.Driver.Tests/OperationContextExtensionsTests.cs @@ -0,0 +1,77 @@ +/* Copyright 2010-present MongoDB Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +using System; +using System.Threading; +using FluentAssertions; +using MongoDB.Driver.Core.Misc; +using Xunit; + +namespace MongoDB.Driver.Tests; + +public class OperationContextExtensionsTests +{ + [Fact] + public void IsRootContextTimeoutConfigured_should_throw_on_null() + { + OperationContext context = null; + var exception = Record.Exception(() => context.IsRootContextTimeoutConfigured()); + + exception.Should().BeOfType<ArgumentNullException>().Subject + .ParamName.Should().Be("operationContext"); + } + + [Theory] + [InlineData(false, null)] + [InlineData(true, 0)] + [InlineData(true, Timeout.Infinite)] + [InlineData(true, 5)] + public void IsRootContextTimeoutConfigured_should_return_expected_result(bool expectedResult, int? timeoutMs) + { + TimeSpan? timeout = timeoutMs.HasValue ? 
TimeSpan.FromMilliseconds(timeoutMs.Value) : null; + var subject = new OperationContext(timeout, CancellationToken.None); + + var result = subject.IsRootContextTimeoutConfigured(); + + result.Should().Be(expectedResult); + } + + [Fact] + public void RemainingTimeoutOrDefault_should_throw_on_null() + { + OperationContext context = null; + var exception = Record.Exception(() => context.RemainingTimeoutOrDefault(TimeSpan.Zero)); + + exception.Should().BeOfType<ArgumentNullException>().Subject + .ParamName.Should().Be("operationContext"); + } + + [Theory] + [InlineData(10, null, 10)] + [InlineData(0, 0, 10)] + [InlineData(Timeout.Infinite, Timeout.Infinite, 10)] + [InlineData(5, 5, 10)] + public void RemainingTimeoutOrDefault_should_return_expected_result(int expectedResultMs, int? timeoutMs, int defaultValueMs) + { + var clock = new FrozenClock(DateTime.UtcNow); + TimeSpan? timeout = timeoutMs.HasValue ? TimeSpan.FromMilliseconds(timeoutMs.Value) : null; + var defaultValue = TimeSpan.FromMilliseconds(defaultValueMs); + var subject = new OperationContext(clock, timeout, CancellationToken.None); + + var result = subject.RemainingTimeoutOrDefault(defaultValue); + + result.Should().Be(TimeSpan.FromMilliseconds(expectedResultMs)); + } +} diff --git a/tests/MongoDB.Driver.Tests/OperationContextTests.cs b/tests/MongoDB.Driver.Tests/OperationContextTests.cs new file mode 100644 index 00000000000..2116f804033 --- /dev/null +++ b/tests/MongoDB.Driver.Tests/OperationContextTests.cs @@ -0,0 +1,350 @@ +/* Copyright 2010-present MongoDB Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +using System; +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; +using FluentAssertions; +using MongoDB.Driver.Core.Misc; +using MongoDB.TestHelpers.XunitExtensions; +using Xunit; + +namespace MongoDB.Driver.Tests +{ + public class OperationContextTests + { + [Fact] + public void Constructor_should_initialize_properties() + { + var timeout = TimeSpan.FromSeconds(42); + var clock = new FrozenClock(DateTime.UtcNow); + using var cancellationTokenSource = new CancellationTokenSource(); + var cancellationToken = cancellationTokenSource.Token; + + using var operationContext = new OperationContext(clock, timeout, cancellationToken); + + operationContext.Timeout.Should().Be(timeout); + operationContext.RemainingTimeout.Should().Be(timeout); + operationContext.CancellationToken.Should().Be(cancellationToken); + operationContext.RootContext.Should().Be(operationContext); + } + + [Fact] + public void Constructor_should_throw_on_negative_timeout() + { + var exception = Record.Exception(() => new OperationContext(TimeSpan.FromSeconds(-5), CancellationToken.None)); + + exception.Should().BeOfType<ArgumentOutOfRangeException>(); + } + + [Theory] + [InlineData(Timeout.Infinite, 0)] + [InlineData(Timeout.Infinite, 100)] + [InlineData(100, 0)] + [InlineData(100, 100)] + [InlineData(100, 1000)] + public void Elapsed_should_return_expected_result(int timeoutMs, int elapsedMs) + { + var expected = TimeSpan.FromMilliseconds(elapsedMs); + using var subject = 
CreateSubject(TimeSpan.FromMilliseconds(timeoutMs), expected, CancellationToken.None); + + subject.Elapsed.Should().Be(expected); + } + + [Theory] + [InlineData(Timeout.Infinite, 0)] + [InlineData(Timeout.Infinite, 25)] + [InlineData(100, 0)] + [InlineData(100, 25)] + public void Fork_should_copy_context(int timeoutMs, int elapsedMs) + { + using var cancellationTokenSource = new CancellationTokenSource(); + using var originalContext = CreateSubject(TimeSpan.FromMilliseconds(timeoutMs), TimeSpan.FromMilliseconds(elapsedMs), cancellationTokenSource.Token); + + using var forkedContext = originalContext.Fork(); + + forkedContext.Should().NotBe(originalContext); + forkedContext.CancellationToken.Should().Be(originalContext.CancellationToken); + forkedContext.Timeout.Should().Be(originalContext.Timeout); + forkedContext.Elapsed.Should().Be(originalContext.Elapsed); + forkedContext.RootContext.Should().Be(originalContext.RootContext); + } + + [Fact] + public void RemainingTimeout_should_return_expected_result() + { + var timeout = TimeSpan.FromMilliseconds(500); + var elapsed = TimeSpan.FromMilliseconds(10); + using var subject = CreateSubject(timeout, elapsed, CancellationToken.None); + + subject.RemainingTimeout.Should().Be(timeout - elapsed); + } + + [Fact] + public void RemainingTimeout_should_return_infinite_for_infinite_timeout() + { + using var subject = CreateSubject(timeout: Timeout.InfiniteTimeSpan, elapsed: TimeSpan.FromMilliseconds(10)); + + subject.RemainingTimeout.Should().Be(Timeout.InfiniteTimeSpan); + } + + [Fact] + public void RemainingTimeout_should_return_zero_for_timeout_context() + { + using var subject = CreateSubject(timeout: TimeSpan.FromMilliseconds(5), elapsed: TimeSpan.FromMilliseconds(10)); + + subject.RemainingTimeout.Should().Be(TimeSpan.Zero); + } + + [Theory] + [MemberData(nameof(IsTimedOut_test_cases))] + public void IsTimedOut_should_return_expected_result(bool expected, TimeSpan timeout, TimeSpan elapsed) + { + using var subject = 
CreateSubject(timeout, elapsed); + + var result = subject.IsTimedOut(); + + result.Should().Be(expected); + } + + public static IEnumerable<object[]> IsTimedOut_test_cases = + [ + [false, Timeout.InfiniteTimeSpan, TimeSpan.FromMilliseconds(5)], + [false, TimeSpan.FromMilliseconds(50), TimeSpan.FromMilliseconds(5)], + [true, TimeSpan.FromMilliseconds(5), TimeSpan.FromMilliseconds(10)], + ]; + + [Fact] + public void ThrowIfTimedOutOrCanceled_should_not_throw_if_no_timeout_and_no_cancellation() + { + using var subject = CreateSubject(timeout: TimeSpan.FromMilliseconds(20), elapsed: TimeSpan.FromMilliseconds(10)); + + var exception = Record.Exception(() => subject.ThrowIfTimedOutOrCanceled()); + + exception.Should().BeNull(); + } + + [Fact] + public void ThrowIfTimedOutOrCanceled_should_throw_on_timeout() + { + using var subject = CreateSubject(timeout: TimeSpan.FromMilliseconds(10), elapsed: TimeSpan.FromMilliseconds(20)); + + var exception = Record.Exception(() => subject.ThrowIfTimedOutOrCanceled()); + + exception.Should().BeOfType<TimeoutException>(); + } + + [Fact] + public void ThrowIfTimedOutOrCanceled_should_throw_on_cancellation() + { + using var cancellationSource = new CancellationTokenSource(); + using var subject = CreateSubject(timeout: Timeout.InfiniteTimeSpan, elapsed: TimeSpan.Zero, cancellationSource.Token); + cancellationSource.Cancel(); + + var exception = Record.Exception(() => subject.ThrowIfTimedOutOrCanceled()); + + exception.Should().BeOfType<OperationCanceledException>(); + } + + [Fact] + public void ThrowIfTimedOutOrCanceled_should_throw_CancelledException_when_timedout_and_cancelled() + { + using var cancellationSource = new CancellationTokenSource(); + using var subject = CreateSubject(timeout: TimeSpan.FromMilliseconds(10), elapsed: TimeSpan.FromMilliseconds(20), cancellationSource.Token); + cancellationSource.Cancel(); + + var exception = Record.Exception(() => subject.ThrowIfTimedOutOrCanceled()); + + 
exception.Should().BeOfType<OperationCanceledException>(); + } + + [Theory] + [ParameterAttributeData] + public async Task Wait_should_throw_if_context_is_timedout([Values(true, false)] bool async) + { + var taskCompletionSource = new TaskCompletionSource<bool>(); + using var subject = CreateSubject(timeout: TimeSpan.FromMilliseconds(10), elapsed: TimeSpan.FromMilliseconds(20)); + + var exception = async ? + await Record.ExceptionAsync(() => subject.WaitTaskAsync(taskCompletionSource.Task)) : + Record.Exception(() => subject.WaitTask(taskCompletionSource.Task)); + + exception.Should().BeOfType<TimeoutException>(); + } + + [Theory] + [ParameterAttributeData] + public async Task Wait_should_throw_if_context_is_cancelled([Values(true, false)] bool async) + { + var taskCompletionSource = new TaskCompletionSource<bool>(); + var cancellationTokenSource = new CancellationTokenSource(); + cancellationTokenSource.Cancel(); + using var subject = CreateSubject(timeout: Timeout.InfiniteTimeSpan, elapsed: TimeSpan.Zero, cancellationTokenSource.Token); + + var exception = async ? + await Record.ExceptionAsync(() => subject.WaitTaskAsync(taskCompletionSource.Task)) : + Record.Exception(() => subject.WaitTask(taskCompletionSource.Task)); + + exception.Should().BeOfType<OperationCanceledException>(); + } + + [Theory] + [ParameterAttributeData] + public async Task Wait_should_rethrow_on_failed_task([Values(true, false)] bool async) + { + var ex = new InvalidOperationException(); + var task = Task.FromException(ex); + using var subject = CreateSubject(timeout: Timeout.InfiniteTimeSpan, elapsed: TimeSpan.FromMilliseconds(20)); + + var exception = async ? 
+ await Record.ExceptionAsync(() => subject.WaitTaskAsync(task)) : + Record.Exception(() => subject.WaitTask(task)); + + exception.Should().Be(ex); + } + + [Theory] + [ParameterAttributeData] + public async Task Wait_should_rethrow_on_failed_promise_task([Values(true, false)] bool async) + { + var ex = new InvalidOperationException("Ups!"); + var taskCompletionSource = new TaskCompletionSource<bool>(); + using var subject = CreateSubject(timeout: Timeout.InfiniteTimeSpan, elapsed: TimeSpan.Zero); + + var task = Task.Run(async () => + { + if (async) + { + await subject.WaitTaskAsync(taskCompletionSource.Task); + } + else + { + subject.WaitTask(taskCompletionSource.Task); + } + }); + Thread.Sleep(10); + taskCompletionSource.SetException(ex); + + var exception = await Record.ExceptionAsync(() => task); + exception.Should().Be(ex); + } + + [Theory] + [ParameterAttributeData] + public async Task Wait_should_throw_on_timeout([Values(true, false)] bool async) + { + var taskCompletionSource = new TaskCompletionSource<bool>(); + using var subject = CreateSubject(timeout: TimeSpan.FromMilliseconds(10), elapsed: TimeSpan.FromMilliseconds(20)); + + var exception = async ? + await Record.ExceptionAsync(() => subject.WaitTaskAsync(taskCompletionSource.Task)) : + Record.Exception(() => subject.WaitTask(taskCompletionSource.Task)); + + exception.Should().BeOfType<TimeoutException>(); + } + + [Theory] + [ParameterAttributeData] + public async Task Wait_should_not_throw_on_resolved_task_with_timedout_context([Values(true, false)] bool async) + { + var task = Task.FromResult(42); + using var subject = CreateSubject(timeout: TimeSpan.FromMilliseconds(10), elapsed: TimeSpan.FromMilliseconds(20)); + + var exception = async ? 
+ await Record.ExceptionAsync(() => subject.WaitTaskAsync(task)) : + Record.Exception(() => subject.WaitTask(task)); + + exception.Should().BeNull(); + } + + + [Theory] + [MemberData(nameof(WithTimeout_test_cases))] + public void WithTimeout_should_calculate_proper_timeout(TimeSpan expected, TimeSpan originalTimeout, TimeSpan newTimeout) + { + using var subject = CreateSubject(timeout: originalTimeout, elapsed: TimeSpan.Zero); + + var resultContext = subject.WithTimeout(newTimeout); + + resultContext.Timeout.Should().Be(expected); + } + + public static IEnumerable<object[]> WithTimeout_test_cases = + [ + [Timeout.InfiniteTimeSpan, Timeout.InfiniteTimeSpan, Timeout.InfiniteTimeSpan], + [TimeSpan.FromMilliseconds(5), Timeout.InfiniteTimeSpan, TimeSpan.FromMilliseconds(5)], + [TimeSpan.FromMilliseconds(5), TimeSpan.FromMilliseconds(5), Timeout.InfiniteTimeSpan], + [TimeSpan.FromMilliseconds(5), TimeSpan.FromMilliseconds(5), TimeSpan.FromMilliseconds(10)], + [TimeSpan.FromMilliseconds(5), TimeSpan.FromMilliseconds(10), TimeSpan.FromMilliseconds(5)], + ]; + + [Fact] + public void WithTimeout_should_set_RootContext() + { + using var rootContext = CreateSubject(timeout: Timeout.InfiniteTimeSpan, elapsed: TimeSpan.Zero); + using var resultContext = rootContext.WithTimeout(TimeSpan.FromSeconds(10)); + + resultContext.RootContext.Should().Be(rootContext); + } + + [Fact] + public void WithTimeout_should_preserve_RootContext() + { + using var rootContext = CreateSubject(timeout: Timeout.InfiniteTimeSpan, elapsed: TimeSpan.Zero); + + using var intermediateContext = rootContext.WithTimeout(TimeSpan.FromSeconds(200)); + using var resultContext = intermediateContext.WithTimeout(TimeSpan.FromSeconds(10)); + + resultContext.RootContext.Should().Be(rootContext); + } + + [Fact] + public void WithTimeout_should_create_timed_out_context_on_timed_out_context() + { + using var rootContext = CreateSubject(timeout: TimeSpan.FromMilliseconds(5), elapsed: TimeSpan.FromMilliseconds(10)); + 
rootContext.IsTimedOut().Should().BeTrue(); + + using var resultContext = rootContext.WithTimeout(TimeSpan.FromSeconds(7)); + + resultContext.IsTimedOut().Should().BeTrue(); + } + + [Fact] + public void WithTimeout_should_throw_on_negative_timeout() + { + using var rootContext = CreateSubject(timeout: Timeout.InfiniteTimeSpan, elapsed: TimeSpan.Zero); + + var exception = Record.Exception(() => rootContext.WithTimeout(TimeSpan.FromSeconds(-5))); + + exception.Should().BeOfType<ArgumentOutOfRangeException>() + .Subject.ParamName.Should().Be("timeout"); + } + + private static OperationContext CreateSubject(TimeSpan? timeout, TimeSpan elapsed = default, CancellationToken cancellationToken = default) + { + var clock = new FrozenClock(DateTime.UtcNow); + var result = new OperationContext(clock, timeout, cancellationToken); + + if (elapsed != TimeSpan.Zero) + { + clock.AdvanceCurrentTime(elapsed); + } + + return result; + } + } +} + diff --git a/tests/MongoDB.Driver.Tests/OperationExecutorTests.cs b/tests/MongoDB.Driver.Tests/OperationExecutorTests.cs new file mode 100644 index 00000000000..3512a1652b4 --- /dev/null +++ b/tests/MongoDB.Driver.Tests/OperationExecutorTests.cs @@ -0,0 +1,131 @@ +/* Copyright 2010-present MongoDB Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +using System; +using System.Threading; +using System.Threading.Tasks; +using FluentAssertions; +using MongoDB.Driver.Core.Bindings; +using MongoDB.Driver.Core.Clusters; +using MongoDB.Driver.Core.Operations; +using MongoDB.TestHelpers.XunitExtensions; +using Moq; +using Xunit; + +namespace MongoDB.Driver.Tests +{ + public class OperationExecutorTests + { + [Fact] + public void StartImplicitSession_should_call_cluster_StartSession() + { + var subject = CreateSubject(out var clusterMock); + + subject.StartImplicitSession(); + + clusterMock.Verify(c => c.StartSession(It.Is<CoreSessionOptions>(v => v.IsImplicit && v.IsCausallyConsistent == false && v.IsSnapshot == false))); + } + + [Theory] + [ParameterAttributeData] + public async Task ExecuteReadOperation_throws_on_null_operation([Values(true, false)] bool async) + { + var subject = CreateSubject(out _); + var operationContext = new OperationContext(Timeout.InfiniteTimeSpan, CancellationToken.None); + var session = Mock.Of<IClientSessionHandle>(); + + var exception = async ? + await Record.ExceptionAsync(() => subject.ExecuteReadOperationAsync<object>(operationContext, session, null, ReadPreference.Primary, true)) : + Record.Exception(() => subject.ExecuteReadOperation<object>(operationContext, session, null, ReadPreference.Primary, true)); + + exception.Should().BeOfType<ArgumentNullException>() + .Subject.ParamName.Should().Be("operation"); + } + + [Theory] + [ParameterAttributeData] + public async Task ExecuteReadOperation_throws_on_null_readPreference([Values(true, false)] bool async) + { + var subject = CreateSubject(out _); + var operationContext = new OperationContext(Timeout.InfiniteTimeSpan, CancellationToken.None); + var operation = Mock.Of<IReadOperation<object>>(); + var session = Mock.Of<IClientSessionHandle>(); + + var exception = async ? 
+ await Record.ExceptionAsync(() => subject.ExecuteReadOperationAsync(operationContext, session, operation, null, true)) : + Record.Exception(() => subject.ExecuteReadOperation(operationContext, session, operation, null, true)); + + exception.Should().BeOfType<ArgumentNullException>() + .Subject.ParamName.Should().Be("readPreference"); + } + + [Theory] + [ParameterAttributeData] + public async Task ExecuteReadOperation_throws_on_null_session([Values(true, false)] bool async) + { + var subject = CreateSubject(out _); + var operationContext = new OperationContext(Timeout.InfiniteTimeSpan, CancellationToken.None); + var operation = Mock.Of<IReadOperation<object>>(); + + var exception = async ? + await Record.ExceptionAsync(() => subject.ExecuteReadOperationAsync(operationContext, null, operation, ReadPreference.Primary, true)) : + Record.Exception(() => subject.ExecuteReadOperation(operationContext, null, operation, ReadPreference.Primary, true)); + + exception.Should().BeOfType<ArgumentNullException>() + .Subject.ParamName.Should().Be("session"); + } + + [Theory] + [ParameterAttributeData] + public async Task ExecuteWriteOperation_throws_on_null_operation([Values(true, false)] bool async) + { + var subject = CreateSubject(out _); + var operationContext = new OperationContext(Timeout.InfiniteTimeSpan, CancellationToken.None); + var session = Mock.Of<IClientSessionHandle>(); + + var exception = async ? 
+ await Record.ExceptionAsync(() => subject.ExecuteWriteOperationAsync<object>(operationContext, session, null, true)) : + Record.Exception(() => subject.ExecuteWriteOperation<object>(operationContext, session, null, true)); + + exception.Should().BeOfType<ArgumentNullException>() + .Subject.ParamName.Should().Be("operation"); + } + + [Theory] + [ParameterAttributeData] + public async Task ExecuteWriteOperation_throws_on_null_session([Values(true, false)] bool async) + { + var subject = CreateSubject(out _); + var operationContext = new OperationContext(Timeout.InfiniteTimeSpan, CancellationToken.None); + var operation = Mock.Of<IWriteOperation<object>>(); + + var exception = async ? + await Record.ExceptionAsync(() => subject.ExecuteWriteOperationAsync(operationContext, null, operation, true)) : + Record.Exception(() => subject.ExecuteWriteOperation(operationContext, null, operation, true)); + + exception.Should().BeOfType<ArgumentNullException>() + .Subject.ParamName.Should().Be("session"); + } + + private OperationExecutor CreateSubject(out Mock<IClusterInternal> clusterMock) + { + clusterMock = new Mock<IClusterInternal>(); + var clientMock = new Mock<IMongoClient>(); + clientMock.SetupGet(c => c.Cluster).Returns(clusterMock.Object); + return new OperationExecutor(clientMock.Object); + } + } +} + diff --git a/tests/MongoDB.Driver.Tests/PinnedShardRouterTests.cs b/tests/MongoDB.Driver.Tests/PinnedShardRouterTests.cs index 7f13da6299a..0bc0f122ebc 100644 --- a/tests/MongoDB.Driver.Tests/PinnedShardRouterTests.cs +++ b/tests/MongoDB.Driver.Tests/PinnedShardRouterTests.cs @@ -26,12 +26,12 @@ using MongoDB.Driver.Core.Servers; using MongoDB.Driver.Core.TestHelpers.Logging; using MongoDB.Driver.Core.TestHelpers.XunitExtensions; -using MongoDB.Driver.TestHelpers; using Xunit; using Xunit.Abstractions; namespace MongoDB.Driver.Tests { + [Trait("Category", "Integration")] public class PinnedShardRouterTests : LoggableTestClass { private static readonly HashSet<string> 
__commandsToNotCapture = new HashSet<string> diff --git a/tests/MongoDB.Driver.Tests/PipelineDefinitionBuilderTests.cs b/tests/MongoDB.Driver.Tests/PipelineDefinitionBuilderTests.cs index 3c556553c7d..0c64431d746 100644 --- a/tests/MongoDB.Driver.Tests/PipelineDefinitionBuilderTests.cs +++ b/tests/MongoDB.Driver.Tests/PipelineDefinitionBuilderTests.cs @@ -181,6 +181,7 @@ public void Lookup_should_throw_when_pipeline_is_null() } [Fact] + [Trait("Category", "Integration")] public void Merge_should_add_expected_stage() { var pipeline = new EmptyPipelineDefinition<BsonDocument>(); @@ -233,15 +234,15 @@ public void RankFusion_with_named_pipelines_should_add_expected_stage() stages.Count.Should().Be(1); stages[0].Should().Be(""" { - $rankFusion: { - "input" : { - "pipelines" : { + $rankFusion: { + "input" : { + "pipelines" : { "p1" : [{ "$match" : { "x" : 1 } }, { "$sort" : { "y" : 1 } }], - "p2" : [{ "$match" : { "x" : 2 } }, { "$sort" : { "y" : -1 } }] - } + "p2" : [{ "$match" : { "x" : 2 } }, { "$sort" : { "y" : -1 } }] + } }, "combination" : { "weights" : { "p1" : 0.3, "p2" : 0.7 } } - "scoreDetails" : true + "scoreDetails" : true } } """); @@ -260,12 +261,12 @@ public void RankFusion_without_named_pipelines_should_add_expected_stage() stages.Count.Should().Be(1); stages[0].Should().Be(""" { - $rankFusion: { - "input" : { - "pipelines" : { + $rankFusion: { + "input" : { + "pipelines" : { "pipeline1" : [{ "$match" : { "x" : 1 } }, { "$sort" : { "y" : 1 } }], - "pipeline2" : [{ "$match" : { "x" : 2 } }, { "$sort" : { "y" : -1 } }] - } + "pipeline2" : [{ "$match" : { "x" : 2 } }, { "$sort" : { "y" : -1 } }] + } } } } @@ -286,13 +287,13 @@ public void RankFusion_using_pipeline_weight_tuples_should_add_expected_stage() stages.Count.Should().Be(1); stages[0].Should().Be(""" { - $rankFusion: { - "input" : { - "pipelines" : { + $rankFusion: { + "input" : { + "pipelines" : { "pipeline1" : [{ "$match" : { "x" : 1 } }, { "$sort" : { "y" : 1 } }], - "pipeline2" : [{ "$match" : { 
"x" : 2 } }, { "$sort" : { "y" : -1 } }], - "pipeline3" : [{ "$match" : { "x" : 3 } }, { "$sort" : { "y" : 1 } }] - } + "pipeline2" : [{ "$match" : { "x" : 2 } }, { "$sort" : { "y" : -1 } }], + "pipeline3" : [{ "$match" : { "x" : 3 } }, { "$sort" : { "y" : 1 } }] + } }, "combination" : { "weights" : { "pipeline1" : 0.3, "pipeline2" : 0.7 } } } diff --git a/tests/MongoDB.Driver.Tests/PipelineStageDefinitionBuilderTests.cs b/tests/MongoDB.Driver.Tests/PipelineStageDefinitionBuilderTests.cs index d78985a8932..1462d3db2be 100644 --- a/tests/MongoDB.Driver.Tests/PipelineStageDefinitionBuilderTests.cs +++ b/tests/MongoDB.Driver.Tests/PipelineStageDefinitionBuilderTests.cs @@ -27,6 +27,7 @@ namespace MongoDB.Driver.Tests { + [Trait("Category", "Integration")] public class PipelineStageDefinitionBuilderTests { // public methods @@ -150,15 +151,15 @@ public void GeoNear_with_array_should_return_the_expected_result() var stage = RenderStage(result); stage.Document.Should().Be(""" - { - "$geoNear" : { - "near" : [34.0, 67.0], - "distanceField" : "calculatedDistance", - "maxDistance" : 3.0, - "query" : { "testfield" : "testvalue" }, - "includeLocs" : "usedLocation", - "spherical" : true - } + { + "$geoNear" : { + "near" : [34.0, 67.0], + "distanceField" : "calculatedDistance", + "maxDistance" : 3.0, + "query" : { "testfield" : "testvalue" }, + "includeLocs" : "usedLocation", + "spherical" : true + } } """); } @@ -179,15 +180,15 @@ public void GeoNear_with_geojson_point_should_return_the_expected_result() var stage = RenderStage(result); stage.Document.Should().Be(""" - { - "$geoNear" : { - "near" : { "type" : "Point", "coordinates" : [34.0, 67.0] }, - "distanceField" : "calculatedDistance", - "maxDistance" : 3.0, - "query" : { "testfield" : "testvalue" }, - "includeLocs" : "usedLocation", - "spherical" : true - } + { + "$geoNear" : { + "near" : { "type" : "Point", "coordinates" : [34.0, 67.0] }, + "distanceField" : "calculatedDistance", + "maxDistance" : 3.0, + "query" : { 
"testfield" : "testvalue" }, + "includeLocs" : "usedLocation", + "spherical" : true + } } """); } diff --git a/tests/MongoDB.Driver.Tests/ReadPreferenceOnStandaloneTests.cs b/tests/MongoDB.Driver.Tests/ReadPreferenceOnStandaloneTests.cs index 63231820663..fb507d13c37 100644 --- a/tests/MongoDB.Driver.Tests/ReadPreferenceOnStandaloneTests.cs +++ b/tests/MongoDB.Driver.Tests/ReadPreferenceOnStandaloneTests.cs @@ -29,6 +29,7 @@ namespace MongoDB.Driver.Tests { + [Trait("Category", "Integration")] public class ReadPreferenceOnStandaloneTests : LoggableTestClass { public ReadPreferenceOnStandaloneTests(ITestOutputHelper output) : base(output) diff --git a/tests/MongoDB.Driver.Tests/RetryableWritesTests.cs b/tests/MongoDB.Driver.Tests/RetryableWritesTests.cs index 6f2fc4bad44..c0f7ef4b1d8 100644 --- a/tests/MongoDB.Driver.Tests/RetryableWritesTests.cs +++ b/tests/MongoDB.Driver.Tests/RetryableWritesTests.cs @@ -30,6 +30,7 @@ namespace MongoDB.Driver.Tests { + [Trait("Category", "Integration")] public class RetryableWritesTests : LoggableTestClass { // public constructors @@ -95,26 +96,6 @@ public void Retryable_write_errorlabel_should_not_be_added_with_retryWrites_fals } } - [Fact] - public void Retryable_write_operation_should_throw_custom_exception_on_servers_using_mmapv1() - { - RequireSupportForRetryableWrites(); - RequireServer.Check().ClusterType(ClusterType.ReplicaSet).StorageEngine("mmapv1"); - - using (var client = GetClient()) - using (var session = client.StartSession()) - { - var database = client.GetDatabase(DriverTestConfiguration.DatabaseNamespace.DatabaseName); - var collection = database.GetCollection<BsonDocument>(DriverTestConfiguration.CollectionNamespace.CollectionName); - var document = new BsonDocument("x", 1); - var exception = Record.Exception(() => collection.InsertOne(document)); - - exception.Message.Should().Contain( - "This MongoDB deployment does not support retryable writes. 
" + - "Please add retryWrites=false to your connection string."); - } - } - [Fact] public void TxnNumber_should_be_included_with_FindOneAndDelete() { diff --git a/tests/MongoDB.Driver.Tests/Samples/AggregationSample.cs b/tests/MongoDB.Driver.Tests/Samples/AggregationSample.cs index ed26da058fe..54dc53dfc90 100644 --- a/tests/MongoDB.Driver.Tests/Samples/AggregationSample.cs +++ b/tests/MongoDB.Driver.Tests/Samples/AggregationSample.cs @@ -23,6 +23,7 @@ namespace MongoDB.Driver.Tests.Samples { + [Trait("Category", "Integration")] public class AggregationSample { private static IMongoCollection<ZipEntry> __collection; diff --git a/tests/MongoDB.Driver.Tests/Search/AtlasSearchIndexManagmentTests.cs b/tests/MongoDB.Driver.Tests/Search/AtlasSearchIndexManagmentTests.cs index 982cb8b5c49..798648fceec 100644 --- a/tests/MongoDB.Driver.Tests/Search/AtlasSearchIndexManagmentTests.cs +++ b/tests/MongoDB.Driver.Tests/Search/AtlasSearchIndexManagmentTests.cs @@ -101,7 +101,7 @@ public async Task Case3_driver_can_successfully_drop_search_indexes( { _collection.SearchIndexes.DropOne(indexName); } - + while (true) { List<BsonDocument> indexes; @@ -114,7 +114,7 @@ public async Task Case3_driver_can_successfully_drop_search_indexes( { indexes = _collection.SearchIndexes.List().ToList(); } - + if (indexes.Count == 0) { return; @@ -141,7 +141,7 @@ public async Task Case4_driver_can_update_a_search_index( { _collection.SearchIndexes.Update(indexName, indexNewDefinition); } - + var updatedIndex = await GetIndexes(async, indexName); updatedIndex[0]["latestDefinition"].AsBsonDocument.Should().Be(indexNewDefinition); } @@ -177,7 +177,7 @@ public async Task Case6_driver_can_create_and_list_search_indexes_with_non_defau var indexNameCreated = async ? 
await collection.SearchIndexes.CreateOneAsync(_indexDefinition, indexName) : collection.SearchIndexes.CreateOne(_indexDefinition, indexName); - + indexNameCreated.Should().Be(indexName); var indexes = await GetIndexes(async, indexName); @@ -218,7 +218,7 @@ public async Task Case7_driver_can_handle_search_index_types_when_creating_index ? await _collection.SearchIndexes.CreateOneAsync(new CreateSearchIndexModel(indexName3, SearchIndexType.VectorSearch, _vectorIndexDefinition)) : _collection.SearchIndexes.CreateOne(new CreateSearchIndexModel(indexName3, SearchIndexType.VectorSearch, _vectorIndexDefinition)); indexNameCreated.Should().Be(indexName3); - + var indexCreated3 = await GetIndexes(async, indexName3); indexCreated3[0]["type"].AsString.Should().Be("vectorSearch"); } @@ -233,7 +233,7 @@ public async Task Case8_driver_requires_explicit_type_to_create_vector_search_in var exception = async ? await Record.ExceptionAsync(() => _collection.SearchIndexes.CreateOneAsync(_vectorIndexDefinition, indexName)) : Record.Exception(() => _collection.SearchIndexes.CreateOne(_vectorIndexDefinition, indexName)); - + exception.Message.Should().Contain("Attribute mappings missing"); } @@ -242,7 +242,7 @@ private async Task<BsonDocument> CreateIndexAndValidate(string indexName, BsonDo var indexNameActual = async ? 
await _collection.SearchIndexes.CreateOneAsync(indexDefinition, indexName) : _collection.SearchIndexes.CreateOne(indexDefinition, indexName); - + indexNameActual.Should().Be(indexName); var result = await GetIndexes(async, indexName); diff --git a/tests/MongoDB.Driver.Tests/Search/AtlasSearchTests.cs b/tests/MongoDB.Driver.Tests/Search/AtlasSearchTests.cs index b014e9152a8..0a1205ebb96 100644 --- a/tests/MongoDB.Driver.Tests/Search/AtlasSearchTests.cs +++ b/tests/MongoDB.Driver.Tests/Search/AtlasSearchTests.cs @@ -18,7 +18,6 @@ using System.Linq; using FluentAssertions; using MongoDB.Bson; -using MongoDB.Bson.Serialization; using MongoDB.Bson.Serialization.Attributes; using MongoDB.Driver.Core.Misc; using MongoDB.Driver.Core.TestHelpers.Logging; @@ -61,13 +60,13 @@ public AtlasSearchTests(ITestOutputHelper testOutputHelper) : base(testOutputHel { RequireEnvironment.Check().EnvironmentVariable("ATLAS_SEARCH_TESTS_ENABLED"); - var atlasSearchUri = Environment.GetEnvironmentVariable("ATLAS_SEARCH"); + var atlasSearchUri = Environment.GetEnvironmentVariable("ATLAS_SEARCH_URI"); Ensure.IsNotNullOrEmpty(atlasSearchUri, nameof(atlasSearchUri)); var mongoClientSettings = MongoClientSettings.FromConnectionString(atlasSearchUri); mongoClientSettings.ClusterSource = DisposingClusterSource.Instance; - _mongoClient = new MongoClient(atlasSearchUri); + _mongoClient = new MongoClient(mongoClientSettings); } protected override void DisposeInternal() => _mongoClient.Dispose(); @@ -156,33 +155,33 @@ public void EqualsNull() result.Name.Should().Be("testNull"); } - + [Fact] public void EqualsArrayField() { - var results = GetSynonymTestCollection().Aggregate() + var results = GetMoviesCollection<Movie>().Aggregate() .Search(Builders<Movie>.Search.Equals(p => p.Genres, "family")) .Limit(3) .ToList(); - + results.Should().HaveCount(3); foreach (var result in results) { result.Genres.Should().Contain("Family"); } - + results[0].Title.Should().Be("The Poor Little Rich Girl"); 
results[1].Title.Should().Be("Robin Hood"); results[2].Title.Should().Be("Peter Pan"); } - + [Fact] public void EqualsStringField() { - var results = GetSynonymTestCollection().Aggregate() + var results = GetMoviesCollection<Movie>().Aggregate() .Search(Builders<Movie>.Search.Equals(p => p.Title, "a corner in wheat")) .ToList(); - + results.Should().ContainSingle().Which.Title.Should().Be("A Corner in Wheat"); } @@ -279,7 +278,7 @@ public void GeoWithin(string geometryType) [Fact] public void In() { - var results = GetSynonymTestCollection() + var results = GetMoviesCollection<Movie>() .Aggregate() .Search( Builders<Movie>.Search.In(x => x.Runtime, new[] { 31, 231 }), @@ -425,7 +424,7 @@ public void PhraseAnalyzerPath() public void PhraseSynonym() { var result = - GetSynonymTestCollection().Aggregate() + GetMoviesCollection<Movie>().Aggregate() .Search( Builders<Movie>.Search.Phrase("plot", "automobile race", new SearchPhraseOptions<Movie> { Synonyms = "transportSynonyms" }), indexName: "synonyms-tests") @@ -437,8 +436,8 @@ public void PhraseSynonym() result[0].Title.Should().Be("The Great Race"); result[1].Title.Should().Be("The Cannonball Run"); result[2].Title.Should().Be("National Mechanics"); - result[3].Title.Should().Be("Genevieve"); - result[4].Title.Should().Be("Speedway Junky"); + result[3].Title.Should().Be("Speedway Junky"); + result[4].Title.Should().Be("Jo pour Jonathan"); } [Fact] @@ -470,16 +469,16 @@ public void Range() results.Should().ContainSingle().Which.Name.Should().Be("House close to station & direct to opera house...."); } - + [Fact] public void RangeString() { - var results = GetSynonymTestCollection().Aggregate() + var results = GetMoviesCollection<Movie>().Aggregate() .Search(Builders<Movie>.Search.Range(p => p.Title, SearchRangeV2Builder.Gt("city").Lt("country"))) .Limit(5) .Project<Movie>(Builders<Movie>.Projection.Include(p => p.Title)) .ToList(); - + results[0].Title.Should().Be("Civilization"); results[1].Title.Should().Be("Clash of 
the Wolves"); results[2].Title.Should().Be("City Lights"); @@ -487,62 +486,35 @@ public void RangeString() results[4].Title.Should().Be("Come and Get It"); } - // TODO: Once we have an Atlas cluster running server 8.1, update this test to retrieve actual results from the server instead of merely validating the syntax. [Fact] public void RankFusion() { const int limit = 5; - var vector = new[] { 1.0, 2.0, 3.0 }; - var vectorOptions = new VectorSearchOptions<EmbeddedMovie>() + var vector = new[] { -0.00028408386, -0.030921403, 0.0017461961, -0.007926553, -0.008247016, 0.029273309, 0.0027844305, 0.0088290805, 0.0020862792, 0.001850837, 0.0004868257, 0.004914855, 0.030529, -0.033118863, 0.022419326, 0.0031359587, 0.030842923, -0.016101629, 0.018940013, -0.006186897, -0.008390897, 0.007514529, 0.008175075, -0.012380334, 0.007200606, 0.0015352791, 0.0071482854, -0.011484345, 0.007194066, -0.006736262, 0.0009049808, -0.01856069, 0.008959882, -0.02718049, -0.030031955, -0.012609236, -0.011432025, -0.000060597744, 0.0390834, -0.00019783681, 0.0071025053, 0.01747504, -0.015918506, -0.0062261373, -0.0049966057, 0.008534778, 0.009247645, -0.007959253, -0.04015597, 0.013838767, 0.013969569, 0.010934981, -0.0040025166, 0.0022285255, -0.0067820423, -0.008194695, -0.0096335085, 0.006209787, 0.010261354, -0.006445229, -0.0066839415, -0.0025702436, -0.0028007806, 0.0009164259, -0.012151431, -0.00014674259, 0.011314304, -0.019737901, 0.0068997634, 0.007331407, 0.036336575, -0.0021680298, 0.0024606977, -0.0007745884, 0.00985587, -0.0049573653, -0.022066163, -0.0065040896, -0.010745319, -0.008802921, 0.00021173444, -0.028880905, -0.021098234, 0.03481928, 0.03822011, 0.003809585, -0.011693628, 0.012726957, -0.012197211, 0.0019865432, -0.0028776263, -0.008436677, -0.0021631247, 0.0118375085, -0.0044962913, 0.002622564, -0.011360084, 0.00865904, -0.009659668, -0.027677534, -0.019397818, 0.0040875375, 0.011386245, -0.0011322479, 0.003714754, 0.005578671, 0.025218472, 0.012112191, 
0.014623574, -0.002947932, 0.0041954485, 0.009456927, 0.018142127, -0.055878274, -0.014335811, -0.03162773, 0.0075733894, 0.015840026, 0.005258208, -0.015879266, 0.033354305, -0.004542072, -0.006638161, -0.0075930096, -0.011366624, 0.019332416, -0.019515539, -0.022445485, 0.005876244, -0.016559431, 0.018220607, -0.0039894367, 0.031209165, -0.0049737156, -0.020195706, 0.0175012, -0.024669107, 0.0014339081, -0.005912214, -0.015800785, 0.0117197875, 0.008161995, -0.00982971, -0.0023348015, -0.008292796, 0.035420965, -0.00040343995, 0.0022628608, 0.00032904677, -0.009273805, 0.01975098, -0.013420203, 0.016650993, -0.009143004, 0.024865309, 0.0035185523, 0.007305247, 0.024132822, -0.012635396, -0.0118375085, -0.00873098, 0.011706707, 0.0009687464, -0.012295313, 0.0057160123, 0.03508088, 0.003142499, 0.00035152823, 0.009476547, -0.028410021, -0.009908191, -0.0033109053, -0.009188784, 0.0148720965, -0.031183006, 0.022066163, 0.014021888, 0.022144644, -0.0108565, -0.008155455, -0.009005663, 0.025231551, 0.018966174, 0.009156084, -0.017017236, -0.017893605, -0.021320596, -0.008103134, 0.015840026, -0.013420203, 0.0027533653, 0.0054249796, -0.009339206, 0.0120271705, -0.6701207, -0.029901154, -0.012995099, -0.008050814, -0.005356309, 0.016977996, 0.021202875, -0.008207776, -0.006464849, 0.0027942406, -0.004453781, 0.027154328, -0.0054249796, -0.015905425, 0.0003684915, -0.011432025, 0.023544217, -0.024289783, 0.0054053594, -0.020313427, -0.03398215, 0.030031955, 0.020993592, -0.0015753369, -0.01965942, 0.0072725466, 0.02515307, -0.0078022913, -0.0094438465, 0.015683064, -0.022288524, 0.028880905, -0.0020388637, 0.009620428, 0.056296837, -0.02825306, -0.0049508256, 0.030319719, -0.020561948, 0.02189612, -0.009594268, -0.014885177, -0.004842914, -0.0040940777, 0.0069651636, -0.0061836266, 0.013564085, -0.0030018876, 0.023504976, 0.007946173, 0.00439492, 0.030895242, -0.007109045, -0.002619294, -0.0028759914, 0.008711359, 0.030215077, -0.013551004, 0.012406494, -0.010562196, 
-0.010503336, -0.0016023146, 0.005088167, 0.0026634394, -0.0011150802, 0.04154246, 0.015447621, 0.0417779, 0.016781794, -0.03377287, 0.037696905, 0.014466613, -0.0071417456, 0.017775884, -0.010137093, 0.0028972465, 0.024315942, -0.011026541, -0.008103134, -0.004470131, -0.0019244127, -0.0027975107, -0.025859397, -0.0050293063, 0.02404126, -0.012275693, -0.010719159, 0.01203371, 0.0030002524, 0.005892594, -0.008632879, 0.027154328, -0.0020241486, 0.000026620088, 0.007913472, -0.0123345535, 0.007821912, 0.0049508256, 0.0072463863, -0.039449643, 0.017592762, -0.034583837, 0.0058173835, -0.0136164045, 0.0045191813, -0.0022432406, -0.0027599053, 0.004872345, 0.061633524, -0.020287266, -0.008672119, 0.0023658667, 0.014440453, -0.004061377, -0.011785188, -0.02391046, 0.0074360482, 0.009411146, 0.030999884, -0.022602446, -0.0059449147, 0.016755633, 0.008417057, -0.014675895, 0.024760667, 0.015643824, -0.025950957, -0.01975098, -0.034688476, 0.013256702, -0.015853105, -0.004542072, 0.0031147036, -0.0036428133, 0.016167028, -0.0038815255, 0.013838767, -0.0061705466, 0.01963326, 0.0041006175, -0.008436677, 0.013276322, 0.011327384, -0.01757968, 0.00070837024, -0.029377948, -0.0011714882, -0.007501449, -0.04912893, 0.026591884, 0.017736642, -0.009371906, -0.015604583, 0.00028060944, 0.009581188, 0.00657276, -0.024826068, -0.006252297, -0.0038651754, -0.016729474, 0.01858685, 0.0048265643, -0.011628226, 0.011360084, -0.0044014603, -0.017736642, 0.016023146, 0.033171184, -0.008338576, -0.024211302, -0.009025283, -0.035525605, -0.02287713, 0.029037867, -0.002849831, -0.010627598, -0.0019881781, -0.022602446, -0.015316821, 0.008227396, -0.0026863297, 0.011700167, -0.00043777528, -0.001362785, 0.03604881, -0.0003721703, 0.00057225523, 0.017867444, -0.009757769, 0.037749227, -0.021516798, 0.027991457, 0.012864298, -0.011248903, -0.0052778283, 0.013498683, 0.008528238, 0.021137474, 0.008515158, 0.031339966, -0.001971828, -0.009947431, 0.028462341, -0.005150297, 0.020575028, 
-0.009522327, 0.0011649482, -0.028907064, 0.03508088, 0.003377941, 0.004287009, -0.024865309, 0.02064043, -0.0020192435, 0.006477929, 0.014911337, -0.005856624, 0.0015025787, -0.01629783, -0.0022285255, 0.019070815, -0.012426114, -0.01739656, -0.0031245137, -0.015539182, 0.02084971, 0.0005252486, 0.026539564, 0.006029935, -0.042222627, 0.013152061, 0.008031194, -0.015146779, 0.009221485, 0.022759408, -0.023060251, -0.0026634394, -0.015094458, 0.023295693, -0.014518933, 0.014924417, 0.026448002, 0.04350448, -0.010405235, -0.012184132, 0.014793616, 0.004015597, 0.00004401767, -0.019031575, 0.023714257, -0.017592762, 0.016873354, -0.012511135, 0.020391908, 0.0036297333, 0.012262613, 0.009430766, 0.0035545225, 0.022497807, 0.036336575, 0.0074229683, -0.013269782, 0.020221865, 0.021438317, 0.017972086, -0.013328643, 0.016821034, 0.009169164, 0.005117597, 0.024564466, -0.008037734, -0.01408729, -0.008070434, -0.00977085, 0.023884298, -0.0017412909, 0.006203247, -0.004578042, 0.013799527, -0.008279716, -0.013904167, -0.034296073, 0.020234946, 0.0153299, -0.011006921, -0.012753117, -0.015382221, 0.0047317334, -0.03144461, 0.013655645, -0.025715515, 0.012870838, -0.015055218, -0.011275063, 0.0025244632, -0.0068997634, 0.037696905, -0.005261478, 0.034296073, -0.021987682, 0.0059710746, 0.012746577, -0.02843618, -0.01739656, 0.011870209, -0.004018867, -0.024525225, -0.004571502, 0.0066774013, 0.005133947, -0.0029708222, -0.007534149, -0.0174358, 0.011190043, 0.0009000758, -0.012896998, -0.008541319, 0.013511764, 0.023884298, 0.009156084, -0.007095965, -0.032909583, -0.02733745, -0.002501573, 0.11887213, 0.017605841, 0.018390648, 0.013969569, 0.013577164, -0.004463591, -0.019358577, -0.018024405, 0.014100369, -0.0023462465, 0.005761793, -0.02281173, 0.002182745, 0.0037376443, 0.01747504, 0.00044840286, -0.022026923, -0.016101629, 0.013825687, -0.011772108, 0.0066970214, 0.0108892, -0.0031915493, 0.01631091, -0.021909202, -0.024172062, 0.008619799, 0.012569996, -0.008207776, 
-0.015042138, -0.015486862, -0.010542576, 0.0008943532, 0.021307515, -0.0015557167, 0.009476547, -0.00005768537, 0.011791728, 0.036388893, -0.0036657036, 0.023871219, 0.024747588, 0.0113404635, -0.037592266, 0.014819776, -0.005568861, 0.013825687, 0.017134957, 0.001968558, -0.021556038, 0.056453798, 0.0017069556, -0.009404606, 0.009842791, 0.02172608, -0.0067493417, 0.014728215, 0.00878984, -0.018286008, 0.01642863, 0.006304618, -0.023151813, 0.009731609, -0.0062457575, 0.011464725, -0.01866533, -0.009496167, 0.007403348, -0.011020001, 0.0038717154, 0.00972507, -0.031758532, -0.010228653, -0.015996987, 0.008384357, 0.0018998875, 0.020234946, -0.02733745, 0.018652251, 0.011229283, -0.0021189793, -0.014466613, -0.018246768, -0.014074209, -0.0106537575, 0.02817458, 0.013642565, 0.007749971, -0.006690481, 0.0053955493, 0.013851847, 0.009306505, 0.0070044044, -0.0015965921, -0.0022481456, -0.025767837, -0.0016824304, 0.02733745, -0.002951202, 0.018299088, 0.021320596, -0.028593142, 0.0017134957, -0.015552263, 0.036179613, -0.010954601, -0.014453532, -0.005362849, -0.025754755, 0.0014151054, -0.0055982913, -0.013551004, 0.0053922795, -0.009417687, 0.0050947065, 0.0112685235, 0.01861301, 0.025924798, 0.00058533537, -0.017723562, -0.008855241, -0.0075603095, 0.027075848, 0.016598672, -0.0017331159, -0.004211799, 0.01522526, -0.026644204, 0.018848453, 0.0065073594, -0.0028253058, -0.0076060896, 0.012550375, 0.0034008313, -0.030790603, 0.002516288, -0.021268275, 0.0025244632, -0.005313799, -0.0014077479, 0.0015107539, -0.009640048, -0.008253556, -0.023387255, -0.024368264, -0.027049689, -0.00092950603, 0.00087718555, 0.00011823202, 0.017723562, -0.0239497, -0.009947431, -0.00163992, 0.0102024935, -0.031078365, -0.04379224, -0.011870209, 0.0063569383, 0.0218438, 0.015905425, 0.034243755, -0.008430137, 0.03497624, 0.019463219, 0.0010153443, 0.00015430454, -0.0060626357, 0.013799527, -0.06629005, 0.036755137, 0.018756893, 0.011170423, 0.032569498, -0.008443218, 0.026107918, 
0.010594897, -0.008933722, -0.014610494, -0.023622697, -0.011641307, 0.0030721931, -0.011229283, -0.00072063284, 0.012393414, -0.014597414, -0.015015977, 0.071522094, 0.017775884, -0.029220987, -0.0095157875, 0.023243373, -0.028985545, -0.0049704458, -0.0065073594, -0.006661051, -0.014963658, 0.020967431, -0.00006550279, -0.0013088295, 0.01975098, 0.009430766, 0.0014952212, 0.01962018, -0.009718529, 0.009267265, 0.039737403, 0.022013841, -0.0054347897, 0.010091312, -0.02718049, -0.023792738, -0.022746328, -0.026487242, -0.03573489, -0.017723562, 0.0049867956, -0.005673502, 0.009659668, -0.016821034, -0.0390834, 0.009509247, 0.014545093, 0.04007749, 0.0084497575, 0.02817458, 0.0099408915, -0.013472524, -0.016114708, 0.028148418, 0.015683064, -0.0009205134, 0.0010096218, 0.01844297, -0.001850837, -0.020823551, -0.01089574, 0.012341093, -0.04996606, -0.014335811, 0.015604583, -0.009156084, -0.00436876, -0.008855241, -0.016860275, 0.0019244127, 0.014100369, -0.02504843, 0.010424855, 0.001983273, -0.030921403, -0.047925558, -0.011942149, -0.014558174, 0.0027811604, 0.010509877, -0.0136164045, 0.0014894987, 0.0009164259, 0.0106995385, 0.014597414, 0.020483468, 0.004143128, -0.0028759914, -0.0067035616, 0.007769591, -0.020470388, -0.00028776264, -0.00763879, 0.0091364635, 0.027023528, 0.00545441, 0.01083688, -0.004649983, 0.01625859, -0.007213686, 0.0111508025, -0.001973463, -0.01090882, -0.015866185, -0.0050391164, 0.011418945, 0.015434542, -0.020692749, -0.013152061, -0.022223124, 0.0023609616, -0.01858685, -0.012641936, 0.019031575, -0.014532013, -0.0054871105, -0.0094438465, 0.005022766, 0.0053824694, 0.017252678, 0.0034923921, -0.0151991, 0.017710483, -0.04245807, 0.008881401, 0.0034825818, 0.0064942795, -0.015748464, -0.0071352054, 0.011301223, -0.0069324635, 0.01408729, -0.038743313, -0.019293176, -0.014702055, -0.009208404, 0.00432952, -0.021477558, -0.010640678, 0.010012832, 0.015094458, 0.009280345, -0.02077123, -0.02494379, 0.012635396, 0.0021238844, 
-0.011752488, 0.028619302, -0.024773747, 0.007965793, 0.008848701, 0.013969569, 0.006860523, -0.034191433, -0.00011067008, -0.008175075, 0.017815124, 0.0091364635, -0.012962399, -0.019855622, -0.013387503, -0.022367004, 0.021385996, 0.013943408, -0.021542957, 0.010019372, 0.027703693, 0.0058500837, -0.0016202999, 0.0306598, -0.0012123636, 0.006111686, -0.023217212, -0.028540822, -0.014034969, -0.0067820423, 0.030738281, 0.003590493, -0.004908315, -0.032988064, -0.0035316325, -0.041804064, 0.0141134495, -0.022000762, 0.0136164045, 0.030764442, -0.0040057865, 0.0040548374, 0.021242116, 0.010065152, -0.0031310536, -0.029848834, -0.0050587365, -0.0028563712, -0.0018328518, -0.003597033, 0.014793616, -0.019554779, -0.0020143385, 0.029874993, -0.0034302615, -0.006455039, 0.014728215, -0.034479197, -0.009731609, -0.010725698, 0.015591503, 0.0067951223, 0.0044308905, 0.009509247, -0.012301853, 0.0017707213, -0.00058697036, 0.010097853, -0.012386873, -0.0082600955, 0.0023969319, -0.0043033594, 0.028854745, -0.0066839415, -0.006742802, -0.00549365, -0.021137474, 0.017331159, -0.011392784, -0.0044308905, 0.01747504, 0.0050521963, -0.03413911, -0.028828584, 0.0075799297, 0.014832856, -0.014571253, 0.0045191813, -0.0038847956, 0.02172608, -0.008462838, 0.018809212, -0.016611753, 0.009156084, -0.02393662, 0.0037180241, -0.03563025, 0.0262518, 0.010032452, -0.01523834, -0.0015123888, 0.0066774013, -0.0015393667, -0.024054341, -0.025506234, -0.010477176, -0.0048559946, -0.0070305644, -0.010869579, -0.017252678, -0.029953474, 0.0061738165, -0.024420584, -0.04258887, 0.00983625, 0.21576966, -0.004787324, 0.0048592645, 0.012046791, -0.014924417, 0.024669107, 0.036493536, 0.016951835, -0.0010063518, -0.0034596918, -0.009299966, 0.008299336, -0.014832856, 0.0019113325, -0.0090645235, -0.018926933, -0.037435304, -0.0015508117, -0.016768714, -0.018756893, 0.021869961, -0.00439492, -0.003606843, -0.010280974, 0.015983906, -0.0073771877, -0.0032471397, 0.0007529244, 0.0005877879, 
-0.0003386525, -0.006314428, -0.031026045, -0.0051045166, -0.016533272, -0.013577164, -0.004048297, 0.0063536684, 0.005035846, -0.0060887956, 0.015643824, 0.02494379, 0.015042138, -0.015787704, 0.002517923, -0.015316821, 0.0149898175, -0.008364737, -0.01752736, 0.010254814, 0.006435419, -0.024185142, -0.01083688, 0.016624833, 0.051718794, -0.0037572645, -0.00067485246, 0.010575277, 0.022353925, 0.007429508, 0.004564962, -0.005689852, 0.019044654, 0.006186897, 0.019842543, -0.024381343, 0.018992335, -0.048788846, 0.005163377, 0.023034092, -0.021699918, -0.006723182, -0.036022652, -0.012138351, -0.01204025, -0.0103921555, -0.0348716, 0.018024405, 0.008959882, 0.013158601, -0.007043645, 0.00870482, 0.0023282613, 0.008024653, 0.004247769, -0.017252678, -0.020470388, 0.033406626, 0.0104640955, -0.011660927, 0.0021663948, 0.0018230417, -0.02943027, 0.0013913978, -0.0047480837, 0.013184761, 0.032386377, -0.0060855257, -0.001146963, -0.006978244, -0.0086852, -0.018338328, -0.036205772, 0.0196071, -0.01417885, -0.021608358, 0.0071679056, -0.0008632879, 0.012256073, 0.039449643, -0.030712122, -0.0015181114, -0.0136164045, 0.0074360482, -0.010326754, 0.022523966, 0.015395301, -0.011713248, -0.02067967, 0.0015311915, -0.018979253, -0.007318327, -0.038560193, -0.01304088, -0.008914102, -0.010692998, -0.028017618, 0.003175199, 0.020143384, 0.04052221, -0.014034969, 0.028200738, -0.03612729, -0.012936238, -0.0063536684, 0.008358196, 0.00763225, 0.005778143, -0.006886683, 0.010241734, 0.011347004, 0.021215955, 0.01746196, 0.00326022, -0.010287514, 0.016873354, -0.011490885, 0.012144892, 0.0038488253, -0.0055230805, 0.001145328, -0.015552263, -0.024747588, 0.004551882, -0.0039796266, 0.03709522, -0.021032833, -0.020418067, -0.016127788, -0.0037899648, 0.00014061129, -0.016716393, 0.013027799, 0.0050489265, -0.018142127, -0.011732868, 0.01852145, -0.16721626, 0.024682187, 0.008887942, 0.002089549, 0.041908704, -0.002836751, 0.01644171, -0.009646588, -0.008044274, 0.002403472, 
0.004103888, -0.00068098377, -0.0370429, -0.0032520448, -0.005133947, 0.007063265, -0.010712618, 0.008606719, 0.019437058, 0.0027811604, 0.0071352054, -0.0064910096, 0.0055525107, 0.007763051, 0.011046161, 0.012968939, 0.009384986, -0.016624833, -0.010228653, -0.014963658, -0.009463467, -0.0031375939, 0.011674007, 0.0144927725, 0.024224382, 0.0009205134, -0.013269782, -0.017605841, -0.0122887725, 0.012779277, 0.005261478, 0.014244251, 0.013263241, -0.004829834, 0.014702055, 0.04805636, 0.003606843, 0.0020143385, -0.0008780031, -0.012347633, 0.006304618, -0.029744193, 0.008135835, 0.00035193696, 0.023871219, 0.022353925, 0.010006292, -0.012831598, 0.014754375, -0.03272646, -0.009201865, 0.0013251797, 0.009522327, -0.014675895, -0.025571635, -0.015879266, -0.004022137, 0.0018819022, -0.02956107, 0.019737901, -0.036781296, -0.013348263, 0.02623872, -0.017893605, 0.006177087, -0.0050718165, -0.014715135, 0.006958624, -0.010071692, -0.0097054485, -0.038455553, 0.03361591, 0.003809585, -0.008436677, 0.014061129, 0.0005665327, -0.0033109053, -0.014675895, -0.013721046, -0.030947564, -0.0021124394, -0.016088547, -0.011922529, -0.03152309, 0.020575028, 0.029325629, -0.0003155579, 0.0038913356, 0.012491515, -0.011883289, -0.009404606, 0.018037485, -0.021477558, 0.020431148, 0.030764442, 0.02510075, 0.021987682, 0.0077761314, 0.042981274, -0.0045322618, -0.0073902677, 0.008574018, -0.002056849, 0.027625212, -0.0017592761, 0.019214695, 0.010019372, -0.020012584, 0.04146398, -0.019201616, 0.03897876, 0.0049671754, -0.02935179, 0.021072073, -0.021490637, -0.010287514, -0.106419854, -0.0026716145, -0.019018494, 0.013171681, -0.0020094335, 0.010640678, -0.0046401727, 0.012321473, -0.0073771877, 0.008390897, -0.0058533535, -0.0218438, 0.019123135, -0.013930327, 0.035342485, 0.000036966667, -0.015460702, 0.0045682318, -0.023191053, 0.015813865, -0.010542576, 0.005549241, 0.0137341255, -0.013407123, 0.012962399, -0.0041202377, -0.032255575, 0.0044374308, 0.007534149, 0.00766495, 
-0.01194869, -0.015604583, 0.01836449, -0.020182624, 0.0030934485, -0.001960383, -0.030110436, 0.0072725466, 0.034400716, -0.033668227, 0.009175704, 0.009620428, 0.0027942406, -0.017932845, 0.006458309, 0.0027746204, -0.008384357, 0.012419574, -0.0028988817, -0.027546732, -0.030738281, 0.00016186648, -0.029848834, 0.007939633, 0.017344238, 0.0048690746, 0.0046532527, 0.008927182, 0.028017618, 0.005791223, -0.0019947183, -0.01314552, 0.002277576, 0.00044431532, -0.0069847843, -0.0079527125, -0.014976737, 0.011732868, 0.00035398075, -0.0018017865, 0.010137093, 0.021359837, -0.008220855, 0.008632879, -0.037775386, 0.020195706, -0.013995728, 0.0012401589, 0.013799527, -0.017815124, -0.007920013, -0.021346755, 0.009993211, -0.01633707, 0.006873603, 0.004810214, 0.0032896502, -0.00025383607, -0.010163253, -0.012275693, 0.0037605346, -0.008999122, -0.011817888, -0.03623193, -0.021242116, 0.015826944, 0.0061607365, -0.0064975494, -0.0067951223, -0.012393414, -0.013551004, 0.008626339, -0.06644701, 0.024656026, 0.0024590625, -0.00020263967, -0.002517923, -0.012798897, 0.016925676, -0.0026814246, -0.004296819, -0.005748713, -0.0056931223, 0.02725897, 0.0030967183, -0.002411647, -0.006736262, -0.019463219, -0.00075496815, -0.008488998, 0.016572513, 0.011360084, 0.009489627, 0.012504594, 0.001317822, 0.007926553, -0.0066185407, -0.0020862792, -0.0036885939, 0.021569118, -0.0022138103, 0.0018606471, -0.009646588, -0.023491895, -0.010732238, 0.0017592761, 0.0011714882, 0.008273176, -0.001744561, 0.0064942795, 0.01635015, 0.06372634, -0.02731129, -0.014571253, 0.010294055, -0.011052702, -0.019267017, 0.0031441338, 0.008593638, 0.007893852, 0.03495008, -0.019175455, 0.016703313, 0.037644584, -0.00975123, 0.00041938134, -0.00976431, -0.0039730864, 0.018181367, -0.00050440215, 0.014767455, -0.0042379587, 0.02524463, -0.0049966057, 0.009280345, 0.014924417, -0.0012115461, -0.029874993, -0.0132370815, -0.010006292, 0.0035152822, -0.016206268, -0.004914855, -0.023675017, -0.0016619927, 
0.005330149, -0.004453781, 0.0003075872, -0.0053857393, -0.0017069556, -0.008384357, 0.021516798, -0.006232677, 0.0031097985, -0.042039506, 0.00438184, 0.024917629, 0.001138788, 0.006853983, 0.015696144, -0.013348263, 0.016114708, -0.010791099, 0.017867444, -0.0035185523, 0.022445485, -0.014270411, 0.014963658, -0.040705334, -0.007279087, 0.01627167, 0.012373794, 0.0070567247, 0.0008632879, 0.0070567247, -0.011588986, -0.011772108, 0.0046140123, -0.032229416, -0.043059755, -0.011013461, 0.004780784, 0.023635777, -0.016546352, -0.013034339, 0.011445105, -0.005814113, 0.00977085, -0.0045061014, -0.002202365, -0.017200358, 0.014675895, 0.007501449, 0.00761917, 0.014021888, 0.0014012079, 0.002275941, 0.015630743, 0.010405235, -0.004264119, -0.009424226, -0.02506151, -0.0010733873, 0.0074949088, -0.01963326, 0.00031433164, -0.018063646, -0.017383479, 0.0000909988, 0.018050566, -0.027102008, 0.028645463, -0.008332036, 0.0029528372, 0.014702055, -0.027154328, 0.021909202, 0.0034466116, 0.03897876, -0.0031228787, -0.015015977, 0.0062915375, -0.035604086, 0.02185688, 0.008613259, -0.008194695, 0.030188916, -0.0010096218, 0.0070174844, 0.0066970214, 0.004607472, 0.022000762, 0.012655016, 0.0087179, -0.007279087, -0.016860275, 0.000018994277, 0.0010177968, 0.01618011, -0.012046791, -0.035473283, 0.013361342, -0.0067035616, -0.013577164, 0.012825058, -0.007965793, -0.021699918, 0.011588986, -0.007161366, -0.0050652763, -0.00540863, -0.019123135, 0.026304122, -0.0042150686, -0.022393165, -0.013943408, 0.00078439846, -0.005801033, -0.00087555056, -0.027232809 }; + + var vectorOptions = new VectorSearchOptions<EmbeddedMovie> { IndexName = "vector_search_embedded_movies" }; var vectorPipeline = new EmptyPipelineDefinition<EmbeddedMovie>().VectorSearch(m => m.Embedding, vector, limit, vectorOptions); var searchDefinition = Builders<EmbeddedMovie>.Search.Text(new[]{"fullplot", "title"}, "ape"); - var searchPipeline = new 
EmptyPipelineDefinition<EmbeddedMovie>().Search(searchDefinition, indexName: "search_embedded_movies").Limit(limit); + var searchPipeline = new EmptyPipelineDefinition<EmbeddedMovie>().Search(searchDefinition, indexName: "sample_mflix__embedded_movies").Limit(limit); - var result = GetTestCollection<EmbeddedMovie>() + var result = GetEmbeddedMoviesCollection<EmbeddedMovie>() .Aggregate() - .RankFusion(new Dictionary<string, PipelineDefinition<EmbeddedMovie, EmbeddedMovie>>() + .RankFusion(new Dictionary<string, PipelineDefinition<EmbeddedMovie, EmbeddedMovie>> { { "vector", vectorPipeline }, { "search", searchPipeline } - }); - - result.Stages.Count.Should().Be(1); + }) + .Limit(limit) + .ToList(); - var serializerRegistry = BsonSerializer.SerializerRegistry; - var inputSerializer = serializerRegistry.GetSerializer<EmbeddedMovie>(); - var renderedStage = result.Stages[0].Render(inputSerializer, serializerRegistry); - renderedStage.Document.Should().Be( - """ - { - $rankFusion: { - input: { - pipelines: { - vector: [{ - $vectorSearch: { - queryVector: [1.0, 2.0, 3.0], - path: "plot_embedding", - limit: 5, - numCandidates: 50, - index: "vector_search_embedded_movies" - } - }], - search: [{ - $search: { - text: {query: "ape", path: ["fullplot", "title"]}, - index: "search_embedded_movies" - } - }, - { $limit: 5 } - ] - } - }} - } - """); + result.Count.Should().Be(limit); + result.Select(m => m.Title).Should().BeEquivalentTo("Tarzan the Ape Man", "King Kong", + "Battle for the Planet of the Apes", "King Kong Lives", "Mighty Joe Young"); } [Fact] @@ -568,7 +540,7 @@ public void SearchSequenceToken() .MetaSearchSequenceToken(x => x.PaginationToken); // Base search - var baseSearchResults = GetSynonymTestCollection() + var baseSearchResults = GetMoviesCollection<Movie>() .Aggregate() .Search(searchDefinition, searchOptions) .Project<Movie>(projection) @@ -585,7 +557,7 @@ public void SearchSequenceToken() // Testing SearchAfter // We're searching after the 2nd result of 
the base search searchOptions.SearchAfter = baseSearchResults[1].PaginationToken; - var searchAfterResults = GetSynonymTestCollection() + var searchAfterResults = GetMoviesCollection<Movie>() .Aggregate() .Search(searchDefinition, searchOptions) .Project<Movie>(projection) @@ -601,7 +573,7 @@ public void SearchSequenceToken() // We're searching before the 4th result of the base search searchOptions.SearchAfter = null; searchOptions.SearchBefore = baseSearchResults[3].PaginationToken; - var searchBeforeResults = GetSynonymTestCollection() + var searchBeforeResults = GetMoviesCollection<Movie>() .Aggregate() .Search(searchDefinition, searchOptions) .Project<Movie>(projection) @@ -700,7 +672,7 @@ public void Sort() [Fact] public void Sort_MetaSearchScore() { - var results = GetSynonymTestCollection().Aggregate() + var results = GetMoviesCollection<Movie>().Aggregate() .Search( Builders<Movie>.Search.QueryString(x => x.Title, "dance"), new() { Sort = Builders<Movie>.Sort.MetaSearchScoreAscending() }) @@ -747,7 +719,7 @@ public void Text() public void TextMatchCriteria() { var result = - GetSynonymTestCollection().Aggregate() + GetMoviesCollection<Movie>().Aggregate() .Search( Builders<Movie>.Search.Text("plot", "attire", new SearchTextOptions<Movie> { Synonyms = "attireSynonyms", MatchCriteria = MatchCriteria.Any}), indexName: "synonyms-tests") @@ -770,7 +742,7 @@ public void Synonyms(string query, string synonym, string expected) { var sortDefinition = Builders<Movie>.Sort.Ascending(x => x.Title); var result = - GetSynonymTestCollection().Aggregate() + GetMoviesCollection<Movie>().Aggregate() .Search(Builders<Movie>.Search.Text(x => x.Title, query, synonym), indexName: "synonyms-tests") .Sort(sortDefinition) .Project<Movie>(Builders<Movie>.Projection.Include("Title").Exclude("_id")) @@ -835,7 +807,7 @@ private HistoricalDocument SearchSingle( } private List<BsonDocument> SearchMultipleSynonymMapping(params SearchDefinition<Movie>[] clauses) => - 
GetSynonymTestCollection().Aggregate() + GetMoviesCollection<Movie>().Aggregate() .Search(Builders<Movie>.Search.Compound().Should(clauses), indexName: "synonyms-tests") .Project(Builders<Movie>.Projection.Include("Title").Exclude("_id")) .ToList(); @@ -848,9 +820,13 @@ private IMongoCollection<T> GetTestCollection<T>() => _mongoClient .GetDatabase("sample_training") .GetCollection<T>("posts"); - private IMongoCollection<Movie> GetSynonymTestCollection() => _mongoClient + private IMongoCollection<T> GetEmbeddedMoviesCollection<T>() => _mongoClient .GetDatabase("sample_mflix") - .GetCollection<Movie>("movies"); + .GetCollection<T>("embedded_movies"); + + private IMongoCollection<T> GetMoviesCollection<T>() => _mongoClient + .GetDatabase("sample_mflix") + .GetCollection<T>("movies"); private IMongoCollection<AirbnbListing> GetGeoTestCollection() => _mongoClient .GetDatabase("sample_airbnb") @@ -872,7 +848,7 @@ public class Movie { [BsonElement("genres")] public string[] Genres { get; set; } - + [BsonElement("title")] public string Title { get; set; } @@ -886,10 +862,11 @@ public class Movie public string PaginationToken { get; set; } } + [BsonIgnoreExtraElements] public class EmbeddedMovie : Movie { [BsonElement("plot_embedding")] - public double[] Embedding { get; set; } + public BinaryVectorFloat32 Embedding { get; set; } } [BsonIgnoreExtraElements] diff --git a/tests/MongoDB.Driver.Tests/Search/MongoQueryableTests.cs b/tests/MongoDB.Driver.Tests/Search/MongoQueryableTests.cs index 3597958e860..64ef9a47bed 100644 --- a/tests/MongoDB.Driver.Tests/Search/MongoQueryableTests.cs +++ b/tests/MongoDB.Driver.Tests/Search/MongoQueryableTests.cs @@ -22,6 +22,7 @@ namespace MongoDB.Driver.Tests.Search { + [Trait("Category", "Integration")] public class MongoQueryableTests { [Fact] diff --git a/tests/MongoDB.Driver.Tests/Search/SearchDefinitionBuilderTests.cs b/tests/MongoDB.Driver.Tests/Search/SearchDefinitionBuilderTests.cs index 0afd28ea4ee..a4b97c2e7f3 100644 --- 
a/tests/MongoDB.Driver.Tests/Search/SearchDefinitionBuilderTests.cs +++ b/tests/MongoDB.Driver.Tests/Search/SearchDefinitionBuilderTests.cs @@ -266,12 +266,12 @@ public void EmbeddedDocument_typed() subjectFamily.EmbeddedDocument(p => p.Children, subjectPerson.QueryString(p => p.LastName, "berg")), "{ embeddedDocument: { path : 'Children', operator : { 'queryString' : { defaultPath : 'Children.ln', query : 'berg' } } } }"); } - + [Fact] public void Equals_with_array_should_render_supported_type() { var subjectTyped = CreateSubject<Person>(); - + AssertRendered( subjectTyped.Equals(p => p.Hobbies, "soccer"), "{ equals: { path: 'hobbies', value: 'soccer' } }"); @@ -1547,13 +1547,13 @@ public class Person : SimplePerson [BsonElement("ret")] public bool Retired { get; set; } - + [BsonElement("hobbies")] public string[] Hobbies { get; set; } [BsonElement("salaries")] public int[] SalaryHistory { get; set; } - + public object Object { get; set; } public string Name { get; set; } diff --git a/tests/MongoDB.Driver.Tests/Search/VectorSearchTests.cs b/tests/MongoDB.Driver.Tests/Search/VectorSearchTests.cs index d39679f25b6..ad0fd7a6620 100644 --- a/tests/MongoDB.Driver.Tests/Search/VectorSearchTests.cs +++ b/tests/MongoDB.Driver.Tests/Search/VectorSearchTests.cs @@ -28,6 +28,7 @@ namespace MongoDB.Driver.Tests.Search { [Trait("Category", "AtlasSearch")] + [Trait("Category", "Integration")] public class VectorSearchTests : LoggableTestClass { private readonly IMongoClient _mongoClient; @@ -36,13 +37,13 @@ public VectorSearchTests(ITestOutputHelper testOutputHelper) : base(testOutputHe { RequireEnvironment.Check().EnvironmentVariable("ATLAS_SEARCH_TESTS_ENABLED"); - var atlasSearchUri = Environment.GetEnvironmentVariable("ATLAS_SEARCH"); + var atlasSearchUri = Environment.GetEnvironmentVariable("ATLAS_SEARCH_URI"); Ensure.IsNotNullOrEmpty(atlasSearchUri, nameof(atlasSearchUri)); var mongoClientSettings = MongoClientSettings.FromConnectionString(atlasSearchUri); 
mongoClientSettings.ClusterSource = DisposingClusterSource.Instance; - _mongoClient = new MongoClient(atlasSearchUri); + _mongoClient = new MongoClient(mongoClientSettings); } protected override void DisposeInternal() => _mongoClient.Dispose(); diff --git a/tests/MongoDB.Driver.Tests/SortDefinitionBuilderTests.cs b/tests/MongoDB.Driver.Tests/SortDefinitionBuilderTests.cs index 29b18bdb264..a38f6d62207 100644 --- a/tests/MongoDB.Driver.Tests/SortDefinitionBuilderTests.cs +++ b/tests/MongoDB.Driver.Tests/SortDefinitionBuilderTests.cs @@ -13,6 +13,7 @@ * limitations under the License. */ +using System; using FluentAssertions; using MongoDB.Bson; using MongoDB.Bson.Serialization; @@ -31,6 +32,14 @@ public void Ascending() Assert(subject.Ascending("a"), "{a: 1}"); } + [Fact] + public void Ascending_value() + { + var subject = CreateSubject<BsonDocument>(); + + Assert(subject.Ascending(), "1"); + } + [Fact] public void Ascending_Typed() { @@ -40,6 +49,16 @@ public void Ascending_Typed() Assert(subject.Ascending("FirstName"), "{fn: 1}"); } + [Fact] + public void Calling_render_on_value_based_sort_should_throw() + { + var subject = CreateSubject<BsonDocument>(); + + var exception = Record.Exception(() => subject.Ascending().Render(new RenderArgs<BsonDocument>())); + + exception.Should().BeOfType<InvalidOperationException>(); + } + [Fact] public void Combine() { @@ -76,6 +95,16 @@ public void Combine_with_repeated_fields_using_extension_methods() Assert(sort, "{b: -1, a: -1}"); } + [Fact] + public void Combine_with_value_based_sort_and_additional_sort_should_throw() + { + var subject = CreateSubject<BsonDocument>(); + + var exception = Record.Exception(() => subject.Ascending().Descending("b")); + + exception.Should().BeOfType<InvalidOperationException>(); + } + [Fact] public void Descending() { @@ -84,6 +113,14 @@ public void Descending() Assert(subject.Descending("a"), "{a: -1}"); } + [Fact] + public void Descending_value() + { + var subject = 
CreateSubject<BsonDocument>(); + + Assert(subject.Descending(), "-1"); + } + [Fact] public void Descending_Typed() { @@ -120,7 +157,7 @@ public void MetaTextScore() private void Assert<TDocument>(SortDefinition<TDocument> sort, string expectedJson) { var documentSerializer = BsonSerializer.SerializerRegistry.GetSerializer<TDocument>(); - var renderedSort = sort.Render(new(documentSerializer, BsonSerializer.SerializerRegistry)); + var renderedSort = sort.RenderAsBsonValue(new(documentSerializer, BsonSerializer.SerializerRegistry)); renderedSort.Should().Be(expectedJson); } diff --git a/tests/MongoDB.Driver.Tests/Specifications/Runner/MongoClientJsonDrivenTestRunnerBase.cs b/tests/MongoDB.Driver.Tests/Specifications/Runner/MongoClientJsonDrivenTestRunnerBase.cs index b0f403147d0..8ad80cafbca 100644 --- a/tests/MongoDB.Driver.Tests/Specifications/Runner/MongoClientJsonDrivenTestRunnerBase.cs +++ b/tests/MongoDB.Driver.Tests/Specifications/Runner/MongoClientJsonDrivenTestRunnerBase.cs @@ -456,11 +456,11 @@ private protected FailPoint ConfigureFailPoint(BsonDocument test, IMongoClient c var serverAddress = EndPointHelper.Parse(settings.Server.ToString()); var selector = new EndPointServerSelector(serverAddress); - _failPointServer = cluster.SelectServer(selector, CancellationToken.None); + _failPointServer = cluster.SelectServer(OperationContext.NoTimeout, selector); } else { - _failPointServer = cluster.SelectServer(WritableServerSelector.Instance, CancellationToken.None); + _failPointServer = cluster.SelectServer(OperationContext.NoTimeout, WritableServerSelector.Instance); } var session = NoCoreSession.NewHandle(); diff --git a/tests/MongoDB.Driver.Tests/Specifications/UnifiedTestSpecRunner.cs b/tests/MongoDB.Driver.Tests/Specifications/UnifiedTestSpecRunner.cs index fe8d7683961..c5560922882 100644 --- a/tests/MongoDB.Driver.Tests/Specifications/UnifiedTestSpecRunner.cs +++ b/tests/MongoDB.Driver.Tests/Specifications/UnifiedTestSpecRunner.cs @@ -32,6 +32,7 @@ 
namespace MongoDB.Driver.Tests.Specifications { + [Trait("Category", "Integration")] public class UnifiedTestSpecRunner : LoggableTestClass { public UnifiedTestSpecRunner(ITestOutputHelper testOutputHelper) @@ -43,24 +44,55 @@ public UnifiedTestSpecRunner(ITestOutputHelper testOutputHelper) [UnifiedTestsTheory("auth.tests.unified")] public void Auth(JsonDrivenTestCase testCase) => Run(testCase); - [Trait("Category", "AtlasDataLake")] - [UnifiedTestsTheory("atlas_data_lake_testing.tests.unified")] - public void AtlasDataLake(JsonDrivenTestCase testCase) - { - RequireEnvironment.Check().EnvironmentVariable("ATLAS_DATA_LAKE_TESTS_ENABLED"); - Run(testCase); - } - [Category("SupportLoadBalancing")] [UnifiedTestsTheory("change_streams.tests.unified")] public void ChangeStreams(JsonDrivenTestCase testCase) => Run(testCase); + [UnifiedTestsTheory("client_side_operations_timeout.tests")] + public void ClientSideOperationsTimeout(JsonDrivenTestCase testCase) + { + SkipNotSupportedTestCases("dropIndexes"); + SkipNotSupportedTestCases("findOne"); + SkipNotSupportedTestCases("listIndexNames"); + // TODO: CSOT: further skipped tests should be unblocked by upcoming fixes + SkipNotSupportedTestCases("with only 1 RTT"); // blocked by CSHARP-5627 + SkipNotSupportedTestCases("createChangeStream"); // TODO: CSOT not implemented yet, CSHARP-3539 + SkipNotSupportedTestCases("runCommand"); // TODO: CSOT: TimeoutMS is not implemented yet for runCommand + SkipNotSupportedTestCases("timeoutMS applies to whole operation, not individual attempts"); // blocked by DRIVERS-3247 + SkipNotSupportedTestCases("WaitQueueTimeoutError does not clear the pool"); // TODO: CSOT: TimeoutMS is not implemented yet for runCommand + SkipNotSupportedTestCases("write concern error MaxTimeMSExpired is transformed"); // TODO: CSOT: investigate error transformation, implementing the requirement might be breaking change + SkipNotSupportedTestCases("operation succeeds after one socket timeout - listDatabases on 
client"); // TODO: listDatabases is not retryable in CSharp Driver, CSHARP-5714 + + Run(testCase); + + void SkipNotSupportedTestCases(string operationName) + { + if (testCase.Name.Contains(operationName)) + { + throw new SkipException($"Test skipped because {operationName} is not supported."); + } + } + } + [Category("CSFLE")] [UnifiedTestsTheory("client_side_encryption.tests.unified")] public void ClientSideEncryption(JsonDrivenTestCase testCase) { var testCaseNameLower = testCase.Name.ToLower(); + if (testCaseNameLower.Contains("fle2v2-encryptedfields-vs-encryptedfieldsmap.json") || + testCaseNameLower.Contains("localschema.json") || + testCaseNameLower.Contains("qe-text")) + { + CoreTestConfiguration.SkipMongocryptdTests_SERVER_106469(true); + } + + // This spec test includes an AWS sessionToken in its config, indicating it should use temporary AWS credentials + if (testCaseNameLower.Contains("localschema.json")) + { + RequireEnvironment.Check().EnvironmentVariable("FLE_AWS_TEMPORARY_CREDS_ENABLED"); + } + if (testCaseNameLower.Contains("kmip") || testCase.Shared.ToString().ToLower().Contains("kmip")) { @@ -125,15 +157,15 @@ public void LoadBalancers(JsonDrivenTestCase testCase) Run(testCase); } - [Category("Serverless", "SupportLoadBalancing")] + [Category("SupportLoadBalancing")] [UnifiedTestsTheory("read_write_concern.tests.operation")] public void ReadWriteConcern(JsonDrivenTestCase testCase) => Run(testCase); - [Category("Serverless", "SupportLoadBalancing")] + [Category("SupportLoadBalancing")] [UnifiedTestsTheory("retryable_reads.tests.unified")] public void RetryableReads(JsonDrivenTestCase testCase) => Run(testCase); - [Category("Serverless", "SupportLoadBalancing")] + [Category("SupportLoadBalancing")] [UnifiedTestsTheory("retryable_writes.tests.unified")] public void RetryableWrites(JsonDrivenTestCase testCase) { @@ -148,18 +180,24 @@ public void RetryableWrites(JsonDrivenTestCase testCase) [Category("SDAM", "SupportLoadBalancing")] 
[UnifiedTestsTheory("server_discovery_and_monitoring.tests.unified")] - public void ServerDiscoveryAndMonitoring(JsonDrivenTestCase testCase) => + public void ServerDiscoveryAndMonitoring(JsonDrivenTestCase testCase) + { + if (testCase.Name.Contains("pool-clear-")) + { + throw new SkipException("This test is flaky and is skipped while being investigated."); + } + Run(testCase, IsSdamLogValid, new SdamRunnerEventsProcessor(testCase.Name)); + } [Category("SupportLoadBalancing")] [UnifiedTestsTheory("server_selection.tests.logging")] public void ServerSelection(JsonDrivenTestCase testCase) => Run(testCase); - [Category("Serverless")] [UnifiedTestsTheory("sessions.tests")] public void Sessions(JsonDrivenTestCase testCase) => Run(testCase); - [Category("Serverless", "SupportLoadBalancing")] + [Category("SupportLoadBalancing")] [UnifiedTestsTheory("transactions.tests.unified")] public void Transactions(JsonDrivenTestCase testCase) { @@ -202,7 +240,7 @@ public void UnifiedTestFormatValidPass(JsonDrivenTestCase testCase) Run(testCase); } - [Category("Serverless", "SupportLoadBalancing")] + [Category("SupportLoadBalancing")] [UnifiedTestsTheory("versioned_api.tests")] public void VersionedApi(JsonDrivenTestCase testCase) => Run(testCase); @@ -262,9 +300,6 @@ private static void RequireKmsMock() => "legacy hello without speculativeAuthenticate is always observed", // transactions - // Skipped because CSharp Driver has an issue with handling read timeout for sync code-path. 
CSHARP-3662 - "add RetryableWriteError and UnknownTransactionCommitResult labels to connection errors", - // CSHARP Driver does not comply with the requirement to throw in case explicit writeConcern were used, see CSHARP-5468 "client bulkWrite with writeConcern in a transaction causes a transaction error", ]); diff --git a/tests/MongoDB.Driver.Tests/Specifications/atlas-data-lake-testing/prose-tests/AtlasDataLakeProseTests.cs b/tests/MongoDB.Driver.Tests/Specifications/atlas-data-lake-testing/prose-tests/AtlasDataLakeProseTests.cs deleted file mode 100644 index e0fe75bd5f7..00000000000 --- a/tests/MongoDB.Driver.Tests/Specifications/atlas-data-lake-testing/prose-tests/AtlasDataLakeProseTests.cs +++ /dev/null @@ -1,121 +0,0 @@ -/* Copyright 2020-present MongoDB Inc. -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. 
-*/ - -using System.Linq; -using FluentAssertions; -using MongoDB.Bson; -using MongoDB.TestHelpers.XunitExtensions; -using MongoDB.Driver.Core; -using MongoDB.Driver.Core.Events; -using MongoDB.Driver.Core.Misc; -using MongoDB.Driver.Core.TestHelpers.XunitExtensions; -using Xunit; - -namespace MongoDB.Driver.Tests.Specifications.atlas_data_lake_testing.prose_tests -{ - [Trait("Category", "AtlasDataLake")] - public class AtlasDataLakeProseTests - { - [Fact] - public void Driver_should_connect_to_AtlasDataLake_without_authentication() - { - RequireEnvironment.Check().EnvironmentVariable("ATLAS_DATA_LAKE_TESTS_ENABLED"); - - using (var client = DriverTestConfiguration.CreateMongoClient()) - { - client.GetDatabase("admin").RunCommand<BsonDocument>(new BsonDocument("ping", 1)); - } - } - - [Fact] - public void Driver_should_connect_to_AtlasDataLake_with_SCRAM_SHA_1() - { - RequireEnvironment.Check().EnvironmentVariable("ATLAS_DATA_LAKE_TESTS_ENABLED"); - RequireServer.Check(); - - var connectionString = CoreTestConfiguration.ConnectionString; - var username = connectionString.Username; - var password = connectionString.Password; - var source = connectionString.AuthSource; - - var settings = DriverTestConfiguration.Client.Settings.Clone(); - settings.Credential = MongoCredential.FromComponents(mechanism: "SCRAM-SHA-1", source, username, password); - - using (var client = DriverTestConfiguration.CreateMongoClient(settings)) - { - client.GetDatabase("admin").RunCommand<BsonDocument>(new BsonDocument("ping", 1)); - } - } - - [Fact] - public void Driver_should_connect_to_AtlasDataLake_with_SCRAM_SHA_256() - { - RequireEnvironment.Check().EnvironmentVariable("ATLAS_DATA_LAKE_TESTS_ENABLED"); - RequireServer.Check().Supports(Feature.ScramSha256Authentication); - - var connectionString = CoreTestConfiguration.ConnectionString; - var username = connectionString.Username; - var password = connectionString.Password; - var source = connectionString.AuthSource; - - var settings = 
DriverTestConfiguration.Client.Settings.Clone(); - settings.Credential = MongoCredential.FromComponents(mechanism: "SCRAM-SHA-256", source, username, password); - - using (var client = DriverTestConfiguration.CreateMongoClient(settings)) - { - client.GetDatabase("admin").RunCommand<BsonDocument>(new BsonDocument("ping", 1)); - } - } - - [Fact] - public void KillCursors_should_return_expected_result() - { - RequireEnvironment.Check().EnvironmentVariable("ATLAS_DATA_LAKE_TESTS_ENABLED"); - RequireServer.Check(); - - var databaseName = "test"; - var collectionName = "driverdata"; - - var eventCapturer = new EventCapturer() - .Capture<CommandStartedEvent>(x => "killCursors" == x.CommandName) - .Capture<CommandSucceededEvent>(x => new[] { "killCursors", "find" }.Contains(x.CommandName)); - - using (var client = DriverTestConfiguration.CreateMongoClient(eventCapturer)) - { - var cursor = client - .GetDatabase(databaseName) - .GetCollection<BsonDocument>(collectionName) - .Find(new BsonDocument(), new FindOptions { BatchSize = 2 }) - .ToCursor(); - - var findCommandSucceededEvent = eventCapturer.Events.OfType<CommandSucceededEvent>().First(x => x.CommandName == "find"); - var findCommandResult = findCommandSucceededEvent.Reply; - var cursorId = findCommandResult["cursor"]["id"].AsInt64; - var cursorNamespace = CollectionNamespace.FromFullName(findCommandResult["cursor"]["ns"].AsString); - - cursor.Dispose(); - - var killCursorsCommandStartedEvent = eventCapturer.Events.OfType<CommandStartedEvent>().First(x => x.CommandName == "killCursors"); - var killCursorsCommandSucceededEvent = eventCapturer.Events.OfType<CommandSucceededEvent>().First(x => x.CommandName == "killCursors"); - var killCursorsStartedCommand = killCursorsCommandStartedEvent.Command; - - cursorNamespace.DatabaseNamespace.DatabaseName.Should().Be(killCursorsCommandStartedEvent.DatabaseNamespace.DatabaseName); - cursorNamespace.CollectionName.Should().Be(killCursorsStartedCommand["killCursors"].AsString); - 
cursorId.Should().Be(killCursorsStartedCommand["cursors"][0].AsInt64); - cursorId.Should().Be(killCursorsCommandSucceededEvent.Reply["cursorsKilled"][0].AsInt64); - } - } - } -} diff --git a/tests/MongoDB.Driver.Tests/Specifications/auth/OidcAuthenticationProseTests.cs b/tests/MongoDB.Driver.Tests/Specifications/auth/OidcAuthenticationProseTests.cs index d2705c2b265..f42bde30483 100644 --- a/tests/MongoDB.Driver.Tests/Specifications/auth/OidcAuthenticationProseTests.cs +++ b/tests/MongoDB.Driver.Tests/Specifications/auth/OidcAuthenticationProseTests.cs @@ -473,6 +473,36 @@ public async Task Speculative_authentication_should_be_ignored_on_reauthenticati eventCapturer.Next().Should().BeOfType<CommandSucceededEvent>(); } + // 4.5 Reauthentication Succeeds when a Session is involved + // https://github.com/mongodb/specifications/blob/668992950d975d3163e538849dd20383a214fc37/source/auth/tests/mongodb-oidc.md?plain=1#L235 + [Theory] + [ParameterAttributeData] + public async Task Reauthentication_Succeeds_when_Session_involved([Values(false, true)] bool async) + { + EnsureOidcIsConfigured("test"); + + var callbackMock = new Mock<IOidcCallback>(); + // configure mock with valid access token + ConfigureOidcCallback(callbackMock, GetAccessTokenValue()); + var credential = MongoCredential.CreateOidcCredential(callbackMock.Object); + var (collection, client, eventCapturer) = CreateOidcTestObjects(credential); + + using (ConfigureFailPoint(1, (int)ServerErrorCode.ReauthenticationRequired, "find")) + { + var session = client.StartSession(); + _ = async + ? 
await collection.FindAsync(session, Builders<BsonDocument>.Filter.Empty) + : collection.FindSync(session, Builders<BsonDocument>.Filter.Empty); + } + + VerifyCallbackUsage(callbackMock, async, Times.Exactly(2)); + eventCapturer.Count.Should().Be(4); + eventCapturer.Next().Should().BeOfType<CommandStartedEvent>(); + eventCapturer.Next().Should().BeOfType<CommandSucceededEvent>(); + eventCapturer.Next().Should().BeOfType<CommandStartedEvent>(); + eventCapturer.Next().Should().BeOfType<CommandSucceededEvent>(); + } + // 5.1 Azure With No Username // https://github.com/mongodb/specifications/blob/1448ba6eedfa2f16584222e683b427bea07bb085/source/auth/tests/mongodb-oidc.md?plain=1#L212 [Theory] diff --git a/tests/MongoDB.Driver.Tests/Specifications/change-streams/prose-tests/ChangeStreamProseTests.cs b/tests/MongoDB.Driver.Tests/Specifications/change-streams/prose-tests/ChangeStreamProseTests.cs index dcbfbc82cfe..69beb58fa4f 100644 --- a/tests/MongoDB.Driver.Tests/Specifications/change-streams/prose-tests/ChangeStreamProseTests.cs +++ b/tests/MongoDB.Driver.Tests/Specifications/change-streams/prose-tests/ChangeStreamProseTests.cs @@ -25,6 +25,7 @@ namespace MongoDB.Driver.Tests.Specifications.change_streams.prose_tests { + [Trait("Category", "Integration")] public class ChangeStreamProseTests : LoggableTestClass { public ChangeStreamProseTests(ITestOutputHelper testOutputHelper) diff --git a/tests/MongoDB.Driver.Tests/Specifications/client-side-encryption/ClientSideEncryptionTestRunner.cs b/tests/MongoDB.Driver.Tests/Specifications/client-side-encryption/ClientSideEncryptionTestRunner.cs index 29cf6333a79..73456469bd3 100644 --- a/tests/MongoDB.Driver.Tests/Specifications/client-side-encryption/ClientSideEncryptionTestRunner.cs +++ b/tests/MongoDB.Driver.Tests/Specifications/client-side-encryption/ClientSideEncryptionTestRunner.cs @@ -28,7 +28,7 @@ namespace MongoDB.Driver.Tests.Specifications.client_side_encryption { 
[Trait("Category", "CSFLE")] - [Trait("Category", "Serverless")] + [Trait("Category", "Integration")] public class ClientSideEncryptionTestRunner : MongoClientJsonDrivenTestRunnerBase { #region static @@ -39,6 +39,7 @@ public class ClientSideEncryptionTestRunner : MongoClientJsonDrivenTestRunnerBas public ClientSideEncryptionTestRunner(ITestOutputHelper testOutputHelper) : base(testOutputHelper) { + CoreTestConfiguration.SkipMongocryptdTests_SERVER_106469(true); } [Theory] diff --git a/tests/MongoDB.Driver.Tests/Specifications/client-side-encryption/EncryptionTestHelper.cs b/tests/MongoDB.Driver.Tests/Specifications/client-side-encryption/EncryptionTestHelper.cs index 11b75eae163..5225efd1429 100644 --- a/tests/MongoDB.Driver.Tests/Specifications/client-side-encryption/EncryptionTestHelper.cs +++ b/tests/MongoDB.Driver.Tests/Specifications/client-side-encryption/EncryptionTestHelper.cs @@ -337,12 +337,20 @@ public static IReadOnlyDictionary<string, IReadOnlyDictionary<string, object>> P } else { + // AWS KMS provider uses temporary credentials when a sessionToken is present. + // The sessionToken field is exclusive to temporary credentials in AWS authentication. + var effectiveProviderNameForEnv = kmsProvider.Name; + if (effectiveProviderNameForEnv == "aws" && kmsProviderDocument.Contains("sessionToken")) + { + effectiveProviderNameForEnv = "awsTemporary"; + } + foreach (var providedKmsInfo in kmsProviderDocument) { kmsOptions.Add( providedKmsInfo.Name, IsPlaceholder(providedKmsInfo.Value) - ? GetFromEnvVariables(kmsProvider.Name, providedKmsInfo.Name) // use initial kms name + ? 
GetFromEnvVariables(effectiveProviderNameForEnv, providedKmsInfo.Name) : providedKmsInfo.Value.AsString); } } diff --git a/tests/MongoDB.Driver.Tests/Specifications/client-side-encryption/prose-tests/ClientEncryptionProseTests.cs b/tests/MongoDB.Driver.Tests/Specifications/client-side-encryption/prose-tests/ClientEncryptionProseTests.cs index f050c834cca..d175b78c66a 100644 --- a/tests/MongoDB.Driver.Tests/Specifications/client-side-encryption/prose-tests/ClientEncryptionProseTests.cs +++ b/tests/MongoDB.Driver.Tests/Specifications/client-side-encryption/prose-tests/ClientEncryptionProseTests.cs @@ -55,6 +55,7 @@ namespace MongoDB.Driver.Tests.Specifications.client_side_encryption.prose_tests { [Trait("Category", "CSFLE")] + [Trait("Category", "Integration")] public class ClientEncryptionProseTests : LoggableTestClass { #region static @@ -85,6 +86,8 @@ public ClientEncryptionProseTests(ITestOutputHelper testOutputHelper) : base(testOutputHelper) { _cluster = CoreTestConfiguration.Cluster; + + CoreTestConfiguration.SkipMongocryptdTests_SERVER_106469(checkForSharedLib: true); } // public methods @@ -418,7 +421,7 @@ public void BypassSpawningMongocryptdViaMongocryptdBypassSpawnTest( var coll = GetCollection(clientEncrypted, __collCollectionNamespace); var exception = Record.Exception(() => Insert(coll, async, new BsonDocument("encrypted", "test"))); - AssertInnerEncryptionException<TimeoutException>(exception, "A timeout occurred after 10000ms selecting a server"); + AssertInnerEncryptionExceptionRegex<TimeoutException>(exception, "A timeout occurred after \\d+ms selecting a server"); } } @@ -446,7 +449,7 @@ public void BypassSpawningMongocryptdTest( var exception = Record.Exception(() => adminDatabase.RunCommand<BsonDocument>(legacyHelloCommand)); exception.Should().BeOfType<TimeoutException>(); - exception.Message.Should().Contain("A timeout occurred after 1000ms selecting a server").And.Contain("localhost:27021"); + exception.Message.Should().MatchRegex(@".*A 
timeout occurred after \d+ms selecting a server.*").And.Contain("localhost:27021"); } IMongoClient EnsureEnvironmentAndConfigureTestClientEncrypted() @@ -2181,7 +2184,7 @@ public async Task RangeExplicitEncryptionTest( [Range(1, 8)] int testCase, // test case rangeType values correspond to keys used in test configuration files [Values("DecimalNoPrecision", "DecimalPrecision", "DoubleNoPrecision", "DoublePrecision", "Date", "Int", "Long")] string rangeType, - [Values(false, false)] bool async) + [Values(true, false)] bool async) { // CSHARP-4606: Skip all fle2v2 tests on Mac until https://jira.mongodb.org/browse/SERVER-69563 propagates to EG Macs. RequirePlatform.Check().SkipWhen(SupportedOperatingSystem.MacOS); @@ -2727,6 +2730,222 @@ PipelineDefinition<BsonDocument, BsonDocument> CreatePipeline(string pipelineJso } } + // https://github.com/mongodb/specifications/blob/master/source/client-side-encryption/tests/README.md#27-text-explicit-encryption + [Theory] + [ParameterAttributeData] + public void TextExplicitEncryptionTest( + [Range(1, 7)] int testCase, + [Values(true, false)] bool async) + { + RequireServer.Check() + .Supports(Feature.Csfle2QEv2TextPreviewAlgorithm) + .ClusterTypes(ClusterType.ReplicaSet, ClusterType.Sharded, ClusterType.LoadBalanced); + + var prefixSuffixCollectionNamespace = new CollectionNamespace("db", "prefix-suffix"); + var substringCollectionNamespace = new CollectionNamespace("db", "substring"); + + using var keyVaultClient = ConfigureClient(); + + DropAndCreateCollection(keyVaultClient, prefixSuffixCollectionNamespace, encryptedFields: JsonFileReader.Instance.Documents["etc.data.encryptedFields-prefix-suffix.json"]); + DropAndCreateCollection(keyVaultClient, substringCollectionNamespace, encryptedFields: JsonFileReader.Instance.Documents["etc.data.encryptedFields-substring.json"]); + + var key1Document = 
JsonFileReader.Instance.Documents["etc.data.keys.key1-document.json"]; + var key1Id = key1Document["_id"].AsGuid; + + var keyVaultCollection = GetCollection(keyVaultClient, __keyVaultCollectionNamespace); + Insert(keyVaultCollection, async, key1Document); + + var encryptOptions = new EncryptOptions(EncryptionAlgorithm.TextPreview, keyId: key1Id, contentionFactor: 0); + + using (var clientEncryption = ConfigureClientEncryption(keyVaultClient, kmsProviderFilter: "local")) + using (var encryptedClient = ConfigureClientEncrypted(kmsProviderFilter: "local", bypassQueryAnalysis: true)) + { + var prefixSuffixCollection = GetCollection(encryptedClient, prefixSuffixCollectionNamespace); + var substringCollection = GetCollection(encryptedClient, substringCollectionNamespace); + + var valueToEncrypt = "foobarbaz"; + + var encryptedText = ExplicitEncrypt( + clientEncryption, + encryptOptions.With(textOptions: new TextOptions( + true, + true, + new PrefixOptions(10, 2), + suffixOptions: new SuffixOptions(10, 2))), + valueToEncrypt, + async); + + Insert(prefixSuffixCollection, async, new BsonDocument { { "_id", 0 }, { "encryptedText", encryptedText } }); + + encryptedText = ExplicitEncrypt( + clientEncryption, + encryptOptions.With(textOptions: new TextOptions( + true, + true, + substringOptions: new SubstringOptions(10, 10, 2))), + valueToEncrypt, + async); + + Insert(substringCollection, async, new BsonDocument { { "_id", 0 }, { "encryptedText", encryptedText } }); + + RunTestCase(clientEncryption, prefixSuffixCollection, substringCollection); + } + + void RunTestCase(ClientEncryption clientEncryption, IMongoCollection<BsonDocument> prefixSuffixCollection, IMongoCollection<BsonDocument> substringCollection) + { + switch (testCase) + { + case 1: // can find a document by prefix + { + var encryptedFoo = ExplicitEncrypt( + clientEncryption, + encryptOptions.With(queryType: "prefixPreview", textOptions: new TextOptions( + true, + true, + new PrefixOptions(10, 2))), + "foo", + 
async); + + var filter = CreateFindFilter("$encStrStartsWith", "encryptedText", encryptedFoo); + + var findResult = Find(prefixSuffixCollection, filter, async).Single(); + findResult["encryptedText"].AsString.Should().Be("foobarbaz"); + break; + } + case 2: // can find a document by suffix + { + var encryptedBaz = ExplicitEncrypt( + clientEncryption, + encryptOptions.With(queryType: "suffixPreview", textOptions: new TextOptions( + true, + true, + suffixOptions: new SuffixOptions(10, 2))), + "baz", + async); + + var filter = CreateFindFilter("$encStrEndsWith", "encryptedText", encryptedBaz); + + var findResult = Find(prefixSuffixCollection, filter, async).Single(); + findResult["encryptedText"].AsString.Should().Be("foobarbaz"); + break; + } + case 3: // assert no document found by prefix + { + var encryptedBaz = ExplicitEncrypt( + clientEncryption, + encryptOptions.With(queryType: "prefixPreview", textOptions: new TextOptions( + true, + true, + new PrefixOptions(10, 2))), + "baz", + async); + + var filter = CreateFindFilter("$encStrStartsWith", "encryptedText", encryptedBaz); + + var findResult = Find(prefixSuffixCollection, filter, async).ToList(); + findResult.Should().BeEmpty(); + break; + } + case 4: // assert no document found by suffix + { + var encryptedFoo = ExplicitEncrypt( + clientEncryption, + encryptOptions.With(queryType: "suffixPreview", textOptions: new TextOptions( + true, + true, + suffixOptions: new SuffixOptions(10, 2))), + "foo", + async); + + var filter = CreateFindFilter("$encStrEndsWith", "encryptedText", encryptedFoo); + + var findResult = Find(prefixSuffixCollection, filter, async).ToList(); + findResult.Should().BeEmpty(); + break; + } + case 5: // can find a document by substring + { + var encryptedBar = ExplicitEncrypt( + clientEncryption, + encryptOptions.With(queryType: "substringPreview", textOptions: new TextOptions( + true, + true, + substringOptions: new SubstringOptions(10, 10, 2))), + "bar", + async); + + var filter = 
CreateFindFilter("$encStrContains", "encryptedText", encryptedBar); + + var findResult = Find(substringCollection, filter, async).Single(); + findResult["encryptedText"].AsString.Should().Be("foobarbaz"); + break; + } + case 6: // assert no document found by substring + { + var encryptedQux = ExplicitEncrypt( + clientEncryption, + encryptOptions.With(queryType: "substringPreview", textOptions: new TextOptions( + true, + true, + substringOptions: new SubstringOptions(10, 10, 2))), + "qux", + async); + + var filter = CreateFindFilter("$encStrContains", "encryptedText", encryptedQux); + + var findResult = Find(substringCollection, filter, async).ToList(); + findResult.Should().BeEmpty(); + break; + } + case 7: // assert contentionFactor is required + { + var exception = Record.Exception(() => + { + ExplicitEncrypt( + clientEncryption, + encryptOptions.With(contentionFactor: null, queryType: "prefixPreview", textOptions: new TextOptions( + true, + true, + new PrefixOptions(10, 2))), + "foo", + async); + }); + + exception.Should().BeOfType<MongoEncryptionException>() + .Which.Message.Should().Contain("contention factor is required for textPreview algorithm"); + break; + } + default: throw new Exception($"Unexpected test case {testCase}."); + } + } + + BsonDocument CreateFindFilter(string operation, string fieldName, BsonValue encryptedValue) => + new() + { + { + "$expr", new BsonDocument + { + { + $"{operation}", new BsonDocument + { + { "input", $"${fieldName}" }, + { "prefix", encryptedValue, operation == "$encStrStartsWith" }, + { "substring", encryptedValue, operation == "$encStrContains" }, + { "suffix", encryptedValue, operation == "$encStrEndsWith" } + } + } + } + } + }; + + void DropAndCreateCollection(IMongoClient client, CollectionNamespace collectionNamespace, BsonDocument encryptedFields) + { + var db = client.GetDatabase(collectionNamespace.DatabaseNamespace.DatabaseName, new MongoDatabaseSettings{ WriteConcern = WriteConcern.WMajority }); + 
db.DropCollection(collectionNamespace.CollectionName, new DropCollectionOptions { EncryptedFields = encryptedFields }); + db.CreateCollection(collectionNamespace.CollectionName, new CreateCollectionOptions { EncryptedFields = encryptedFields }); + } + } + [Theory] [ParameterAttributeData] public void ViewAreProhibitedTest([Values(false, true)] bool async) @@ -2901,6 +3120,10 @@ private void AssertInnerEncryptionException<TInnerException>(Exception ex, strin where TInnerException : Exception => AssertInnerEncryptionException<TInnerException>(ex, e => e.Message.Should().Contain(exceptionMessageContains)); + private void AssertInnerEncryptionExceptionRegex<TInnerException>(Exception ex, string exceptionMessageRegex) + where TInnerException : Exception + => AssertInnerEncryptionException<TInnerException>(ex, e => e.Message.Should().MatchRegex(exceptionMessageRegex)); + private IMongoClient ConfigureClient( bool clearCollections = true, int? maxPoolSize = null, @@ -3244,7 +3467,7 @@ private void DropCollection(CollectionNamespace collectionNamespace, BsonDocumen using (var binding = new WritableServerBinding(_cluster, session.Fork())) using (var bindingHandle = new ReadWriteBindingHandle(binding)) { - operation.Execute(bindingHandle, CancellationToken.None); + operation.Execute(OperationContext.NoTimeout, bindingHandle); } } diff --git a/tests/MongoDB.Driver.Tests/Specifications/connection-monitoring-and-pooling/ConnectionMonitoringAndPoolingTestRunner.cs b/tests/MongoDB.Driver.Tests/Specifications/connection-monitoring-and-pooling/ConnectionMonitoringAndPoolingTestRunner.cs index f7b1b28c512..6da8fcb73af 100644 --- a/tests/MongoDB.Driver.Tests/Specifications/connection-monitoring-and-pooling/ConnectionMonitoringAndPoolingTestRunner.cs +++ b/tests/MongoDB.Driver.Tests/Specifications/connection-monitoring-and-pooling/ConnectionMonitoringAndPoolingTestRunner.cs @@ -44,6 +44,7 @@ namespace MongoDB.Driver.Tests.Specifications.connection_monitoring_and_pooling { 
[Trait("Category", "Pool")] + [Trait("Category", "Integration")] public class ConnectionMonitoringAndPoolingTestRunner { #region static @@ -82,12 +83,6 @@ private static class Schema public readonly static string ignore = nameof(ignore); public readonly static string async = nameof(async); - public static class Operations - { - public const string runOn = nameof(runOn); - public readonly static string failPoint = nameof(failPoint); - } - public static class Intergration { public readonly static string runOn = nameof(runOn); @@ -100,12 +95,6 @@ public static class Styles public readonly static string integration = nameof(integration); } - public sealed class FailPoint - { - public readonly static string appName = nameof(appName); - public readonly static string data = nameof(data); - } - public readonly static string[] AllFields = new[] { _path, @@ -408,18 +397,9 @@ private void ExecuteCheckOut( void CheckOut(BsonDocument op, IConnectionPool cp, ConcurrentDictionary<string, IConnection> cm) { - IConnection conn; - if (async) - { - conn = cp - .AcquireConnectionAsync(CancellationToken.None) - .GetAwaiter() - .GetResult(); - } - else - { - conn = cp.AcquireConnection(CancellationToken.None); - } + var conn = async ? 
+ cp.AcquireConnectionAsync(OperationContext.NoTimeout).GetAwaiter().GetResult() : + cp.AcquireConnection(OperationContext.NoTimeout); if (op.TryGetValue("label", out var label)) { @@ -679,7 +659,7 @@ private void ParseSettings( connectionIdLocalValueProvider: connectionIdProvider)) .Subscribe(eventCapturer)); - var server = cluster.SelectServer(WritableServerSelector.Instance, CancellationToken.None); + var server = cluster.SelectServer(OperationContext.NoTimeout, WritableServerSelector.Instance); connectionPool = server._connectionPool(); if (test.TryGetValue(Schema.Intergration.failPoint, out var failPointDocument)) @@ -737,7 +717,7 @@ o is ServerHeartbeatSucceededEvent || eventCapturer.WaitForOrThrowIfTimeout(events => events.Any(e => e is ConnectionPoolClearedEvent), TimeSpan.FromMilliseconds(500)); } - var failPointServer = CoreTestConfiguration.Cluster.SelectServer(new EndPointServerSelector(server.EndPoint), default); + var failPointServer = CoreTestConfiguration.Cluster.SelectServer(OperationContext.NoTimeout, new EndPointServerSelector(server.EndPoint)); failPoint = FailPoint.Configure(failPointServer, NoCoreSession.NewHandle(), failPointDocument.AsBsonDocument, withAsync: async); if (resetPool) @@ -753,33 +733,6 @@ o is ServerHeartbeatSucceededEvent || return (connectionPool, failPoint, cluster, eventsFilter); } - private IConnectionPool SetupConnectionPoolMock(BsonDocument test, IEventSubscriber eventSubscriber) - { - var endPoint = new DnsEndPoint("localhost", 27017); - var serverId = new ServerId(new ClusterId(), endPoint); - ParseSettings(test, out var connectionPoolSettings, out var connectionSettings); - - var connectionFactory = new Mock<IConnectionFactory>(); - var exceptionHandler = new Mock<IConnectionExceptionHandler>(); - connectionFactory.Setup(f => f.ConnectionSettings).Returns(() => new ConnectionSettings()); - connectionFactory - .Setup(c => c.CreateConnection(serverId, endPoint)) - .Returns(() => - { - var connection = new 
MockConnection(serverId, connectionSettings, eventSubscriber); - return connection; - }); - var connectionPool = new ExclusiveConnectionPool( - serverId, - endPoint, - connectionPoolSettings, - connectionFactory.Object, - exceptionHandler.Object, - eventSubscriber.ToEventLogger<LogCategories.Connection>()); - - return connectionPool; - } - private void Start(BsonDocument operation, ConcurrentDictionary<string, Task> tasks) { var startTarget = operation.GetValue("target").ToString(); @@ -863,6 +816,14 @@ public static ServerId ServerId(this object @event) internal static class IServerReflector { - public static IConnectionPool _connectionPool(this IServer server) => (IConnectionPool)Reflector.GetFieldValue(server, nameof(_connectionPool)); + public static IConnectionPool _connectionPool(this IServer server) + { + if (server is SelectedServer) + { + server = (IServer)Reflector.GetFieldValue(server, "_server"); + } + + return (IConnectionPool)Reflector.GetFieldValue(server, nameof(_connectionPool)); + } } } diff --git a/tests/MongoDB.Driver.Tests/Specifications/crud/CrudTestRunner.cs b/tests/MongoDB.Driver.Tests/Specifications/crud/CrudTestRunner.cs index acdf4c75f80..b1e22833dc1 100644 --- a/tests/MongoDB.Driver.Tests/Specifications/crud/CrudTestRunner.cs +++ b/tests/MongoDB.Driver.Tests/Specifications/crud/CrudTestRunner.cs @@ -31,7 +31,7 @@ namespace MongoDB.Driver.Tests.Specifications.crud { - [Trait("Category", "Serverless")] + [Trait("Category", "Integration")] public class CrudTestRunner : LoggableTestClass { #region static @@ -312,11 +312,7 @@ private void SkipTestIfNeeded(BsonDocument definition, BsonDocument test) if (definition.TryGetValue("serverless", out var serverless)) { - if (serverless == "forbid") - { - RequireServer.Check().Serverless(false); - } - else + if (serverless != "forbid") { throw new FormatException($"Invalid serverless field value: '{serverless.AsString}'."); } diff --git 
a/tests/MongoDB.Driver.Tests/Specifications/crud/prose-tests/CrudProseTests.cs b/tests/MongoDB.Driver.Tests/Specifications/crud/prose-tests/CrudProseTests.cs index 41987fcfc3e..c49f577859b 100644 --- a/tests/MongoDB.Driver.Tests/Specifications/crud/prose-tests/CrudProseTests.cs +++ b/tests/MongoDB.Driver.Tests/Specifications/crud/prose-tests/CrudProseTests.cs @@ -33,7 +33,7 @@ namespace MongoDB.Driver.Tests.Specifications.crud.prose_tests { - [Trait("Category", "Serverless")] + [Trait("Category", "Integration")] public class CrudProseTests : LoggableTestClass { // public constructors @@ -160,7 +160,7 @@ BsonDocument GetExpectedWriteErrorDetails(ObjectId objectId) [ParameterAttributeData] public async Task MongoClient_bulkWrite_splits_batches_on_maxWriteBatchSize([Values(true, false)]bool async) { - RequireServer.Check().Supports(Feature.ClientBulkWrite).Serverless(false); + RequireServer.Check().Supports(Feature.ClientBulkWrite); var maxBatchCount = DriverTestConfiguration.GetConnectionDescription().MaxBatchCount; var models = Enumerable .Range(0, maxBatchCount + 1) @@ -184,7 +184,7 @@ public async Task MongoClient_bulkWrite_splits_batches_on_maxWriteBatchSize([Val [ParameterAttributeData] public async Task MongoClient_bulkWrite_splits_batches_on_maxMessageSizeBytes([Values(true, false)]bool async) { - RequireServer.Check().Supports(Feature.ClientBulkWrite).Serverless(false); + RequireServer.Check().Supports(Feature.ClientBulkWrite); var connectionDescription = DriverTestConfiguration.GetConnectionDescription(); var maxDocumentSize = connectionDescription.MaxDocumentSize; var maxMessageSize = connectionDescription.MaxMessageSize; @@ -212,7 +212,7 @@ public async Task MongoClient_bulkWrite_splits_batches_on_maxMessageSizeBytes([V [ParameterAttributeData] public async Task MongoClient_bulkWrite_collects_WriteConcernError_across_batches([Values(true, false)]bool async) { - RequireServer.Check().Supports(Feature.ClientBulkWrite).Serverless(false); + 
RequireServer.Check().Supports(Feature.ClientBulkWrite); var maxBatchCount = DriverTestConfiguration.GetConnectionDescription().MaxBatchCount; const string failPointCommand = @" { @@ -260,7 +260,7 @@ public async Task MongoClient_bulkWrite_handles_individual_WriteError_across_bat [Values(true, false)] bool async, [Values(true, false)] bool ordered) { - RequireServer.Check().Supports(Feature.ClientBulkWrite).Serverless(false); + RequireServer.Check().Supports(Feature.ClientBulkWrite); var maxBatchCount = DriverTestConfiguration.GetConnectionDescription().MaxBatchCount; var model = new BsonDocument { { "_id", 1 } }; var models = Enumerable @@ -294,7 +294,7 @@ public async Task MongoClient_bulkWrite_handles_cursor_requiring_getMore( [Values(true, false)] bool async, [Values(true, false)] bool isInTransaction) { - RequireServer.Check().Supports(Feature.ClientBulkWrite).Serverless(false); + RequireServer.Check().Supports(Feature.ClientBulkWrite); if (isInTransaction) { RequireServer.Check() @@ -356,7 +356,7 @@ public async Task MongoClient_bulkWrite_handles_cursor_requiring_getMore( [ParameterAttributeData] public async Task MongoClient_bulkWrite_handles_getMore_error([Values(true, false)] bool async) { - RequireServer.Check().Supports(Feature.ClientBulkWrite).Serverless(false); + RequireServer.Check().Supports(Feature.ClientBulkWrite); var maxDocumentSize = DriverTestConfiguration.GetConnectionDescription().MaxDocumentSize; const string failPointCommand = @" @@ -419,7 +419,7 @@ public async Task MongoClient_bulkWrite_handles_getMore_error([Values(true, fals // [Values(true, false)] bool async, // [Values(true, false)] bool isBatchSplit) // { - // RequireServer.Check().Supports(Feature.ClientBulkWrite).Serverless(false); + // RequireServer.Check().Supports(Feature.ClientBulkWrite); // var connectionDescription = DriverTestConfiguration.GetConnectionDescription(); // var maxDocumentSize = connectionDescription.MaxDocumentSize; // var maxMessageSize = 
connectionDescription.MaxMessageSize; @@ -476,7 +476,7 @@ public async Task MongoClient_bulkWrite_handles_getMore_error([Values(true, fals [ParameterAttributeData] public async Task MongoClient_bulkWrite_throws_if_no_operations_can_be_added_big_document([Values(true, false)]bool async) { - RequireServer.Check().Supports(Feature.ClientBulkWrite).Serverless(false); + RequireServer.Check().Supports(Feature.ClientBulkWrite); var maxMessageSize = DriverTestConfiguration.GetConnectionDescription().MaxMessageSize; var models = new[] @@ -495,6 +495,7 @@ public async Task MongoClient_bulkWrite_throws_if_no_operations_can_be_added_big var bulkWriteException = exception.Should().BeOfType<ClientBulkWriteException>().Subject; bulkWriteException.InnerException.Should().BeOfType<FormatException>(); + bulkWriteException.PartialResult.Should().BeNull(); } // https://github.com/mongodb/specifications/blob/7517681e6a3186cb7f3114314a9fe1bc3a747b9f/source/crud/tests/README.md?plain=1#L602 @@ -502,7 +503,7 @@ public async Task MongoClient_bulkWrite_throws_if_no_operations_can_be_added_big [ParameterAttributeData] public async Task MongoClient_bulkWrite_throws_if_no_operations_can_be_added_big_namespace([Values(true, false)]bool async) { - RequireServer.Check().Supports(Feature.ClientBulkWrite).Serverless(false); + RequireServer.Check().Supports(Feature.ClientBulkWrite); var maxMessageSize = DriverTestConfiguration.GetConnectionDescription().MaxMessageSize; var models = new[] @@ -521,6 +522,7 @@ public async Task MongoClient_bulkWrite_throws_if_no_operations_can_be_added_big var bulkWriteException = exception.Should().BeOfType<ClientBulkWriteException>().Subject; bulkWriteException.InnerException.Should().BeOfType<FormatException>(); + bulkWriteException.PartialResult.Should().BeNull(); } // 
https://github.com/mongodb/specifications/blob/7517681e6a3186cb7f3114314a9fe1bc3a747b9f/source/crud/tests/README.md?plain=1#L647 @@ -528,7 +530,7 @@ public async Task MongoClient_bulkWrite_throws_if_no_operations_can_be_added_big [ParameterAttributeData] public async Task MongoClient_bulkWrite_throws_if_auto_encryption_configured([Values(true, false)]bool async) { - RequireServer.Check().Supports(Feature.ClientBulkWrite).Serverless(false); + RequireServer.Check().Supports(Feature.ClientBulkWrite); var models = new[] { @@ -559,6 +561,7 @@ public async Task MongoClient_bulkWrite_throws_if_auto_encryption_configured([Va : Record.Exception(() => client.BulkWrite(models)); exception.Should().BeOfType<NotSupportedException>(); + exception.Message.Should().Be("BulkWrite does not currently support automatic encryption."); } // https://github.com/mongodb/specifications/blob/d1bdb68b7b4aec9681ea56d41c8b9a6c1a97d365/source/crud/tests/README.md?plain=1#L699 @@ -566,7 +569,7 @@ public async Task MongoClient_bulkWrite_throws_if_auto_encryption_configured([Va [ParameterAttributeData] public async Task MongoClient_bulkWrite_unacknowledged_write_concern_uses_w0_all_batches([Values(true, false)] bool async) { - RequireServer.Check().Supports(Feature.ClientBulkWrite).Serverless(false); + RequireServer.Check().Supports(Feature.ClientBulkWrite); var connectionDescription = DriverTestConfiguration.GetConnectionDescription(); var maxDocumentSize = connectionDescription.MaxDocumentSize; @@ -671,7 +674,7 @@ public async Task Ensure_generated_ids_are_first_fields_in_document_using_collec [ParameterAttributeData] public async Task Ensure_generated_ids_are_first_fields_in_document_using_client_bulkWrite([Values(true, false)] bool async) { - RequireServer.Check().Supports(Feature.ClientBulkWrite).Serverless(false); + RequireServer.Check().Supports(Feature.ClientBulkWrite); var eventCapturer = new 
EventCapturer().Capture<CommandStartedEvent>(); using var client = CreateMongoClient(eventCapturer); diff --git a/tests/MongoDB.Driver.Tests/Specifications/initial-dns-seedlist-discovery/InitialDnsSeedlistDiscoveryTestRunner.cs b/tests/MongoDB.Driver.Tests/Specifications/initial-dns-seedlist-discovery/InitialDnsSeedlistDiscoveryTestRunner.cs index f5e2fdac6d5..64f8b1d7482 100644 --- a/tests/MongoDB.Driver.Tests/Specifications/initial-dns-seedlist-discovery/InitialDnsSeedlistDiscoveryTestRunner.cs +++ b/tests/MongoDB.Driver.Tests/Specifications/initial-dns-seedlist-discovery/InitialDnsSeedlistDiscoveryTestRunner.cs @@ -32,6 +32,7 @@ namespace MongoDB.Driver.Tests.Specifications.initial_dns_seedlist_discovery { [Trait("Category", "ConnectionString")] [Trait("Category", "SupportLoadBalancing")] + [Trait("Category", "Integration")] public class InitialDnsSeedlistDiscoveryTestRunner { [Theory] diff --git a/tests/MongoDB.Driver.Tests/Specifications/mongodb-handshake/MongoDbHandshakeProseTests.cs b/tests/MongoDB.Driver.Tests/Specifications/mongodb-handshake/MongoDbHandshakeProseTests.cs index 8117f67bcc5..7e56d4f070b 100644 --- a/tests/MongoDB.Driver.Tests/Specifications/mongodb-handshake/MongoDbHandshakeProseTests.cs +++ b/tests/MongoDB.Driver.Tests/Specifications/mongodb-handshake/MongoDbHandshakeProseTests.cs @@ -13,6 +13,7 @@ * limitations under the License. 
*/ +using System.IO; using System.Net; using System.Threading; using System.Threading.Tasks; @@ -46,6 +47,13 @@ public async Task DriverAcceptsArbitraryAuthMechanism([Values(false, true)] bool { var capturedEvents = new EventCapturer(); var mockStreamFactory = new Mock<IStreamFactory>(); + using var stream = new MemoryStream(); + mockStreamFactory + .Setup(s => s.CreateStream(It.IsAny<EndPoint>(), It.IsAny<CancellationToken>())) + .Returns(stream); + mockStreamFactory + .Setup(s => s.CreateStreamAsync(It.IsAny<EndPoint>(), It.IsAny<CancellationToken>())) + .ReturnsAsync(stream); var endPoint = new DnsEndPoint("localhost", 27017); var serverId = new ServerId(new ClusterId(), endPoint); var connectionId = new ConnectionId(serverId); @@ -56,16 +64,16 @@ public async Task DriverAcceptsArbitraryAuthMechanism([Values(false, true)] bool var mockConnectionInitializer = new Mock<IConnectionInitializer>(); mockConnectionInitializer - .Setup(i => i.SendHello(It.IsAny<IConnection>(), CancellationToken.None)) + .Setup(i => i.SendHello(It.IsAny<OperationContext>(), It.IsAny<IConnection>())) .Returns(connectionInitializerContext); mockConnectionInitializer - .Setup(i => i.Authenticate(It.IsAny<IConnection>(), It.IsAny<ConnectionInitializerContext>(), CancellationToken.None)) + .Setup(i => i.Authenticate(It.IsAny<OperationContext>(), It.IsAny<IConnection>(), It.IsAny<ConnectionInitializerContext>())) .Returns(connectionInitializerContextAfterAuthentication); mockConnectionInitializer - .Setup(i => i.SendHelloAsync(It.IsAny<IConnection>(), CancellationToken.None)) + .Setup(i => i.SendHelloAsync(It.IsAny<OperationContext>(), It.IsAny<IConnection>())) .ReturnsAsync(connectionInitializerContext); mockConnectionInitializer - .Setup(i => i.AuthenticateAsync(It.IsAny<IConnection>(), It.IsAny<ConnectionInitializerContext>(), CancellationToken.None)) + .Setup(i => i.AuthenticateAsync(It.IsAny<OperationContext>(), It.IsAny<IConnection>(), It.IsAny<ConnectionInitializerContext>())) 
.ReturnsAsync(connectionInitializerContextAfterAuthentication); using var subject = new BinaryConnection( @@ -75,15 +83,17 @@ public async Task DriverAcceptsArbitraryAuthMechanism([Values(false, true)] bool streamFactory: mockStreamFactory.Object, connectionInitializer: mockConnectionInitializer.Object, eventSubscriber: capturedEvents, - LoggerFactory); + LoggerFactory, + socketReadTimeout: Timeout.InfiniteTimeSpan, + socketWriteTimeout: Timeout.InfiniteTimeSpan); if (async) { - await subject.OpenAsync(CancellationToken.None); + await subject.OpenAsync(OperationContext.NoTimeout); } else { - subject.Open(CancellationToken.None); + subject.Open(OperationContext.NoTimeout); } subject._state().Should().Be(3); // 3 - open. diff --git a/tests/MongoDB.Driver.Tests/Specifications/retryable-reads/RetryableReadsProseTests.cs b/tests/MongoDB.Driver.Tests/Specifications/retryable-reads/RetryableReadsProseTests.cs index 9738aab1c95..e95cf102944 100644 --- a/tests/MongoDB.Driver.Tests/Specifications/retryable-reads/RetryableReadsProseTests.cs +++ b/tests/MongoDB.Driver.Tests/Specifications/retryable-reads/RetryableReadsProseTests.cs @@ -33,6 +33,7 @@ namespace MongoDB.Driver.Tests.Specifications.retryable_reads { + [Trait("Category", "Integration")] public class RetryableReadsProseTests { [Theory] @@ -78,7 +79,7 @@ public async Task PoolClearedError_read_retryablity_test([Values(true, false)] b .Capture<ConnectionPoolCheckingOutConnectionFailedEvent>() .CaptureCommandEvents("find"); - var failpointServer = DriverTestConfiguration.Client.GetClusterInternal().SelectServer(failPointSelector, default); + var failpointServer = DriverTestConfiguration.Client.GetClusterInternal().SelectServer(OperationContext.NoTimeout, failPointSelector); using var failPoint = FailPoint.Configure(failpointServer, NoCoreSession.NewHandle(), failPointCommand); using var client = CreateClient(settings, eventCapturer, heartbeatInterval); @@ -145,8 +146,8 @@ public void 
Sharded_cluster_retryable_reads_are_retried_on_different_mongos_if_a }, useMultipleShardRouters: true); - var failPointServer1 = client.GetClusterInternal().SelectServer(new EndPointServerSelector(client.Cluster.Description.Servers[0].EndPoint), default); - var failPointServer2 = client.GetClusterInternal().SelectServer(new EndPointServerSelector(client.Cluster.Description.Servers[1].EndPoint), default); + var failPointServer1 = client.GetClusterInternal().SelectServer(OperationContext.NoTimeout, new EndPointServerSelector(client.Cluster.Description.Servers[0].EndPoint)); + var failPointServer2 = client.GetClusterInternal().SelectServer(OperationContext.NoTimeout, new EndPointServerSelector(client.Cluster.Description.Servers[1].EndPoint)); using var failPoint1 = FailPoint.Configure(failPointServer1, NoCoreSession.NewHandle(), failPointCommand); using var failPoint2 = FailPoint.Configure(failPointServer2, NoCoreSession.NewHandle(), failPointCommand); @@ -195,7 +196,7 @@ public void Sharded_cluster_retryable_reads_are_retried_on_same_mongos_if_no_oth }, useMultipleShardRouters: false); - var failPointServer = client.GetClusterInternal().SelectServer(new EndPointServerSelector(client.Cluster.Description.Servers[0].EndPoint), default); + var failPointServer = client.GetClusterInternal().SelectServer(OperationContext.NoTimeout, new EndPointServerSelector(client.Cluster.Description.Servers[0].EndPoint)); using var failPoint = FailPoint.Configure(failPointServer, NoCoreSession.NewHandle(), failPointCommand); diff --git a/tests/MongoDB.Driver.Tests/Specifications/retryable-writes/prose-tests/MMapV1Tests.cs b/tests/MongoDB.Driver.Tests/Specifications/retryable-writes/prose-tests/MMapV1Tests.cs deleted file mode 100644 index daf40a50152..00000000000 --- a/tests/MongoDB.Driver.Tests/Specifications/retryable-writes/prose-tests/MMapV1Tests.cs +++ /dev/null @@ -1,73 +0,0 @@ -/* Copyright 2020-present MongoDB Inc. 
-* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ - -using System; -using FluentAssertions; -using MongoDB.Bson; -using MongoDB.Driver.Core.Clusters; -using MongoDB.Driver.Core.TestHelpers.Logging; -using MongoDB.Driver.Core.TestHelpers.XunitExtensions; -using MongoDB.TestHelpers.XunitExtensions; -using Xunit; -using Xunit.Abstractions; - -namespace MongoDB.Driver.Tests.Specifications.retryable_writes.prose_tests -{ - public class MMapV1Tests : LoggableTestClass - { - public MMapV1Tests(ITestOutputHelper output) : base(output) - { - } - - [Theory] - [ParameterAttributeData] - public void Write_operation_should_throw_when_retry_writes_is_true_and_storage_engine_is_MMMAPv1( - [Values(false, true)] bool async) - { - RequireServer.Check() - .ClusterType(ClusterType.ReplicaSet) - .StorageEngine("mmapv1"); - - using (var client = CreateIMongoClient()) - { - var database = client.GetDatabase(DriverTestConfiguration.DatabaseNamespace.DatabaseName); - var collection = database.GetCollection<BsonDocument>(DriverTestConfiguration.CollectionNamespace.CollectionName); - database.DropCollection(collection.CollectionNamespace.CollectionName); - - var document = new BsonDocument("_id", 1); - Exception exception; - if (async) - { - exception = Record.Exception(() => collection.InsertOneAsync(document).GetAwaiter().GetResult()); - } - else - { - exception = Record.Exception(() => collection.InsertOne(document)); - } - - var e = 
exception.Should().BeOfType<MongoCommandException>().Subject; - e.Message.Should().Be("This MongoDB deployment does not support retryable writes. Please add retryWrites=false to your connection string."); - } - } - - // private methods - private IMongoClient CreateIMongoClient() => - DriverTestConfiguration.CreateMongoClient(s => - { - s.RetryWrites = true; - s.LoggingSettings = LoggingSettings; - }); - } -} diff --git a/tests/MongoDB.Driver.Tests/Specifications/retryable-writes/prose-tests/PoolClearRetryability.cs b/tests/MongoDB.Driver.Tests/Specifications/retryable-writes/prose-tests/PoolClearRetryability.cs index b3dca6097a8..78949576f42 100644 --- a/tests/MongoDB.Driver.Tests/Specifications/retryable-writes/prose-tests/PoolClearRetryability.cs +++ b/tests/MongoDB.Driver.Tests/Specifications/retryable-writes/prose-tests/PoolClearRetryability.cs @@ -28,12 +28,12 @@ using MongoDB.Driver.Core.Misc; using MongoDB.Driver.Core.TestHelpers; using MongoDB.Driver.Core.TestHelpers.XunitExtensions; -using MongoDB.Driver.TestHelpers; using MongoDB.TestHelpers.XunitExtensions; using Xunit; namespace MongoDB.Driver.Tests.Specifications.retryable_writes.prose_tests { + [Trait("Category", "Integration")] public class PoolClearRetryability { [Theory] @@ -82,7 +82,7 @@ public async Task PoolClearedError_write_retryablity_test([Values(false, true)] .Capture<ConnectionPoolCheckingOutConnectionFailedEvent>() .CaptureCommandEvents("insert"); - var failpointServer = DriverTestConfiguration.Client.GetClusterInternal().SelectServer(failPointSelector, default); + var failpointServer = DriverTestConfiguration.Client.GetClusterInternal().SelectServer(OperationContext.NoTimeout, failPointSelector); using var failPoint = FailPoint.Configure(failpointServer, NoCoreSession.NewHandle(), failPointCommand); using var client = CreateClient(settings, eventCapturer, heartbeatInterval); diff --git a/tests/MongoDB.Driver.Tests/Specifications/retryable-writes/prose-tests/RetryWriteOnOtherMongos.cs 
b/tests/MongoDB.Driver.Tests/Specifications/retryable-writes/prose-tests/RetryWriteOnOtherMongos.cs index 9fcb7357c4c..c7424f38880 100644 --- a/tests/MongoDB.Driver.Tests/Specifications/retryable-writes/prose-tests/RetryWriteOnOtherMongos.cs +++ b/tests/MongoDB.Driver.Tests/Specifications/retryable-writes/prose-tests/RetryWriteOnOtherMongos.cs @@ -28,6 +28,7 @@ namespace MongoDB.Driver.Tests.Specifications.retryable_writes.prose_tests { + [Trait("Category", "Integration")] public class RetryWriteOnOtherMongos { [Fact] @@ -60,8 +61,8 @@ public void Sharded_cluster_retryable_writes_are_retried_on_different_mongos_if_ }, useMultipleShardRouters: true); - var failPointServer1 = client.GetClusterInternal().SelectServer(new EndPointServerSelector(client.Cluster.Description.Servers[0].EndPoint), default); - var failPointServer2 = client.GetClusterInternal().SelectServer(new EndPointServerSelector(client.Cluster.Description.Servers[1].EndPoint), default); + var failPointServer1 = client.GetClusterInternal().SelectServer(OperationContext.NoTimeout, new EndPointServerSelector(client.Cluster.Description.Servers[0].EndPoint)); + var failPointServer2 = client.GetClusterInternal().SelectServer(OperationContext.NoTimeout, new EndPointServerSelector(client.Cluster.Description.Servers[1].EndPoint)); using var failPoint1 = FailPoint.Configure(failPointServer1, NoCoreSession.NewHandle(), failPointCommand); using var failPoint2 = FailPoint.Configure(failPointServer2, NoCoreSession.NewHandle(), failPointCommand); @@ -111,7 +112,7 @@ public void Sharded_cluster_retryable_writes_are_retried_on_same_mongo_if_no_oth }, useMultipleShardRouters: false); - var failPointServer = client.GetClusterInternal().SelectServer(new EndPointServerSelector(client.Cluster.Description.Servers[0].EndPoint), default); + var failPointServer = client.GetClusterInternal().SelectServer(OperationContext.NoTimeout, new EndPointServerSelector(client.Cluster.Description.Servers[0].EndPoint)); using var failPoint = 
FailPoint.Configure(failPointServer, NoCoreSession.NewHandle(), failPointCommand); diff --git a/tests/MongoDB.Driver.Tests/Specifications/server-discovery-and-monitoring/ServerDiscoveryAndMonitoringProseTests.cs b/tests/MongoDB.Driver.Tests/Specifications/server-discovery-and-monitoring/ServerDiscoveryAndMonitoringProseTests.cs index 2d9d3251d7b..4d61e9c7b60 100644 --- a/tests/MongoDB.Driver.Tests/Specifications/server-discovery-and-monitoring/ServerDiscoveryAndMonitoringProseTests.cs +++ b/tests/MongoDB.Driver.Tests/Specifications/server-discovery-and-monitoring/ServerDiscoveryAndMonitoringProseTests.cs @@ -32,13 +32,13 @@ using MongoDB.Driver.Core.Servers; using MongoDB.Driver.Core.TestHelpers; using MongoDB.Driver.Core.TestHelpers.XunitExtensions; -using MongoDB.Driver.TestHelpers; using Moq; using Xunit; namespace MongoDB.Driver.Tests.Specifications.server_discovery_and_monitoring { [Trait("Category", "SDAM")] + [Trait("Category", "Integration")] public class ServerDiscoveryAndMonitoringProseTests { [Fact] @@ -96,7 +96,7 @@ public void Heartbeat_should_be_emitted_before_connection_open() var mockStream = new Mock<Stream>(); mockStream - .Setup(s => s.Write(It.IsAny<byte[]>(), It.IsAny<int>(), It.IsAny<int>())) + .Setup(s => s.BeginWrite(It.IsAny<byte[]>(), It.IsAny<int>(), It.IsAny<int>(), It.IsAny<AsyncCallback>(), It.IsAny<object>())) .Callback(() => EnqueueEvent(HelloReceivedEvent)) .Throws(new Exception("Stream is closed.")); @@ -165,7 +165,7 @@ public void Monitor_sleep_at_least_minHeartbeatFrequencyMS_between_checks() settings.ApplicationName = appName; settings.ServerSelectionTimeout = TimeSpan.FromSeconds(5); - var server = DriverTestConfiguration.Client.GetClusterInternal().SelectServer(new EndPointServerSelector(new DnsEndPoint(serverAddress.Host, serverAddress.Port)), default); + var server = DriverTestConfiguration.Client.GetClusterInternal().SelectServer(OperationContext.NoTimeout, new EndPointServerSelector(new DnsEndPoint(serverAddress.Host, 
serverAddress.Port))); using var failPoint = FailPoint.Configure(server, NoCoreSession.NewHandle(), failPointCommand); using var client = DriverTestConfiguration.CreateMongoClient(settings); @@ -220,7 +220,7 @@ public void RoundTimeTrip_test() { // Note that the Server Description Equality rule means that ServerDescriptionChangedEvents will not be published. // So we use reflection to obtain the latest RTT instead. - var server = client.GetClusterInternal().SelectServer(WritableServerSelector.Instance, CancellationToken.None); + var server = client.GetClusterInternal().SelectServer(OperationContext.NoTimeout, WritableServerSelector.Instance); var roundTripTimeMonitor = server._monitor()._roundTripTimeMonitor(); var expectedRoundTripTime = TimeSpan.FromMilliseconds(250); var timeout = TimeSpan.FromSeconds(30); // should not be reached without a driver bug @@ -273,7 +273,7 @@ public void ConnectionPool_cleared_on_failed_hello() eventsWaitTimeout); eventCapturer.Clear(); - var failpointServer = DriverTestConfiguration.Client.GetClusterInternal().SelectServer(new EndPointServerSelector(new DnsEndPoint(serverAddress.Host, serverAddress.Port)), default); + var failpointServer = DriverTestConfiguration.Client.GetClusterInternal().SelectServer(OperationContext.NoTimeout, new EndPointServerSelector(new DnsEndPoint(serverAddress.Host, serverAddress.Port))); using var failPoint = FailPoint.Configure(failpointServer, NoCoreSession.NewHandle(), failPointCommand); eventCapturer.WaitForEventOrThrowIfTimeout<ConnectionPoolReadyEvent>(eventsWaitTimeout); diff --git a/tests/MongoDB.Driver.Tests/Specifications/server-discovery-and-monitoring/ServerDiscoveryAndMonitoringTestRunner.cs b/tests/MongoDB.Driver.Tests/Specifications/server-discovery-and-monitoring/ServerDiscoveryAndMonitoringTestRunner.cs index bb9b59e94e7..9fc61009e7b 100644 --- a/tests/MongoDB.Driver.Tests/Specifications/server-discovery-and-monitoring/ServerDiscoveryAndMonitoringTestRunner.cs +++ 
b/tests/MongoDB.Driver.Tests/Specifications/server-discovery-and-monitoring/ServerDiscoveryAndMonitoringTestRunner.cs @@ -22,6 +22,7 @@ using MongoDB.Bson.TestHelpers; using MongoDB.Bson.TestHelpers.JsonDrivenTests; using MongoDB.Driver.Core; +using MongoDB.Driver.Core.Bindings; using MongoDB.Driver.Core.Clusters; using MongoDB.Driver.Core.Configuration; using MongoDB.Driver.Core.ConnectionPools; @@ -617,6 +618,11 @@ public static IConnectionPool _connectionPool(this Server server) public static IServerMonitor _monitor(this IServer server) { + if (server is SelectedServer) + { + server = (IServer)Reflector.GetFieldValue(server, "_server"); + } + return (IServerMonitor)Reflector.GetFieldValue(server, nameof(_monitor)); } @@ -625,7 +631,7 @@ public static void HandleBeforeHandshakeCompletesException(this Server server, E Reflector.Invoke(server, nameof(HandleBeforeHandshakeCompletesException), ex); } - public static void HandleChannelException(this Server server, IConnection connection, Exception ex) + public static void HandleChannelException(this Server server, IConnectionHandle connection, Exception ex) { Reflector.Invoke(server, nameof(HandleChannelException), connection, ex, checkBaseClass: true); } diff --git a/tests/MongoDB.Driver.Tests/Specifications/server-discovery-and-monitoring/prose-tests/ServerDiscoveryProseTests.cs b/tests/MongoDB.Driver.Tests/Specifications/server-discovery-and-monitoring/prose-tests/ServerDiscoveryProseTests.cs index 38fbadcce4c..928823a8570 100644 --- a/tests/MongoDB.Driver.Tests/Specifications/server-discovery-and-monitoring/prose-tests/ServerDiscoveryProseTests.cs +++ b/tests/MongoDB.Driver.Tests/Specifications/server-discovery-and-monitoring/prose-tests/ServerDiscoveryProseTests.cs @@ -30,6 +30,7 @@ namespace MongoDB.Driver.Tests.Specifications.server_discovery_and_monitoring.prose_tests { + [Trait("Category", "Integration")] public class ServerDiscoveryProseTests : LoggableTestClass { // public constructors diff --git 
a/tests/MongoDB.Driver.Tests/Specifications/server-selection/InWindowTestRunner.cs b/tests/MongoDB.Driver.Tests/Specifications/server-selection/InWindowTestRunner.cs index 84abad019de..a61e7d6658a 100644 --- a/tests/MongoDB.Driver.Tests/Specifications/server-selection/InWindowTestRunner.cs +++ b/tests/MongoDB.Driver.Tests/Specifications/server-selection/InWindowTestRunner.cs @@ -82,8 +82,8 @@ public void RunTestDefinition(JsonDrivenTestCase testCase) for (int i = 0; i < testData.iterations; i++) { var selectedServer = testData.async - ? cluster.SelectServerAsync(readPreferenceSelector, default).GetAwaiter().GetResult() - : cluster.SelectServer(readPreferenceSelector, default); + ? cluster.SelectServerAsync(OperationContext.NoTimeout, readPreferenceSelector).GetAwaiter().GetResult() + : cluster.SelectServer(OperationContext.NoTimeout, readPreferenceSelector); selectionHistogram[selectedServer.ServerId]++; } @@ -125,7 +125,6 @@ private MultiServerCluster CreateAndSetupCluster(ClusterDescription clusterDescr serverDescriptionDisconnected = serverDescriptionDisconnected.With(replicaSetConfig: replicaSetConfig); } var serverDescriptionConnected = serverDescriptionDisconnected.With(state: ServerState.Connected); - var operationsCount = operationsCounts.Single(o => endpoint.ToString().EndsWith(o.address)); diff --git a/tests/MongoDB.Driver.Tests/Specifications/server-selection/ServerSelectionTestHelpers.cs b/tests/MongoDB.Driver.Tests/Specifications/server-selection/ServerSelectionTestHelpers.cs index df7b1b25269..806e0c2115c 100644 --- a/tests/MongoDB.Driver.Tests/Specifications/server-selection/ServerSelectionTestHelpers.cs +++ b/tests/MongoDB.Driver.Tests/Specifications/server-selection/ServerSelectionTestHelpers.cs @@ -127,9 +127,9 @@ private static ServerDescription BuildServerDescription( var lastWriteTimestamp = serverData.lastWrite != null ? 
BsonUtils.ToDateTimeFromMillisecondsSinceEpoch(serverData.lastWrite.lastWriteDate) : utcNow; var lastUpdateTimestamp = serverData.lastUpdateTime != null ? BsonUtils.ToDateTimeFromMillisecondsSinceEpoch(serverData.lastUpdateTime.Value) : utcNow; - var maxWireVersion = serverData.maxWireVersion ?? 7; + var maxWireVersion = serverData.maxWireVersion ?? 8; var wireVersionRange = new Range<int>(0, maxWireVersion); - var serverVersion = new SemanticVersion(4, 0, 0); + var serverVersion = new SemanticVersion(4, 2, 0); var serverId = new ServerId(clusterId, endPoint); return new ServerDescription( diff --git a/tests/MongoDB.Driver.Tests/Specifications/sessions/SessionsProseTests.cs b/tests/MongoDB.Driver.Tests/Specifications/sessions/SessionsProseTests.cs index a576e48ab48..649e2ae2df9 100644 --- a/tests/MongoDB.Driver.Tests/Specifications/sessions/SessionsProseTests.cs +++ b/tests/MongoDB.Driver.Tests/Specifications/sessions/SessionsProseTests.cs @@ -15,6 +15,7 @@ using System; using System.Collections.Generic; +using System.ComponentModel.DataAnnotations; using System.Linq; using System.Threading.Tasks; using FluentAssertions; @@ -27,13 +28,14 @@ using MongoDB.Driver.Core.TestHelpers.Logging; using MongoDB.Driver.Core.TestHelpers.XunitExtensions; using MongoDB.Driver.Encryption; +using MongoDB.Driver.TestHelpers; using MongoDB.TestHelpers.XunitExtensions; using Xunit; using Xunit.Abstractions; namespace MongoDB.Driver.Tests.Specifications.sessions { - [Trait("Category", "Serverless")] + [Trait("Category", "Integration")] public class SessionsProseTests : LoggableTestClass { public SessionsProseTests(ITestOutputHelper output) : base(output) @@ -62,6 +64,7 @@ public void Snapshot_and_causal_consistent_session_is_not_allowed() public async Task Ensure_explicit_session_raises_error_if_connection_does_not_support_sessions([Values(true, false)] bool async) { RequireServer.Check().Supports(Feature.ClientSideEncryption); + 
CoreTestConfiguration.SkipMongocryptdTests_SERVER_106469(); using var mongocryptdContext = GetMongocryptdContext(); using var session = mongocryptdContext.MongoClient.StartSession(); @@ -69,8 +72,9 @@ public async Task Ensure_explicit_session_raises_error_if_connection_does_not_su var exception = async ? await Record.ExceptionAsync(() => mongocryptdContext.MongocryptdCollection.FindAsync(session, FilterDefinition<BsonDocument>.Empty)) : Record.Exception(() => mongocryptdContext.MongocryptdCollection.Find(session, FilterDefinition<BsonDocument>.Empty).ToList()); - exception.Should().BeOfType<MongoClientException>().Subject.Message.Should().Be("Sessions are not supported."); + + exception.Should().BeOfType<MongoClientException>().Subject.Message.Should().Be("Sessions are not supported."); exception = async ? await Record.ExceptionAsync(() => mongocryptdContext.MongocryptdCollection.InsertOneAsync(session, new BsonDocument())) : Record.Exception(() => mongocryptdContext.MongocryptdCollection.InsertOne(session, new BsonDocument())); @@ -83,6 +87,7 @@ await Record.ExceptionAsync(() => mongocryptdContext.MongocryptdCollection.FindA public async Task Ensure_implicit_session_is_ignored_if_connection_does_not_support_sessions([Values(true, false)] bool async) { RequireServer.Check().Supports(Feature.ClientSideEncryption); + CoreTestConfiguration.SkipMongocryptdTests_SERVER_106469(); using var mongocryptdContext = GetMongocryptdContext(); diff --git a/tests/MongoDB.Driver.Tests/Specifications/socks5-support/Socks5SupportProseTests.cs b/tests/MongoDB.Driver.Tests/Specifications/socks5-support/Socks5SupportProseTests.cs new file mode 100644 index 00000000000..1f79d06f74c --- /dev/null +++ b/tests/MongoDB.Driver.Tests/Specifications/socks5-support/Socks5SupportProseTests.cs @@ -0,0 +1,154 @@ +/* Copyright 2010-present MongoDB Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading.Tasks; +using MongoDB.Bson; +using MongoDB.Driver.Core.Events; +using MongoDB.Driver.Core.Misc; +using MongoDB.Driver.Core.TestHelpers.Logging; +using MongoDB.Driver.Core.TestHelpers.XunitExtensions; +using MongoDB.TestHelpers.XunitExtensions; +using Xunit; +using Xunit.Abstractions; + +namespace MongoDB.Driver.Tests.Specifications.socks5_support; + +[Trait("Category", "Integration")] +[Trait("Category", "Socks5Proxy")] +public class Socks5SupportProseTests(ITestOutputHelper testOutputHelper) + : LoggableTestClass(testOutputHelper) +{ + public static IEnumerable<object[]> GetTestCombinations() + { + /* From the Socks5 Proxy Support Prose Tests: + * + * Drivers MUST create a MongoClient for each of these connection strings, and attempt to run a hello command using each client. + * The operation must succeed for table entries marked (succeeds) and fail for table entries marked (fails). + * The connection strings MUST all be accepted as valid connection strings. + * + * Drivers MUST run variants of these tests in which the proxy options are substituted for MongoClient options -- This is not done as it would mostly be a repetition of the connection string tests. + * + * Drivers MUST verify for at least one of the connection strings marked (succeeds) that command monitoring events do not reference the SOCKS5 proxy host where the MongoDB service server/port are referenced. 
+ */ + var testCases = new (string ConnectionString, bool ExpectedResult)[] + { + ("mongodb://<mappedhost>/?proxyHost=localhost&proxyPort=1080&directConnection=true", false), + ("mongodb://<mappedhost>/?proxyHost=localhost&proxyPort=1081&directConnection=true", true), + ("mongodb://<replicaset>/?proxyHost=localhost&proxyPort=1080", false), + ("mongodb://<replicaset>/?proxyHost=localhost&proxyPort=1081", true), + ("mongodb://<mappedhost>/?proxyHost=localhost&proxyPort=1080&proxyUsername=nonexistentuser&proxyPassword=badauth&directConnection=true", false), + ("mongodb://<mappedhost>/?proxyHost=localhost&proxyPort=1081&proxyUsername=nonexistentuser&proxyPassword=badauth&directConnection=true", true), + ("mongodb://<replicaset>/?proxyHost=localhost&proxyPort=1081&proxyUsername=nonexistentuser&proxyPassword=badauth", true), + ("mongodb://<mappedhost>/?proxyHost=localhost&proxyPort=1080&proxyUsername=username&proxyPassword=p4ssw0rd&directConnection=true", true), + ("mongodb://<mappedhost>/?proxyHost=localhost&proxyPort=1081&directConnection=true", true), + ("mongodb://<replicaset>/?proxyHost=localhost&proxyPort=1080&proxyUsername=username&proxyPassword=p4ssw0rd", true), + ("mongodb://<replicaset>/?proxyHost=localhost&proxyPort=1081", true) + }; + + return + (from tc in testCases + from useTls in new[] { true, false } + from isAsync in new[] { true, false } + select new { tc.ConnectionString, tc.ExpectedResult, useTls, isAsync }) + .Select((x, i) => new object[] + { + $"{i}_{(x.useTls ? "Tls" : "NoTls")}_{(x.isAsync ? 
"Async" : "Sync")}", + x.ConnectionString, + x.ExpectedResult, + x.useTls, + x.isAsync + }); + } + + //Prose test: https://github.com/mongodb/specifications/blob/a6dbd208462d97f97c813560cac5cf25925bb0cf/source/socks5-support/tests/README.md + [Theory] + [MemberData(nameof(GetTestCombinations))] + public async Task TestConnectionStrings(string id, string connectionString, bool expectedResult, bool useTls, bool async) + { + RequireServer.Check().Tls(useTls); + RequireEnvironment.Check().EnvironmentVariable("SOCKS5_PROXY_SERVERS_ENABLED"); + + var isMappedHost = connectionString.Contains("<mappedhost>"); + + List<(string Host, int Port)> actualHosts; + + if (isMappedHost) + { + //<mappedhost> is always replaced with localhost:12345, and it's used to verify that the test proxy server is actually used. + //Internally localhost:12345 is mapped to the actual hosts by the test proxy server. + connectionString = connectionString.Replace("<mappedhost>", "localhost:12345"); + actualHosts = [("localhost", 12345)]; + } + else + { + //Convert the hosts to a format that can be used in the connection string (host:port), and join them into a string. 
+ actualHosts = CoreTestConfiguration.ConnectionString.Hosts.Select(h => h.GetHostAndPort()).ToList(); + var stringHosts = string.Join(",", actualHosts.Select( h => $"{h.Host}:{h.Port}")); + connectionString = connectionString.Replace("<replicaset>", stringHosts); + } + + var eventList = new List<CommandStartedEvent>(); + var mongoClientSettings = MongoClientSettings.FromConnectionString(connectionString); + mongoClientSettings.ClusterSource = DisposingClusterSource.Instance; + mongoClientSettings.UseTls = useTls; + mongoClientSettings.ServerSelectionTimeout = TimeSpan.FromSeconds(1.5); + mongoClientSettings.ClusterConfigurator = cb => + { + cb.Subscribe<CommandStartedEvent>(eventList.Add); + }; + + using var client = new MongoClient(mongoClientSettings); + var database = client.GetDatabase("admin"); + + var command = new BsonDocument("hello", 1); + + if (expectedResult) + { + var result = async + ? await database.RunCommandAsync<BsonDocument>(command) + : database.RunCommand<BsonDocument>(command); + + Assert.NotEmpty(result); + AssertEventListContainsCorrectEndpoints(actualHosts, eventList); + } + else + { + var exception = async + ? 
await Record.ExceptionAsync(() => database.RunCommandAsync<BsonDocument>(command)) + : Record.Exception(() => database.RunCommand<BsonDocument>(command)); + + Assert.IsType<TimeoutException>(exception); + } + } + + private static void AssertEventListContainsCorrectEndpoints(List<(string Host, int Port)> hosts, List<CommandStartedEvent> eventList) + { + var proxyHosts = new List<(string Host, int Port)> + { + ("localhost", 1080), + ("localhost", 1081) + }; + + var endPointsSeen = eventList + .Select(e => e.ConnectionId.ServerId.EndPoint.GetHostAndPort()) + .ToList(); + + Assert.DoesNotContain(endPointsSeen, proxyHosts.Contains); + Assert.Contains(endPointsSeen, hosts.Contains); + } +} \ No newline at end of file diff --git a/tests/MongoDB.Driver.Tests/Specifications/transactions/TransactionsProseTests.cs b/tests/MongoDB.Driver.Tests/Specifications/transactions/TransactionsProseTests.cs new file mode 100644 index 00000000000..7a8f8cb2b2c --- /dev/null +++ b/tests/MongoDB.Driver.Tests/Specifications/transactions/TransactionsProseTests.cs @@ -0,0 +1,71 @@ +/* Copyright 2010-present MongoDB Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +using System.Threading.Tasks; +using FluentAssertions; +using MongoDB.Bson; +using MongoDB.Driver.Core.Clusters; +using MongoDB.Driver.Core.TestHelpers.Logging; +using MongoDB.Driver.Core.TestHelpers.XunitExtensions; +using MongoDB.TestHelpers.XunitExtensions; +using Xunit; +using Xunit.Abstractions; + +namespace MongoDB.Driver.Tests.Specifications.transactions +{ + [Trait("Category", "Integration")] + public class TransactionsProseTests : LoggableTestClass + { + private const string CollectionName = "txn-test-col"; + private const string DatabaseName = "txn-test"; + + public TransactionsProseTests(ITestOutputHelper output) : base(output) + { + } + + // https://github.com/mongodb/specifications/blob/fc7996db26d0ea92091a5034c6acb287ef7282fe/source/transactions/tests/README.md#10-write-concern-not-inherited-from-collection-object-inside-transaction + [Theory] + [ParameterAttributeData] + public async Task Ensure_write_concern_is_not_inherited_from_collection_object_inside_transaction([Values(false, true)] bool async) + { + RequireServer.Check().ClusterTypes(ClusterType.LoadBalanced, ClusterType.ReplicaSet, ClusterType.Sharded); + + using var client = DriverTestConfiguration.CreateMongoClient(); + var database = client.GetDatabase(DatabaseName).WithWriteConcern(WriteConcern.WMajority); + database.DropCollection(CollectionName); + + var collection = client.GetDatabase(DatabaseName).GetCollection<BsonDocument>(CollectionName) + .WithWriteConcern(WriteConcern.Unacknowledged); + + using (var session = client.StartSession()) + { + session.StartTransaction(); + + if (async) + { + await collection.InsertOneAsync(new BsonDocument("n", 1)); + await session.CommitTransactionAsync(); + } + else + { + collection.InsertOne(new BsonDocument("n", 1)); + session.CommitTransaction(); + } + } + + collection.Find(new BsonDocument("n", 1)).First().Should().NotBeNull().And.Subject["n"].AsInt32.Should().Be(1); + } + } +} \ No newline at end of 
file diff --git a/tests/MongoDB.Driver.Tests/Specifications/uuid/prose-tests/ImplicitEncodingTests.cs b/tests/MongoDB.Driver.Tests/Specifications/uuid/prose-tests/ImplicitEncodingTests.cs index 5a13163a1f3..c3945fa0fdf 100644 --- a/tests/MongoDB.Driver.Tests/Specifications/uuid/prose-tests/ImplicitEncodingTests.cs +++ b/tests/MongoDB.Driver.Tests/Specifications/uuid/prose-tests/ImplicitEncodingTests.cs @@ -22,6 +22,7 @@ namespace MongoDB.Driver.Tests.Specifications.uuid.prose_tests { + [Trait("Category", "Integration")] public class ImplicitEncodingTests { [Fact] diff --git a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/Matchers/UnifiedErrorMatcher.cs b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/Matchers/UnifiedErrorMatcher.cs index a7e3d12ee89..dce082711f1 100644 --- a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/Matchers/UnifiedErrorMatcher.cs +++ b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/Matchers/UnifiedErrorMatcher.cs @@ -48,6 +48,9 @@ public void AssertErrorsMatch(Exception actualException, BsonDocument expectedEr case "isClientError": AssertIsClientError(actualException, element.Value.AsBoolean); break; + case "isTimeoutError": + AssertIsTimeoutError(actualException, element.Value.AsBoolean); + break; case "errorContains": AssertErrorContains(actualException, element.Value.AsString); break; @@ -194,6 +197,18 @@ private void AssertIsError(Exception actualException, bool expectedIsError) actualException.Should().NotBeNull(); } + private void AssertIsTimeoutError(Exception actualException, bool expectedTimeoutError) + { + actualException = UnwrapCommandException(actualException); + + var because = expectedTimeoutError ? 
+ $"error expect to be timeout, but actual exception is {actualException}" : + $"error expect not to be timeout, but actual exception is {actualException}"; + var isTimeout = actualException is TimeoutException or MongoExecutionTimeoutException; + + isTimeout.Should().Be(expectedTimeoutError, because); + } + private void AssertWriteConcernErrors(Exception actualException, BsonArray expectedWriteConcernErrors) { var clientBulkWriteException = actualException.Should().BeAssignableTo<ClientBulkWriteException>().Subject; @@ -224,14 +239,14 @@ private void AssertWriteErrors(Exception actualException, BsonDocument expectedW private static Exception UnwrapCommandException(Exception ex) { - if (ex is MongoConnectionException connectionException) + if (ex is ClientBulkWriteException bulkWriteException) { - ex = connectionException.InnerException; + ex = bulkWriteException.InnerException; } - if (ex is ClientBulkWriteException bulkWriteException) + if (ex is MongoConnectionException connectionException) { - ex = bulkWriteException.InnerException; + ex = connectionException.InnerException; } return ex; diff --git a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/Matchers/UnifiedValueMatcher.cs b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/Matchers/UnifiedValueMatcher.cs index a31bb3ede08..236c12d82db 100644 --- a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/Matchers/UnifiedValueMatcher.cs +++ b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/Matchers/UnifiedValueMatcher.cs @@ -14,6 +14,7 @@ */ using System; +using System.Collections; using System.Collections.Generic; using System.Linq; using FluentAssertions; @@ -26,6 +27,8 @@ namespace MongoDB.Driver.Tests.UnifiedTestOperations.Matchers { public class UnifiedValueMatcher { + private static readonly List<string> __numericTypes = ["int", "long", "double", "decimal"]; + private UnifiedEntityMap _entityMap; public UnifiedValueMatcher(UnifiedEntityMap entityMap) @@ -135,7 +138,7 @@ private void 
AssertValuesMatch(BsonValue actual, BsonValue expected, bool isRoot break; case "$$matchesEntity": var resultId = operatorValue.AsString; - expectedValue = _entityMap.Resutls[resultId]; + expectedValue = _entityMap.Results[resultId]; break; case "$$matchesHexBytes": expectedValue = operatorValue; @@ -197,7 +200,8 @@ private void AssertExpectedType(BsonValue actual, BsonValue expectedTypes) if (expectedTypes.IsString) { - expectedTypeNames = new List<string> { expectedTypes.AsString }; + var expectedType = expectedTypes.AsString; + expectedTypeNames = expectedType == "number" ? __numericTypes : [expectedType]; } else if (expectedTypes.IsBsonArray) { diff --git a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedAbortTransactionOperation.cs b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedAbortTransactionOperation.cs index cd8792d7e62..5e5e96c8fa4 100644 --- a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedAbortTransactionOperation.cs +++ b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedAbortTransactionOperation.cs @@ -20,20 +20,22 @@ namespace MongoDB.Driver.Tests.UnifiedTestOperations { - public class UnifiedAbortTransactionOperation : IUnifiedEntityTestOperation + internal class UnifiedAbortTransactionOperation : IUnifiedEntityTestOperation { private readonly IClientSessionHandle _session; + private readonly AbortTransactionOptions _options; - public UnifiedAbortTransactionOperation(IClientSessionHandle session) + public UnifiedAbortTransactionOperation(IClientSessionHandle session, AbortTransactionOptions options) { _session = session; + _options = options; } public OperationResult Execute(CancellationToken cancellationToken) { try { - _session.AbortTransaction(cancellationToken); + _session.AbortTransaction(_options, cancellationToken); return OperationResult.Empty(); } catch (Exception ex) @@ -46,7 +48,7 @@ public async Task<OperationResult> ExecuteAsync(CancellationToken cancellationTo { try { - await 
_session.AbortTransactionAsync(cancellationToken).ConfigureAwait(false); + await _session.AbortTransactionAsync(_options, cancellationToken).ConfigureAwait(false); return OperationResult.Empty(); } catch (Exception ex) @@ -56,7 +58,7 @@ public async Task<OperationResult> ExecuteAsync(CancellationToken cancellationTo } } - public class UnifiedAbortTransactionOperationBuilder + internal class UnifiedAbortTransactionOperationBuilder { private readonly UnifiedEntityMap _entityMap; @@ -68,13 +70,30 @@ public UnifiedAbortTransactionOperationBuilder(UnifiedEntityMap entityMap) public UnifiedAbortTransactionOperation Build(string targetSessionId, BsonDocument arguments) { var session = _entityMap.Sessions[targetSessionId]; + TimeSpan? timeout = null; if (arguments != null) { - throw new FormatException("AbortTransactionOperation is not expected to contain arguments."); + foreach (var argument in arguments) + { + switch (argument.Name) + { + case "timeoutMS": + timeout = UnifiedEntityMap.ParseTimeout(argument.Value); + break; + default: + throw new FormatException($"Invalid AbortTransactionOperation argument name: '{argument.Name}'."); + } + } } - return new UnifiedAbortTransactionOperation(session); + AbortTransactionOptions options = null; + if (timeout.HasValue) + { + options = new AbortTransactionOptions(timeout); + } + + return new UnifiedAbortTransactionOperation(session, options); } } } diff --git a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedAggregateOperation.cs b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedAggregateOperation.cs index 33554ada8ae..ec522c9c173 100644 --- a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedAggregateOperation.cs +++ b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedAggregateOperation.cs @@ -188,6 +188,10 @@ private IUnifiedEntityTestOperation Build(IMongoDatabase database, IMongoCollect options ??= new AggregateOptions(); options.BatchSize = argument.Value.ToInt32(); break; + case 
"bypassDocumentValidation": + options ??= new AggregateOptions(); + options.BypassDocumentValidation = argument.Value.AsBoolean; + break; case "comment": options ??= new AggregateOptions(); options.Comment = argument.Value; @@ -206,6 +210,10 @@ private IUnifiedEntityTestOperation Build(IMongoDatabase database, IMongoCollect case "session": session = _entityMap.Sessions[argument.Value.AsString]; break; + case "timeoutMS": + options ??= new AggregateOptions(); + options.Timeout = UnifiedEntityMap.ParseTimeout(argument.Value); + break; default: throw new FormatException($"Invalid AggregateOperation argument name: '{argument.Name}'."); } diff --git a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedBulkWriteOperation.cs b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedBulkWriteOperation.cs index e0af96914a6..2895c4263e6 100644 --- a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedBulkWriteOperation.cs +++ b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedBulkWriteOperation.cs @@ -107,6 +107,10 @@ public UnifiedBulkWriteOperation Build(string targetCollectionId, BsonDocument a { switch (argument.Name) { + case "bypassDocumentValidation": + options ??= new(); + options.BypassDocumentValidation = argument.Value.AsBoolean; + break; case "comment": options ??= new BulkWriteOptions(); options.Comment = argument.Value; @@ -125,6 +129,10 @@ public UnifiedBulkWriteOperation Build(string targetCollectionId, BsonDocument a case "session": session = _entityMap.Sessions[argument.Value.AsString]; break; + case "timeoutMS": + options ??= new(); + options.Timeout = UnifiedEntityMap.ParseTimeout(argument.Value); + break; default: throw new FormatException($"Invalid BulkWriteOperation argument name: '{argument.Name}'."); } diff --git a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedClientBulkWriteOperation.cs b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedClientBulkWriteOperation.cs index 10e1adab54b..f87829cc29b 100644 --- 
a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedClientBulkWriteOperation.cs +++ b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedClientBulkWriteOperation.cs @@ -102,7 +102,7 @@ public static BsonDocument ConvertClientBulkWriteResult(ClientBulkWriteResult re { "deletedCount", (int)result.DeletedCount }, { "insertResults", ConvertResults(result.InsertResults, - item => new() { { "insertedId", item.InsertedId } }) + item => new() { { "insertedId", BsonValue.Create(item.DocumentId) } }) }, { "updateResults", ConvertResults(result.UpdateResults, diff --git a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedCloseCursorOperation.cs b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedCloseCursorOperation.cs index 1e75489e6f8..1eeb6c12b01 100644 --- a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedCloseCursorOperation.cs +++ b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedCloseCursorOperation.cs @@ -87,7 +87,7 @@ public static IAsyncCursor<TDocument> _cursor<TDocument>(this AsyncCursorEnumera } internal static class ChangeStreamCursorReflector - { + { public static IAsyncCursor<RawBsonDocument> _cursor(this ChangeStreamCursor<ChangeStreamDocument<BsonDocument>> cursor) { return (IAsyncCursor<RawBsonDocument>)Reflector.GetFieldValue(cursor, nameof(_cursor)); diff --git a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedCommitTransactionOperation.cs b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedCommitTransactionOperation.cs index a14f0199b2e..2f6d3aa733a 100644 --- a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedCommitTransactionOperation.cs +++ b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedCommitTransactionOperation.cs @@ -20,20 +20,22 @@ namespace MongoDB.Driver.Tests.UnifiedTestOperations { - public class UnifiedCommitTransactionOperation : IUnifiedEntityTestOperation + internal class UnifiedCommitTransactionOperation : IUnifiedEntityTestOperation { private readonly 
IClientSessionHandle _session; + private readonly CommitTransactionOptions _options; - public UnifiedCommitTransactionOperation(IClientSessionHandle session) + public UnifiedCommitTransactionOperation(IClientSessionHandle session, CommitTransactionOptions options) { _session = session; + _options = options; } public OperationResult Execute(CancellationToken cancellationToken) { try { - _session.CommitTransaction(cancellationToken); + _session.CommitTransaction(_options, cancellationToken); return OperationResult.Empty(); } catch (Exception ex) @@ -46,7 +48,7 @@ public async Task<OperationResult> ExecuteAsync(CancellationToken cancellationTo { try { - await _session.CommitTransactionAsync(cancellationToken).ConfigureAwait(false); + await _session.CommitTransactionAsync(_options, cancellationToken).ConfigureAwait(false); return OperationResult.Empty(); } catch (Exception ex) @@ -65,16 +67,33 @@ public UnifiedCommitTransactionOperationBuilder(UnifiedEntityMap entityMap) _entityMap = entityMap; } - public UnifiedCommitTransactionOperation Build(string targetSessionId, BsonDocument arguments) + internal UnifiedCommitTransactionOperation Build(string targetSessionId, BsonDocument arguments) { var session = _entityMap.Sessions[targetSessionId]; + TimeSpan? 
timeout = null; if (arguments != null) { - throw new FormatException("CommitTransactionOperation is not expected to contain arguments."); + foreach (var argument in arguments) + { + switch (argument.Name) + { + case "timeoutMS": + timeout = UnifiedEntityMap.ParseTimeout(argument.Value); + break; + default: + throw new FormatException($"Invalid CommitTransactionOperation argument name: '{argument.Name}'."); + } + } } - return new UnifiedCommitTransactionOperation(session); + CommitTransactionOptions options = null; + if (timeout.HasValue) + { + options = new CommitTransactionOptions(timeout); + } + + return new UnifiedCommitTransactionOperation(session, options); } } } diff --git a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedCountDocumentsOperation.cs b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedCountDocumentsOperation.cs index 0381bfee097..ab5138a7a6c 100644 --- a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedCountDocumentsOperation.cs +++ b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedCountDocumentsOperation.cs @@ -103,6 +103,10 @@ public UnifiedCountDocumentsOperation Build(string targetCollectionId, BsonDocum case "session": session = _entityMap.Sessions[argument.Value.AsString]; break; + case "timeoutMS": + options ??= new CountOptions(); + options.Timeout = UnifiedEntityMap.ParseTimeout(argument.Value); + break; default: throw new FormatException($"Invalid CountDocumentsOperation argument name: '{argument.Name}'."); } diff --git a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedCountOperation.cs b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedCountOperation.cs index e12428550c5..13b7c993feb 100644 --- a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedCountOperation.cs +++ b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedCountOperation.cs @@ -104,9 +104,17 @@ public UnifiedCountOperation Build(string targetCollectionId, BsonDocument argum case "filter": filter = new 
BsonDocumentFilterDefinition<BsonDocument>(argument.Value.AsBsonDocument); break; + case "maxTimeMS": + options ??= new CountOptions(); + options.MaxTime = TimeSpan.FromMilliseconds(argument.Value.AsInt32); + break; case "session": session = _entityMap.Sessions[argument.Value.AsString]; break; + case "timeoutMS": + options ??= new CountOptions(); + options.Timeout = UnifiedEntityMap.ParseTimeout(argument.Value); + break; default: throw new FormatException($"Invalid CountOperation argument name: '{argument.Name}'."); } diff --git a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedCreateChangeStreamOnClientOperation.cs b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedCreateChangeStreamOnClientOperation.cs index 3853daacf2b..c07713d5886 100644 --- a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedCreateChangeStreamOnClientOperation.cs +++ b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedCreateChangeStreamOnClientOperation.cs @@ -1,4 +1,4 @@ -/* Copyright 2020-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -89,13 +89,17 @@ public UnifiedCreateChangeStreamOnClientOperation Build(string targetClientId, B switch (argument.Name) { case "batchSize": - options = options ?? 
new ChangeStreamOptions(); + options ??= new ChangeStreamOptions(); options.BatchSize = argument.Value.AsInt32; break; case "pipeline": var stages = argument.Value.AsBsonArray.Cast<BsonDocument>(); pipeline = new BsonDocumentStagePipelineDefinition<ChangeStreamDocument<BsonDocument>, ChangeStreamDocument<BsonDocument>>(stages); break; + case "timeoutMS": + options ??= new ChangeStreamOptions(); + options.Timeout = UnifiedEntityMap.ParseTimeout(argument.Value); + break; default: throw new FormatException($"Invalid CreateChangeStreamOperation argument name: '{argument.Name}'."); } diff --git a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedCreateIndexOperation.cs b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedCreateIndexOperation.cs index a76b48af90d..4bdb4dfe773 100644 --- a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedCreateIndexOperation.cs +++ b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedCreateIndexOperation.cs @@ -24,16 +24,19 @@ public class UnifiedCreateIndexOperation : IUnifiedEntityTestOperation { private readonly IMongoCollection<BsonDocument> _collection; private readonly CreateIndexModel<BsonDocument> _createIndexModel; + private readonly CreateOneIndexOptions _options; private readonly IClientSessionHandle _session; public UnifiedCreateIndexOperation( IClientSessionHandle session, IMongoCollection<BsonDocument> collection, - CreateIndexModel<BsonDocument> createIndexModel) + CreateIndexModel<BsonDocument> createIndexModel, + CreateOneIndexOptions options) { _session = session; _collection = collection; _createIndexModel = createIndexModel; + _options = options; } public OperationResult Execute(CancellationToken cancellationToken) @@ -44,11 +47,11 @@ public OperationResult Execute(CancellationToken cancellationToken) if (_session == null) { - result = _collection.Indexes.CreateOne(_createIndexModel, cancellationToken: cancellationToken); + result = _collection.Indexes.CreateOne(_createIndexModel, _options, 
cancellationToken: cancellationToken); } else { - result = _collection.Indexes.CreateOne(_session, _createIndexModel, cancellationToken: cancellationToken); + result = _collection.Indexes.CreateOne(_session, _createIndexModel, _options, cancellationToken: cancellationToken); } return OperationResult.FromResult(BsonString.Create(result)); @@ -67,11 +70,11 @@ public async Task<OperationResult> ExecuteAsync(CancellationToken cancellationTo if (_session == null) { - result = await _collection.Indexes.CreateOneAsync(_createIndexModel, cancellationToken: cancellationToken); + result = await _collection.Indexes.CreateOneAsync(_createIndexModel, _options, cancellationToken: cancellationToken); } else { - result = await _collection.Indexes.CreateOneAsync(_session, _createIndexModel, cancellationToken: cancellationToken); + result = await _collection.Indexes.CreateOneAsync(_session, _createIndexModel, _options, cancellationToken: cancellationToken); } return OperationResult.FromResult(BsonString.Create(result)); @@ -98,6 +101,7 @@ public UnifiedCreateIndexOperation Build(string targetCollectionId, BsonDocument BsonDocument keys = null; CreateIndexOptions options = null; + CreateOneIndexOptions createOneIndexOptions = null; IClientSessionHandle session = null; foreach (var argument in arguments) @@ -107,14 +111,22 @@ public UnifiedCreateIndexOperation Build(string targetCollectionId, BsonDocument case "keys": keys = argument.Value.AsBsonDocument; break; + case "maxTimeMS": + createOneIndexOptions ??= new CreateOneIndexOptions(); + createOneIndexOptions.MaxTime = TimeSpan.FromMilliseconds(argument.Value.AsInt32); + break; case "name": - options = options ?? 
new CreateIndexOptions(); + options ??= new CreateIndexOptions(); options.Name = argument.Value.AsString; break; case "session": var sessionId = argument.Value.AsString; session = _entityMap.Sessions[sessionId]; break; + case "timeoutMS": + createOneIndexOptions ??= new CreateOneIndexOptions(); + createOneIndexOptions.Timeout = UnifiedEntityMap.ParseTimeout(argument.Value); + break; default: throw new FormatException($"Invalid CreateIndexOperation argument name: '{argument.Name}'."); } @@ -122,7 +134,7 @@ public UnifiedCreateIndexOperation Build(string targetCollectionId, BsonDocument var createIndexModel = new CreateIndexModel<BsonDocument>(keys, options); - return new UnifiedCreateIndexOperation(session, collection, createIndexModel); + return new UnifiedCreateIndexOperation(session, collection, createIndexModel, createOneIndexOptions); } } } diff --git a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedDeleteManyOperation.cs b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedDeleteManyOperation.cs index 2ec6069bbb3..4fe8a604b6c 100644 --- a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedDeleteManyOperation.cs +++ b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedDeleteManyOperation.cs @@ -111,6 +111,10 @@ public UnifiedDeleteManyOperation Build(string targetCollectionId, BsonDocument case "session": session = _entityMap.Sessions[argument.Value.AsString]; break; + case "timeoutMS": + options ??= new DeleteOptions(); + options.Timeout = UnifiedEntityMap.ParseTimeout(argument.Value); + break; default: throw new FormatException($"Invalid DeleteManyOperation argument name: '{argument.Name}'."); } diff --git a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedDeleteOneOperation.cs b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedDeleteOneOperation.cs index 8d095e40235..491a825cd27 100644 --- a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedDeleteOneOperation.cs +++ 
b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedDeleteOneOperation.cs @@ -111,6 +111,10 @@ public UnifiedDeleteOneOperation Build(string targetCollectionId, BsonDocument a case "session": session = _entityMap.Sessions[argument.Value.AsString]; break; + case "timeoutMS": + options ??= new DeleteOptions(); + options.Timeout = UnifiedEntityMap.ParseTimeout(argument.Value); + break; default: throw new FormatException($"Invalid DeleteOneOperation argument name: '{argument.Name}'."); } diff --git a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedDistinctOperation.cs b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedDistinctOperation.cs index 271913e9695..151b07c7457 100644 --- a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedDistinctOperation.cs +++ b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedDistinctOperation.cs @@ -102,7 +102,8 @@ public UnifiedDistinctOperation Build(string targetCollectionId, BsonDocument ar switch (argument.Name) { case "comment": - options = new DistinctOptions { Comment = argument.Value }; + options ??= new DistinctOptions(); + options.Comment = argument.Value; break; case "fieldName": fieldName = argument.Value.AsString; @@ -110,9 +111,17 @@ public UnifiedDistinctOperation Build(string targetCollectionId, BsonDocument ar case "filter": filter = argument.Value.AsBsonDocument; break; + case "maxTimeMS": + options ??= new DistinctOptions(); + options.MaxTime = TimeSpan.FromMilliseconds(argument.Value.AsInt32); + break; case "session": session = _entityMap.Sessions[argument.Value.AsString]; break; + case "timeoutMS": + options ??= new DistinctOptions(); + options.Timeout = UnifiedEntityMap.ParseTimeout(argument.Value); + break; default: throw new FormatException($"Invalid DistinctOperation argument name: '{argument.Name}'."); } diff --git a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedDropIndexOperation.cs b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedDropIndexOperation.cs index 
c903360490d..d13e381f25a 100644 --- a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedDropIndexOperation.cs +++ b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedDropIndexOperation.cs @@ -25,15 +25,18 @@ public class UnifiedDropIndexOperation : IUnifiedEntityTestOperation private readonly IMongoCollection<BsonDocument> _collection; private readonly string _indexName; private readonly IClientSessionHandle _session; + private readonly DropIndexOptions _options; public UnifiedDropIndexOperation( IClientSessionHandle session, IMongoCollection<BsonDocument> collection, - string indexName) + string indexName, + DropIndexOptions options) { _session = session; _collection = collection; _indexName = indexName; + _options = options; } public OperationResult Execute(CancellationToken cancellationToken) @@ -42,11 +45,11 @@ public OperationResult Execute(CancellationToken cancellationToken) { if (_session == null) { - _collection.Indexes.DropOne(_indexName, cancellationToken); + _collection.Indexes.DropOne(_indexName, _options, cancellationToken); } else { - _collection.Indexes.DropOne(_session, _indexName, cancellationToken); + _collection.Indexes.DropOne(_session, _indexName, _options, cancellationToken); } return OperationResult.Empty(); @@ -63,11 +66,11 @@ public async Task<OperationResult> ExecuteAsync(CancellationToken cancellationTo { if (_session == null) { - await _collection.Indexes.DropOneAsync(_indexName, cancellationToken); + await _collection.Indexes.DropOneAsync(_indexName, _options, cancellationToken); } else { - await _collection.Indexes.DropOneAsync(_session, _indexName, cancellationToken); + await _collection.Indexes.DropOneAsync(_session, _indexName, _options, cancellationToken); } return OperationResult.Empty(); @@ -93,6 +96,7 @@ public UnifiedDropIndexOperation Build(string targetCollectionId, BsonDocument a var collection = _entityMap.Collections[targetCollectionId]; string indexName = null; IClientSessionHandle session = null; + 
DropIndexOptions options = null; foreach (var argument in arguments) { @@ -101,16 +105,24 @@ public UnifiedDropIndexOperation Build(string targetCollectionId, BsonDocument a case "name": indexName = argument.Value.AsString; break; + case "maxTimeMS": + options ??= new DropIndexOptions(); + options.MaxTime = TimeSpan.FromMilliseconds(argument.Value.AsInt32); + break; case "session": var sessionId = argument.Value.AsString; session = _entityMap.Sessions[sessionId]; break; + case "timeoutMS": + options ??= new DropIndexOptions(); + options.Timeout = UnifiedEntityMap.ParseTimeout(argument.Value); + break; default: throw new FormatException($"Invalid DropIndexOperation argument name: '{argument.Name}'."); } } - return new UnifiedDropIndexOperation(session, collection, indexName); + return new UnifiedDropIndexOperation(session, collection, indexName, options); } } } diff --git a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedEntityMap.cs b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedEntityMap.cs index c3f4ad1c8a3..5649ca8a3c2 100644 --- a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedEntityMap.cs +++ b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedEntityMap.cs @@ -16,6 +16,7 @@ using System; using System.Collections; using System.Collections.Generic; +using System.Diagnostics; using System.Linq; using System.Threading; using System.Threading.Tasks; @@ -24,6 +25,7 @@ using MongoDB.Driver.Core; using MongoDB.Driver.Core.Clusters; using MongoDB.Driver.Core.Configuration; +using MongoDB.Driver.Core.ConnectionPools; using MongoDB.Driver.Core.Events; using MongoDB.Driver.Core.Misc; using MongoDB.Driver.Core.Servers; @@ -202,7 +204,7 @@ public Dictionary<string, Dictionary<string, LogLevel>> LoggingComponents } } - public Dictionary<string, BsonValue> Resutls + public Dictionary<string, BsonValue> Results { get { @@ -303,6 +305,65 @@ public void RegisterForDispose(IDisposable disposable) } // private methods + private 
AutoEncryptionOptions ConfigureAutoEncryptionOptions(BsonDocument autoEncryptOpts) + { + var extraOptions = new Dictionary<string, object>(); + EncryptionTestHelper.ConfigureDefaultExtraOptions(extraOptions); + + var bypassAutoEncryption = false; + bool? bypassQueryAnalysis = null; + Optional<IReadOnlyDictionary<string, BsonDocument>> encryptedFieldsMap = null; + CollectionNamespace keyVaultNamespace = null; + IReadOnlyDictionary<string, IReadOnlyDictionary<string, object>> kmsProviders = null; + Optional<IReadOnlyDictionary<string, BsonDocument>> schemaMap = null; + Optional<IReadOnlyDictionary<string, SslSettings>> tlsOptions = null; + + foreach (var option in autoEncryptOpts.Elements) + { + switch (option.Name) + { + case "bypassAutoEncryption": + bypassAutoEncryption = option.Value.AsBoolean; + break; + case "bypassQueryAnalysis": + bypassQueryAnalysis = option.Value.AsBoolean; + break; + case "encryptedFieldsMap": + var encryptedFieldsMapDocument = option.Value.AsBsonDocument; + encryptedFieldsMap = encryptedFieldsMapDocument.Elements.ToDictionary(e => e.Name, e => e.Value.AsBsonDocument); + break; + case "extraOptions": + ParseExtraOptions(option.Value.AsBsonDocument, extraOptions); + break; + case "keyVaultNamespace": + keyVaultNamespace = CollectionNamespace.FromFullName(option.Value.AsString); + break; + case "kmsProviders": + kmsProviders = EncryptionTestHelper.ParseKmsProviders(option.Value.AsBsonDocument); + tlsOptions = EncryptionTestHelper.CreateTlsOptionsIfAllowed(kmsProviders, allowClientCertificateFunc: (kms) => kms.StartsWith("kmip")); + break; + case "schemaMap": + var schemaMapDocument = option.Value.AsBsonDocument; + schemaMap = schemaMapDocument.Elements.ToDictionary(e => e.Name, e => e.Value.AsBsonDocument); + break; + default: + throw new FormatException($"Invalid autoEncryption option argument name {option.Name}."); + } + } + + var autoEncryptionOptions = new AutoEncryptionOptions( + keyVaultNamespace, + kmsProviders, + 
bypassAutoEncryption, + extraOptions, + bypassQueryAnalysis: bypassQueryAnalysis, + encryptedFieldsMap: encryptedFieldsMap, + schemaMap: schemaMap, + tlsOptions: tlsOptions); + + return autoEncryptionOptions; + } + private void CreateEntities(BsonArray entitiesArray) { if (entitiesArray != null) @@ -407,6 +468,7 @@ private IGridFSBucket CreateBucket(BsonDocument entity, Dictionary<string, IMong string appName = null; string authMechanism = null; var authMechanismProperties = new Dictionary<string, object>(); + AutoEncryptionOptions autoEncryptionOptions = null; var clientEventCapturers = new Dictionary<string, EventCapturer>(); Dictionary<string, LogLevel> loggingComponents = null; string clientId = null; @@ -428,10 +490,13 @@ private IGridFSBucket CreateBucket(BsonDocument entity, Dictionary<string, IMong TimeSpan? serverSelectionTimeout = null; int? waitQueueSize = null; TimeSpan? socketTimeout = null; + TimeSpan? timeout = null; var useMultipleShardRouters = false; TimeSpan? waitQueueTimeout = null; var writeConcern = WriteConcern.Acknowledged; var serverApi = CoreTestConfiguration.ServerApi; + TimeSpan? wTimeout = null; + TimeSpan? 
awaitMinPoolSizeTimeout = null; foreach (var element in entity) { @@ -440,6 +505,12 @@ private IGridFSBucket CreateBucket(BsonDocument entity, Dictionary<string, IMong case "id": clientId = element.Value.AsString; break; + case "autoEncryptOpts": + autoEncryptionOptions = ConfigureAutoEncryptionOptions(element.Value.AsBsonDocument); + break; + case "awaitMinPoolSizeMS": + awaitMinPoolSizeTimeout = TimeSpan.FromMilliseconds(element.Value.AsInt32); + break; case "uriOptions": foreach (var option in element.Value.AsBsonDocument) { @@ -534,6 +605,9 @@ private IGridFSBucket CreateBucket(BsonDocument entity, Dictionary<string, IMong case "socketTimeoutMS": socketTimeout = TimeSpan.FromMilliseconds(option.Value.AsInt32); break; + case "timeoutMS": + timeout = ParseTimeout(option.Value); + break; case "w": writeConcern = new WriteConcern(WriteConcern.WValue.Parse(option.Value.ToString())); break; @@ -543,6 +617,9 @@ private IGridFSBucket CreateBucket(BsonDocument entity, Dictionary<string, IMong case "waitQueueTimeoutMS": waitQueueTimeout = TimeSpan.FromMilliseconds(option.Value.ToInt32()); break; + case "wTimeoutMS": + wTimeout = TimeSpan.FromMilliseconds(option.Value.ToInt32()); + break; default: throw new FormatException($"Invalid client uriOption argument name: '{option.Name}'."); } @@ -624,6 +701,11 @@ private IGridFSBucket CreateBucket(BsonDocument entity, Dictionary<string, IMong } } + if (wTimeout.HasValue) + { + writeConcern = writeConcern.With(wTimeout: wTimeout); + } + // Regardless of whether events are observed, we still need to track some info about the pool in order to implement // the assertNumberConnectionsCheckedOut operation if (eventTypesToCapture.Count == 0) @@ -660,6 +742,7 @@ private IGridFSBucket CreateBucket(BsonDocument entity, Dictionary<string, IMong settings => { settings.ApplicationName = FailPoint.DecorateApplicationName(appName, async); + settings.AutoEncryptionOptions = autoEncryptionOptions; settings.ConnectTimeout = 
connectTimeout.GetValueOrDefault(defaultValue: settings.ConnectTimeout); settings.LoadBalanced = loadBalanced.GetValueOrDefault(defaultValue: settings.LoadBalanced); settings.LoggingSettings = _loggingSettings; @@ -685,6 +768,7 @@ private IGridFSBucket CreateBucket(BsonDocument entity, Dictionary<string, IMong settings.ServerMonitoringMode = serverMonitoringMode.GetValueOrDefault(settings.ServerMonitoringMode); settings.ServerSelectionTimeout = serverSelectionTimeout.GetValueOrDefault(defaultValue: settings.ServerSelectionTimeout); settings.SocketTimeout = socketTimeout.GetValueOrDefault(defaultValue: settings.SocketTimeout); + settings.Timeout = timeout; if (eventCapturers.Length > 0) { settings.ClusterConfigurator = c => @@ -715,6 +799,24 @@ private IGridFSBucket CreateBucket(BsonDocument entity, Dictionary<string, IMong }, useMultipleShardRouters); + if (awaitMinPoolSizeTimeout.HasValue && minPoolSize is > 0) + { + if (!SpinWait.SpinUntil(() => + { + var servers = ((IClusterInternal)client.Cluster).Servers.Where(s => s.Description.IsDataBearing).ToArray(); + return servers.Any() && servers.All(s => ((ExclusiveConnectionPool)s.ConnectionPool).DormantCount >= minPoolSize); + }, awaitMinPoolSizeTimeout.Value)) + { + client.Dispose(); + throw new TimeoutException("MinPoolSize population took too long"); + } + + foreach (var eventCapturer in clientEventCapturers.Values) + { + eventCapturer.Clear(); + } + } + return (client, clientEventCapturers, loggingComponents); } @@ -751,7 +853,7 @@ private ClientEncryption CreateClientEncryption(Dictionary<string, IMongoClient> keyExpiration = TimeSpan.FromMilliseconds(option.Value.AsInt32); break; default: - throw new FormatException($"Invalid collection option argument name: '{option.Name}'."); + throw new FormatException($"Invalid clientEncryption option argument name: '{option.Name}'."); } } @@ -764,7 +866,7 @@ private ClientEncryption CreateClientEncryption(Dictionary<string, IMongoClient> 
options.SetKeyExpiration(keyExpiration); break; default: - throw new FormatException($"Invalid {nameof(ClientEncryptionOptions)} argument name: '{element.Name}'."); + throw new FormatException($"Invalid clientEncryption argument name: '{element.Name}'."); } } @@ -802,6 +904,9 @@ private IMongoCollection<BsonDocument> CreateCollection(BsonDocument entity, Dic case "readPreference": settings.ReadPreference = ReadPreference.FromBsonDocument(option.Value.AsBsonDocument); break; + case "timeoutMS": + settings.Timeout = ParseTimeout(option.Value); + break; case "writeConcern": settings.WriteConcern = ParseWriteConcern(option.Value.AsBsonDocument); break; @@ -851,6 +956,9 @@ private IMongoDatabase CreateDatabase(BsonDocument entity, Dictionary<string, IM case "readPreference": databaseSettings.ReadPreference = ReadPreference.FromBsonDocument(option.Value.AsBsonDocument); break; + case "timeoutMS": + databaseSettings.Timeout = ParseTimeout(option.Value); + break; case "writeConcern": databaseSettings.WriteConcern = ParseWriteConcern(option.Value.AsBsonDocument); break; @@ -975,6 +1083,15 @@ private IClientSessionHandle CreateSession(BsonDocument entity, Dictionary<strin case "causalConsistency": options.CausalConsistency = option.Value.ToBoolean(); break; + case "defaultTimeoutMS": + var timeout = ParseTimeout(option.Value); + options.DefaultTransactionOptions = new TransactionOptions( + timeout, + options.DefaultTransactionOptions?.ReadConcern, + options.DefaultTransactionOptions?.ReadPreference, + options.DefaultTransactionOptions?.WriteConcern, + options.DefaultTransactionOptions?.MaxCommitTime); + break; case "defaultTransactionOptions": ReadConcern readConcern = null; ReadPreference readPreference = null; @@ -1001,7 +1118,7 @@ private IClientSessionHandle CreateSession(BsonDocument entity, Dictionary<strin } } - options.DefaultTransactionOptions = new TransactionOptions(readConcern, readPreference, writeConcern, maxCommitTime); + options.DefaultTransactionOptions = 
new TransactionOptions(options.DefaultTransactionOptions?.Timeout, readConcern, readPreference, writeConcern, maxCommitTime); break; default: throw new FormatException($"Invalid session option argument name: '{option.Name}'."); @@ -1023,6 +1140,21 @@ private IClientSessionHandle CreateSession(BsonDocument entity, Dictionary<strin return session; } + private void ParseExtraOptions(BsonDocument extraOptionsDocument, Dictionary<string, object> extraOptions) + { + foreach (var extraOption in extraOptionsDocument.Elements) + { + switch (extraOption.Name) + { + case "mongocryptdBypassSpawn": + extraOptions.Add(extraOption.Name, extraOption.Value.ToBoolean()); + break; + default: + throw new FormatException($"Invalid extraOption argument name {extraOption.Name}."); + } + } + } + private void ThrowIfDisposed() { if (_disposed) @@ -1048,5 +1180,8 @@ public static WriteConcern ParseWriteConcern(BsonDocument writeConcernDocument) return writeConcern; } + + public static TimeSpan ParseTimeout(BsonValue value) + => value.AsInt32 == 0 ? Timeout.InfiniteTimeSpan : TimeSpan.FromMilliseconds(value.AsInt32); } } diff --git a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedEstimatedDocumentCountOperation.cs b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedEstimatedDocumentCountOperation.cs index b144a6715bb..fa0222ca47e 100644 --- a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedEstimatedDocumentCountOperation.cs +++ b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedEstimatedDocumentCountOperation.cs @@ -1,4 +1,4 @@ -/* Copyright 2021-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -87,6 +87,10 @@ public UnifiedEstimatedDocumentCountOperation Build(string targetCollectionId, B case "maxTimeMS": options.MaxTime = TimeSpan.FromMilliseconds(argument.Value.AsInt32); break; + case "timeoutMS": + options ??= new EstimatedDocumentCountOptions(); + options.Timeout = UnifiedEntityMap.ParseTimeout(argument.Value); + break; default: throw new FormatException($"Invalid {nameof(UnifiedEstimatedDocumentCountOperation)} argument name: '{argument.Name}'."); } diff --git a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedFindOneAndDeleteOperation.cs b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedFindOneAndDeleteOperation.cs index e6844ce567d..a7a1a8b5001 100644 --- a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedFindOneAndDeleteOperation.cs +++ b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedFindOneAndDeleteOperation.cs @@ -108,6 +108,12 @@ public UnifiedFindOneAndDeleteOperation Build(string targetCollectionId, BsonDoc options ??= new FindOneAndDeleteOptions<BsonDocument>(); options.Let = argument.Value.AsBsonDocument; break; +#pragma warning disable CS0618 // Type or member is obsolete + case "maxTimeMS": + options ??= new FindOneAndDeleteOptions<BsonDocument>(); + options.MaxTime = TimeSpan.FromMilliseconds(argument.Value.AsInt32); + break; +#pragma warning restore CS0618 // Type or member is obsolete case "session": session = _entityMap.Sessions[argument.Value.AsString]; break; @@ -115,6 +121,10 @@ public UnifiedFindOneAndDeleteOperation Build(string targetCollectionId, BsonDoc options ??= new FindOneAndDeleteOptions<BsonDocument>(); options.Sort = argument.Value.AsBsonDocument; break; + case "timeoutMS": + options ??= new FindOneAndDeleteOptions<BsonDocument>(); + options.Timeout = UnifiedEntityMap.ParseTimeout(argument.Value); + break; default: throw new FormatException($"Invalid FindOneAndDeleteOperation argument name: '{argument.Name}'."); } diff --git 
a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedFindOneAndReplaceOperation.cs b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedFindOneAndReplaceOperation.cs index 9a5f9ab2555..7845c1ed925 100644 --- a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedFindOneAndReplaceOperation.cs +++ b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedFindOneAndReplaceOperation.cs @@ -97,6 +97,10 @@ public UnifiedFindOneAndReplaceOperation Build(string targetCollectionId, BsonDo { switch (argument.Name) { + case "bypassDocumentValidation": + options ??= new(); + options.BypassDocumentValidation = argument.Value.AsBoolean; + break; case "comment": options ??= new FindOneAndReplaceOptions<BsonDocument>(); options.Comment = argument.Value; @@ -112,6 +116,12 @@ public UnifiedFindOneAndReplaceOperation Build(string targetCollectionId, BsonDo options ??= new FindOneAndReplaceOptions<BsonDocument>(); options.Let = argument.Value.AsBsonDocument; break; +#pragma warning disable CS0618 // Type or member is obsolete + case "maxTimeMS": + options ??= new FindOneAndReplaceOptions<BsonDocument>(); + options.MaxTime = TimeSpan.FromMilliseconds(argument.Value.AsInt32); + break; +#pragma warning restore CS0618 // Type or member is obsolete case "replacement": replacement = argument.Value.AsBsonDocument; break; @@ -126,6 +136,10 @@ public UnifiedFindOneAndReplaceOperation Build(string targetCollectionId, BsonDo options ??= new FindOneAndReplaceOptions<BsonDocument>(); options.IsUpsert = argument.Value.AsBoolean; break; + case "timeoutMS": + options ??= new FindOneAndReplaceOptions<BsonDocument>(); + options.Timeout = UnifiedEntityMap.ParseTimeout(argument.Value); + break; default: throw new FormatException($"Invalid FindOneAndReplaceOperation argument name: '{argument.Name}'."); } diff --git a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedFindOneAndUpdateOperation.cs b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedFindOneAndUpdateOperation.cs index 
c2a4dcb9516..661852518c8 100644 --- a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedFindOneAndUpdateOperation.cs +++ b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedFindOneAndUpdateOperation.cs @@ -98,6 +98,10 @@ public UnifiedFindOneAndUpdateOperation Build(string targetCollectionId, BsonDoc { switch (argument.Name) { + case "bypassDocumentValidation": + options ??= new(); + options.BypassDocumentValidation = argument.Value.AsBoolean; + break; case "comment": options ??= new FindOneAndUpdateOptions<BsonDocument>(); options.Comment = argument.Value; @@ -113,6 +117,12 @@ public UnifiedFindOneAndUpdateOperation Build(string targetCollectionId, BsonDoc options ??= new FindOneAndUpdateOptions<BsonDocument>(); options.Let = argument.Value.AsBsonDocument; break; +#pragma warning disable CS0618 // Type or member is obsolete + case "maxTimeMS": + options ??= new FindOneAndUpdateOptions<BsonDocument>(); + options.MaxTime = TimeSpan.FromMilliseconds(argument.Value.AsInt32); + break; +#pragma warning restore CS0618 // Type or member is obsolete case "returnDocument": options ??= new FindOneAndUpdateOptions<BsonDocument>(); options.ReturnDocument = (ReturnDocument)Enum.Parse(typeof(ReturnDocument), argument.Value.AsString); @@ -124,6 +134,10 @@ public UnifiedFindOneAndUpdateOperation Build(string targetCollectionId, BsonDoc options ??= new FindOneAndUpdateOptions<BsonDocument>(); options.Sort = new BsonDocumentSortDefinition<BsonDocument>(argument.Value.AsBsonDocument); break; + case "timeoutMS": + options ??= new FindOneAndUpdateOptions<BsonDocument>(); + options.Timeout = UnifiedEntityMap.ParseTimeout(argument.Value); + break; case "update": switch (argument.Value) { diff --git a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedFindOperation.cs b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedFindOperation.cs index d4777a1c7b6..76189268c46 100644 --- a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedFindOperation.cs +++ 
b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedFindOperation.cs @@ -159,6 +159,10 @@ public UnifiedFindOperation Build(string targetCollectionId, BsonDocument argume options ??= new FindOptions<BsonDocument>(); options.Sort = new BsonDocumentSortDefinition<BsonDocument>(argument.Value.AsBsonDocument); break; + case "timeoutMS": + options ??= new FindOptions<BsonDocument>(); + options.Timeout = UnifiedEntityMap.ParseTimeout(argument.Value); + break; default: throw new FormatException($"Invalid FindOperation argument name: '{argument.Name}'."); } diff --git a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedInsertManyOperation.cs b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedInsertManyOperation.cs index 64694909694..d0596052b1f 100644 --- a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedInsertManyOperation.cs +++ b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedInsertManyOperation.cs @@ -105,6 +105,10 @@ public UnifiedInsertManyOperation Build(string targetCollectionId, BsonDocument { switch (argument.Name) { + case "bypassDocumentValidation": + options ??= new(); + options.BypassDocumentValidation = argument.Value.AsBoolean; + break; case "comment": options ??= new InsertManyOptions(); options.Comment = argument.Value; @@ -113,12 +117,16 @@ public UnifiedInsertManyOperation Build(string targetCollectionId, BsonDocument documents = argument.Value.AsBsonArray.Cast<BsonDocument>().ToList(); break; case "ordered": - options = options ?? 
new InsertManyOptions(); + options ??= new InsertManyOptions(); options.IsOrdered = argument.Value.AsBoolean; break; case "session": session = _entityMap.Sessions[argument.Value.AsString]; break; + case "timeoutMS": + options ??= new InsertManyOptions(); + options.Timeout = UnifiedEntityMap.ParseTimeout(argument.Value); + break; default: throw new FormatException($"Invalid InsertManyOperation argument name: '{argument.Name}'."); } diff --git a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedInsertOneOperation.cs b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedInsertOneOperation.cs index 3466c3f8062..a9619aa6f18 100644 --- a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedInsertOneOperation.cs +++ b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedInsertOneOperation.cs @@ -118,6 +118,10 @@ public UnifiedInsertOneOperation Build(string targetCollectionId, BsonDocument a var sessionId = argument.Value.AsString; session = _entityMap.Sessions[sessionId]; break; + case "timeoutMS": + options ??= new InsertOneOptions(); + options.Timeout = UnifiedEntityMap.ParseTimeout(argument.Value); + break; default: throw new FormatException($"Invalid InsertOneOperation argument name: '{argument.Name}'."); } diff --git a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedListCollectionNamesOperation.cs b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedListCollectionNamesOperation.cs index 2de6bb5f7d9..c385a676969 100644 --- a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedListCollectionNamesOperation.cs +++ b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedListCollectionNamesOperation.cs @@ -86,8 +86,7 @@ public UnifiedListCollectionNamesOperationBuilder(UnifiedEntityMap entityMap) public UnifiedListCollectionNamesOperation Build(string targetDatabaseId, BsonDocument arguments) { var database = _entityMap.Databases[targetDatabaseId]; - - var listCollectionsOptions = new ListCollectionNamesOptions(); + 
ListCollectionNamesOptions options = null; IClientSessionHandle session = null; if (arguments != null) @@ -97,18 +96,23 @@ public UnifiedListCollectionNamesOperation Build(string targetDatabaseId, BsonDo switch (argument.Name) { case "filter": - listCollectionsOptions.Filter = argument.Value.AsBsonDocument; + options ??= new ListCollectionNamesOptions(); + options.Filter = argument.Value.AsBsonDocument; break; case "session": session = _entityMap.Sessions[argument.Value.AsString]; break; + case "timeoutMS": + options ??= new ListCollectionNamesOptions(); + options.Timeout = UnifiedEntityMap.ParseTimeout(argument.Value); + break; default: throw new FormatException($"Invalid {nameof(UnifiedListCollectionNamesOperation)} argument name: '{argument.Name}'."); } } } - return new UnifiedListCollectionNamesOperation(database, listCollectionsOptions, session); + return new UnifiedListCollectionNamesOperation(database, options, session); } } } diff --git a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedListCollectionsOperation.cs b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedListCollectionsOperation.cs index 922c9b16e1a..3e032186d40 100644 --- a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedListCollectionsOperation.cs +++ b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedListCollectionsOperation.cs @@ -1,4 +1,4 @@ -/* Copyright 2021-present MongoDB Inc. +/* Copyright 2010-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -86,8 +86,7 @@ public UnifiedListCollectionsOperationBuilder(UnifiedEntityMap entityMap) public UnifiedListCollectionsOperation Build(string targetDatabaseId, BsonDocument arguments) { var database = _entityMap.Databases[targetDatabaseId]; - - var listCollectionsOptions = new ListCollectionsOptions(); + ListCollectionsOptions options = null; IClientSessionHandle session = null; if (arguments != null) @@ -97,21 +96,27 @@ public UnifiedListCollectionsOperation Build(string targetDatabaseId, BsonDocume switch (argument.Name) { case "filter": - listCollectionsOptions.Filter = argument.Value.AsBsonDocument; + options ??= new ListCollectionsOptions(); + options.Filter = argument.Value.AsBsonDocument; break; case "batchSize": - listCollectionsOptions.BatchSize = argument.Value.ToInt32(); + options ??= new ListCollectionsOptions(); + options.BatchSize = argument.Value.ToInt32(); break; case "session": session = _entityMap.Sessions[argument.Value.AsString]; break; + case "timeoutMS": + options ??= new ListCollectionsOptions(); + options.Timeout = UnifiedEntityMap.ParseTimeout(argument.Value); + break; default: throw new FormatException($"Invalid {nameof(UnifiedListCollectionsOperation)} argument name: '{argument.Name}'."); } } } - return new UnifiedListCollectionsOperation(database, listCollectionsOptions, session); + return new UnifiedListCollectionsOperation(database, options, session); } } } diff --git a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedListDatabaseNamesOperation.cs b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedListDatabaseNamesOperation.cs index e6262d71142..21943c36d19 100644 --- a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedListDatabaseNamesOperation.cs +++ b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedListDatabaseNamesOperation.cs @@ -101,6 +101,10 @@ public UnifiedListDatabaseNamesOperation Build(string targetClientId, BsonDocume case "session": session = _entityMap.Sessions[argument.Value.AsString]; 
break; + case "timeoutMS": + options ??= new ListDatabaseNamesOptions(); + options.Timeout = UnifiedEntityMap.ParseTimeout(argument.Value); + break; default: throw new FormatException($"Invalid ListDatabasesOperation argument name: '{argument.Name}'."); } diff --git a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedListDatabasesOperation.cs b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedListDatabasesOperation.cs index d64cd4ae204..e511044d6dd 100644 --- a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedListDatabasesOperation.cs +++ b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedListDatabasesOperation.cs @@ -101,6 +101,10 @@ public UnifiedListDatabasesOperation Build(string targetClientId, BsonDocument a case "session": session = _entityMap.Sessions[argument.Value.AsString]; break; + case "timeoutMS": + options ??= new ListDatabasesOptions(); + options.Timeout = UnifiedEntityMap.ParseTimeout(argument.Value); + break; default: throw new FormatException($"Invalid ListDatabasesOperation argument name: '{argument.Name}'."); } diff --git a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedListIndexesOperation.cs b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedListIndexesOperation.cs index 16665b6e994..896ab3a13c2 100644 --- a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedListIndexesOperation.cs +++ b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedListIndexesOperation.cs @@ -86,8 +86,7 @@ public UnifiedListIndexesOperationBuilder(UnifiedEntityMap entityMap) public UnifiedListIndexesOperation Build(string targetCollectionId, BsonDocument arguments) { var collection = _entityMap.Collections[targetCollectionId]; - - var listIndexesOptions = new ListIndexesOptions(); + ListIndexesOptions options = null; IClientSessionHandle session = null; if (arguments != null) @@ -97,18 +96,23 @@ public UnifiedListIndexesOperation Build(string targetCollectionId, BsonDocument switch (argument.Name) { case "batchSize": 
- listIndexesOptions.BatchSize = argument.Value.ToInt32(); + options ??= new ListIndexesOptions(); + options.BatchSize = argument.Value.ToInt32(); break; case "session": session = _entityMap.Sessions[argument.Value.AsString]; break; + case "timeoutMS": + options ??= new ListIndexesOptions(); + options.Timeout = UnifiedEntityMap.ParseTimeout(argument.Value); + break; default: throw new FormatException($"Invalid {nameof(UnifiedListIndexesOperation)} argument name: '{argument.Name}'."); } } } - return new UnifiedListIndexesOperation(collection, listIndexesOptions, session); + return new UnifiedListIndexesOperation(collection, options, session); } } } diff --git a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedReplaceOneOperation.cs b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedReplaceOneOperation.cs index c7c503f5f2c..3ca959f2cc4 100644 --- a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedReplaceOneOperation.cs +++ b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedReplaceOneOperation.cs @@ -97,6 +97,10 @@ public UnifiedReplaceOneOperation Build(string targetCollectionId, BsonDocument { switch (argument.Name) { + case "bypassDocumentValidation": + options ??= new(); + options.BypassDocumentValidation = argument.Value.AsBoolean; + break; case "comment": options ??= new ReplaceOptions<BsonDocument>(); options.Comment = argument.Value; @@ -122,6 +126,10 @@ public UnifiedReplaceOneOperation Build(string targetCollectionId, BsonDocument options ??= new ReplaceOptions<BsonDocument>(); options.Sort = argument.Value.AsBsonDocument; break; + case "timeoutMS": + options ??= new ReplaceOptions<BsonDocument>(); + options.Timeout = UnifiedEntityMap.ParseTimeout(argument.Value); + break; case "upsert": options ??= new ReplaceOptions<BsonDocument>(); options.IsUpsert = argument.Value.AsBoolean; diff --git a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedTargetedFailPointOperation.cs 
b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedTargetedFailPointOperation.cs index a1ff0e272c3..97463a719b0 100644 --- a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedTargetedFailPointOperation.cs +++ b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedTargetedFailPointOperation.cs @@ -14,7 +14,6 @@ */ using System; -using System.Threading; using MongoDB.Bson; using MongoDB.Driver.Core.Bindings; using MongoDB.Driver.Core.Clusters.ServerSelectors; @@ -53,7 +52,7 @@ public void Execute() _entityMap.RegisterForDispose(client); var cluster = client.GetClusterInternal(); - var server = cluster.SelectServer(new EndPointServerSelector(pinnedServer), CancellationToken.None); + var server = cluster.SelectServer(OperationContext.NoTimeout, new EndPointServerSelector(pinnedServer)); var session = NoCoreSession.NewHandle(); diff --git a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedTestRunner.cs b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedTestRunner.cs index 1f95953b77d..3b4625bc4f1 100644 --- a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedTestRunner.cs +++ b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedTestRunner.cs @@ -108,7 +108,7 @@ public void Run( var schemaSemanticVersion = SemanticVersion.Parse(schemaVersion); if (schemaSemanticVersion < new SemanticVersion(1, 0, 0) || - schemaSemanticVersion > new SemanticVersion(1, 22, 0)) + schemaSemanticVersion > new SemanticVersion(1, 26, 0)) { throw new FormatException($"Schema version '{schemaVersion}' is not supported."); } @@ -125,14 +125,7 @@ public void Run( throw new SkipException($"Test skipped because '{skipReason}'."); } - // should skip on KillOpenTransactions for Atlas Data Lake tests. 
- // https://github.com/mongodb/specifications/blob/80f88d0af6e47407c03874512e0d9b73708edad5/source/atlas-data-lake-testing/tests/README.md?plain=1#L23 - if (string.IsNullOrEmpty(Environment.GetEnvironmentVariable("ATLAS_DATA_LAKE_TESTS_ENABLED"))) - { - KillOpenTransactions(DriverTestConfiguration.Client); - } - - BsonDocument lastKnownClusterTime = AddInitialData(DriverTestConfiguration.Client, initialData); + var lastKnownClusterTime = AddInitialData(DriverTestConfiguration.Client, initialData); _entityMap = UnifiedEntityMap.Create(_eventFormatters, _loggingService.LoggingSettings, async, lastKnownClusterTime); _entityMap.AddRange(entities); @@ -193,10 +186,32 @@ private BsonDocument AddInitialData(IMongoClient client, BsonArray initialData) var database = client.GetDatabase(databaseName).WithWriteConcern(WriteConcern.WMajority); + var createCollectionOptions = new CreateCollectionOptions(); + if (dataItem.AsBsonDocument.Contains("createOptions")) + { + var options = dataItem.AsBsonDocument["createOptions"].AsBsonDocument; + foreach (var option in options) + { + switch (option.Name) + { + case "encryptedFields": + createCollectionOptions.EncryptedFields = option.Value.AsBsonDocument; + break; + default: + throw new FormatException($"Invalid createOptions argument name: '{option.Name}'."); + } + } + } + _logger.LogDebug("Dropping {0}", collectionName); using var session = client.StartSession(); - database.DropCollection(session, collectionName); - database.CreateCollection(session, collectionName); + + // For some QE spec tests we need to drop QE state collections (enxcol_.*.esc, enxcol_.*.ecoc). 
+ // DropCollection with EncryptedFields automatically handles cleanup of those QE state collections + database.DropCollection(session, collectionName, new DropCollectionOptions { EncryptedFields = createCollectionOptions.EncryptedFields }); + + database.CreateCollection(session, collectionName, createCollectionOptions); + if (documents.Any()) { var collection = database.GetCollection<BsonDocument>(collectionName); @@ -334,7 +349,7 @@ private void AssertResult(OperationResult actualResult, BsonDocument operation, { if (actualResult.Result != null) { - entityMap.Resutls.Add(saveResultAsEntity.AsString, actualResult.Result); + entityMap.Results.Add(saveResultAsEntity.AsString, actualResult.Result); } else if (actualResult.ChangeStream != null) { @@ -378,9 +393,7 @@ private void KillOpenTransactions(IMongoClient client) // SERVER-38335 serverVersion < new SemanticVersion(4, 1, 9) && ex.Code == (int)ServerErrorCode.Interrupted || // SERVER-54216 - ex.Code == (int)ServerErrorCode.Unauthorized || - // Serverless has a different code for Unauthorized error - ex.Code == (int)ServerErrorCode.UnauthorizedServerless) + ex.Code == (int)ServerErrorCode.Unauthorized) { // ignore errors } diff --git a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedUpdateManyOperation.cs b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedUpdateManyOperation.cs index 5fc30a8b2e0..5d78fd9c7f3 100644 --- a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedUpdateManyOperation.cs +++ b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedUpdateManyOperation.cs @@ -112,6 +112,10 @@ public UnifiedUpdateManyOperation Build(string targetCollectionId, BsonDocument { switch (argument.Name) { + case "bypassDocumentValidation": + options ??= new(); + options.BypassDocumentValidation = argument.Value.AsBoolean; + break; case "comment": options ??= new UpdateOptions(); options.Comment = argument.Value; @@ -130,6 +134,10 @@ public UnifiedUpdateManyOperation Build(string 
targetCollectionId, BsonDocument case "session": session = _entityMap.Sessions[argument.Value.AsString]; break; + case "timeoutMS": + options ??= new UpdateOptions(); + options.Timeout = UnifiedEntityMap.ParseTimeout(argument.Value); + break; case "update": switch (argument.Value) { diff --git a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedUpdateOneOperation.cs b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedUpdateOneOperation.cs index 6daa2089b19..151b335f7ad 100644 --- a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedUpdateOneOperation.cs +++ b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedUpdateOneOperation.cs @@ -112,6 +112,10 @@ public UnifiedUpdateOneOperation Build(string targetCollectionId, BsonDocument a { switch (argument.Name) { + case "bypassDocumentValidation": + options ??= new(); + options.BypassDocumentValidation = argument.Value.AsBoolean; + break; case "comment": options ??= new UpdateOptions<BsonDocument>(); options.Comment = argument.Value; @@ -134,6 +138,10 @@ public UnifiedUpdateOneOperation Build(string targetCollectionId, BsonDocument a options ??= new UpdateOptions<BsonDocument>(); options.Sort = argument.Value.AsBsonDocument; break; + case "timeoutMS": + options ??= new UpdateOptions<BsonDocument>(); + options.Timeout = UnifiedEntityMap.ParseTimeout(argument.Value); + break; case "upsert": options ??= new UpdateOptions<BsonDocument>(); options.IsUpsert = argument.Value.AsBoolean; diff --git a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedWithTransactionOperation.cs b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedWithTransactionOperation.cs index 67a673c61c1..6a40b5d650e 100644 --- a/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedWithTransactionOperation.cs +++ b/tests/MongoDB.Driver.Tests/UnifiedTestOperations/UnifiedWithTransactionOperation.cs @@ -105,7 +105,11 @@ public UnifiedWithTransactionOperation Build(string targetSessionId, BsonDocumen var session = 
_entityMap.Sessions[targetSessionId]; BsonArray operations = null; - TransactionOptions options = null; + TimeSpan? maxCommitTime = null; + ReadConcern readConcern = null; + ReadPreference readPreference = null; + TimeSpan? timeout = null; + WriteConcern writeConcern = null; foreach (var argument in arguments) { @@ -115,26 +119,31 @@ public UnifiedWithTransactionOperation Build(string targetSessionId, BsonDocumen operations = argument.Value.AsBsonArray; break; case "maxCommitTimeMS": - options = options ?? new TransactionOptions(); - options = options.With(maxCommitTime: TimeSpan.FromMilliseconds(argument.Value.AsInt32)); + maxCommitTime = TimeSpan.FromMilliseconds(argument.Value.AsInt32); break; case "readConcern": - options = options ?? new TransactionOptions(); - options = options.With(readConcern: ReadConcern.FromBsonDocument(argument.Value.AsBsonDocument)); + readConcern = ReadConcern.FromBsonDocument(argument.Value.AsBsonDocument); break; case "readPreference": - options = options ?? new TransactionOptions(); - options = options.With(readPreference: ReadPreference.FromBsonDocument(argument.Value.AsBsonDocument)); + readPreference = ReadPreference.FromBsonDocument(argument.Value.AsBsonDocument); + break; + case "timeoutMS": + timeout = UnifiedEntityMap.ParseTimeout(argument.Value); break; case "writeConcern": - options = options ?? 
new TransactionOptions(); - options = options.With(writeConcern: UnifiedEntityMap.ParseWriteConcern(argument.Value.AsBsonDocument)); + writeConcern = UnifiedEntityMap.ParseWriteConcern(argument.Value.AsBsonDocument); break; default: throw new FormatException($"Invalid WithTransactionOperation argument name: '{argument.Name}'."); } } + TransactionOptions options = null; + if (maxCommitTime.HasValue || readConcern != null || readPreference != null || timeout.HasValue || writeConcern != null) + { + options = new TransactionOptions(timeout, readConcern, readPreference, writeConcern, maxCommitTime); + } + return new UnifiedWithTransactionOperation(session, operations, options); } } diff --git a/tests/MongoDB.Driver.Tests/X509Tests.cs b/tests/MongoDB.Driver.Tests/X509Tests.cs new file mode 100644 index 00000000000..a948204ae6f --- /dev/null +++ b/tests/MongoDB.Driver.Tests/X509Tests.cs @@ -0,0 +1,125 @@ +/* Copyright 2010-present MongoDB Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +using System; +using System.Security.Cryptography.X509Certificates; +using FluentAssertions; +using MongoDB.Driver.Core.TestHelpers.XunitExtensions; +using MongoDB.TestHelpers.XunitExtensions; +using Xunit; + +namespace MongoDB.Driver.Tests; + +[Trait("Category", "Integration")] +[Trait("Category", "X509")] +public class X509Tests +{ + const string MONGODB_X509_CLIENT_CERTIFICATE_PATH = "MONGO_X509_CLIENT_CERTIFICATE_PATH"; + const string MONGODB_X509_CLIENT_CERTIFICATE_PASSWORD = "MONGO_X509_CLIENT_CERTIFICATE_PASSWORD"; + + const string MONGO_X509_CLIENT_NO_USER_CERTIFICATE_PATH = "MONGO_X509_CLIENT_NO_USER_CERTIFICATE_PATH"; + const string MONGO_X509_CLIENT_NO_USER_CERTIFICATE_PASSWORD = "MONGO_X509_CLIENT_NO_USER_CERTIFICATE_PASSWORD"; + + [Fact] + public void Authentication_succeeds_with_MONGODB_X509_mechanism() + { + var clientCertificate = GetClientCertificate(CertificateType.MONGO_X509); + + var settings = DriverTestConfiguration.GetClientSettings(); + settings.SslSettings.ClientCertificates = [clientCertificate]; + + AssertAuthenticationSucceeds(settings); + } + + [Fact] + public void Authentication_fails_with_MONGODB_X509_mechanism_when_username_is_wrong() + { + var clientCertificate = GetClientCertificate(CertificateType.MONGO_X509); + + var settings = DriverTestConfiguration.GetClientSettings(); + settings.Credential = MongoCredential.CreateMongoX509Credential("wrong_username"); + settings.SslSettings.ClientCertificates = [clientCertificate]; + + AssertAuthenticationFails(settings); + } + + [Fact] + public void Authentication_fails_with_MONGODB_X509_mechanism_when_user_is_not_in_database() + { + var noUserClientCertificate = GetClientCertificate(CertificateType.MONGO_X509_CLIENT_NO_USER); + + var settings = DriverTestConfiguration.GetClientSettings(); + settings.SslSettings.ClientCertificates = [noUserClientCertificate]; + + AssertAuthenticationFails(settings, "Could not find user"); + } + + private void 
AssertAuthenticationSucceeds(MongoClientSettings settings) + { + using var client = DriverTestConfiguration.CreateMongoClient(settings); + _ = client.ListDatabaseNames().ToList(); + } + + private void AssertAuthenticationFails(MongoClientSettings settings, string innerExceptionMessage = null) + { + using var client = DriverTestConfiguration.CreateMongoClient(settings); + var exception = Record.Exception(() => client.ListDatabaseNames().ToList()); + exception.Should().BeOfType<MongoAuthenticationException>(); + + if (innerExceptionMessage != null) + { + var innerException = exception.InnerException; + innerException.Should().BeOfType<MongoCommandException>(); + innerException.Message.Should().Contain(innerExceptionMessage); + } + } + + private enum CertificateType + { + MONGO_X509, + MONGO_X509_CLIENT_NO_USER + } + + private X509Certificate2 GetClientCertificate(CertificateType certificateType) + { + RequireServer.Check().Tls(required: true); + + string pathVariable = null; + string passwordVariable = null; + + switch (certificateType) + { + case CertificateType.MONGO_X509: + pathVariable = MONGODB_X509_CLIENT_CERTIFICATE_PATH; + passwordVariable = MONGODB_X509_CLIENT_CERTIFICATE_PASSWORD; + break; + case CertificateType.MONGO_X509_CLIENT_NO_USER: + pathVariable = MONGO_X509_CLIENT_NO_USER_CERTIFICATE_PATH; + passwordVariable = MONGO_X509_CLIENT_NO_USER_CERTIFICATE_PASSWORD; + break; + default: + throw new ArgumentException("Wrong certificate type specified.", nameof(certificateType)); + } + + RequireEnvironment.Check() + .EnvironmentVariable(pathVariable, isDefined: true) + .EnvironmentVariable(passwordVariable, isDefined: true); + + var path = Environment.GetEnvironmentVariable(pathVariable); + var password = Environment.GetEnvironmentVariable(passwordVariable); + + return new X509Certificate2(path, password); + } +} \ No newline at end of file diff --git a/tests/MongoDB.TestHelpers/MongoDB.TestHelpers.csproj b/tests/MongoDB.TestHelpers/MongoDB.TestHelpers.csproj 
index 9c2d724e529..f26db14e0fb 100644 --- a/tests/MongoDB.TestHelpers/MongoDB.TestHelpers.csproj +++ b/tests/MongoDB.TestHelpers/MongoDB.TestHelpers.csproj @@ -2,6 +2,7 @@ <Import Project="..\BuildProps\Tests.Build.props" /> <PropertyGroup> + <IsTestProject>false</IsTestProject> <CodeAnalysisRuleSet>..\..\MongoDBLegacyTest.ruleset</CodeAnalysisRuleSet> </PropertyGroup> diff --git a/tests/MongoDB.TestHelpers/XunitExtensions/IntegrationTestAttribute.cs b/tests/MongoDB.TestHelpers/XunitExtensions/IntegrationTestAttribute.cs deleted file mode 100644 index 06b49d22336..00000000000 --- a/tests/MongoDB.TestHelpers/XunitExtensions/IntegrationTestAttribute.cs +++ /dev/null @@ -1,25 +0,0 @@ -/* Copyright 2010-present MongoDB Inc. -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. 
-*/ - -namespace MongoDB.TestHelpers.XunitExtensions -{ - public sealed class IntegrationTestAttribute : CategoryAttribute - { - public IntegrationTestAttribute() - : base("Integration") - { - } - } -} diff --git a/tests/MongoDB.TestHelpers/XunitExtensions/ValuesAttribute.cs b/tests/MongoDB.TestHelpers/XunitExtensions/ValuesAttribute.cs index 18a949b0eed..b39b3b0c56d 100644 --- a/tests/MongoDB.TestHelpers/XunitExtensions/ValuesAttribute.cs +++ b/tests/MongoDB.TestHelpers/XunitExtensions/ValuesAttribute.cs @@ -30,6 +30,11 @@ public ValuesAttribute(params object[] values) public object[] GenerateValues() { + if (_values.Distinct().Count() != _values.Length) + { + throw new InvalidOperationException(); + } + return _values; } } diff --git a/tests/SmokeTests/MongoDB.Driver.SmokeTests.Sdk/LibmongocryptTests.cs b/tests/SmokeTests/MongoDB.Driver.SmokeTests.Sdk/LibmongocryptTests.cs index 627a5ea99d7..db3edd2394d 100644 --- a/tests/SmokeTests/MongoDB.Driver.SmokeTests.Sdk/LibmongocryptTests.cs +++ b/tests/SmokeTests/MongoDB.Driver.SmokeTests.Sdk/LibmongocryptTests.cs @@ -15,16 +15,20 @@ using System; using System.Collections.Generic; +using System.Runtime.InteropServices; using System.Threading; +using FluentAssertions; using Microsoft.Extensions.Logging; using MongoDB.Bson; using MongoDB.Driver.Core.Configuration; +using MongoDB.Driver.Core.Misc; using MongoDB.Driver.Encryption; using Xunit; using Xunit.Abstractions; namespace MongoDB.Driver.SmokeTests.Sdk { + [Trait("Category", "Integration")] public class LibmongocryptTests { private const string LocalMasterKey = "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk"; @@ -111,6 +115,21 @@ public void Explicit_encryption_with_libmongocrypt_package_works() var result = collection.Find(FilterDefinition<BsonDocument>.Empty).First(); _output.WriteLine(result.ToJson()); } + catch (Exception ex) + { + // SERVER-106469 +#pragma warning disable CS0618 // 
Type or member is obsolete + var serverVersion = client.Cluster.Description.Servers[0].Version; +#pragma warning restore CS0618 // Type or member is obsolete + if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows) && + serverVersion >= new SemanticVersion(8, 1, 9999)) + { + ex.Should().BeOfType<MongoEncryptionException>(); + return; + } + + throw; + } finally { ClusterRegistry.Instance.UnregisterAndDisposeCluster(client.Cluster); diff --git a/tests/SmokeTests/MongoDB.Driver.SmokeTests.Sdk/LoggingTests.cs b/tests/SmokeTests/MongoDB.Driver.SmokeTests.Sdk/LoggingTests.cs index a51cfd21a6e..21e78fea664 100644 --- a/tests/SmokeTests/MongoDB.Driver.SmokeTests.Sdk/LoggingTests.cs +++ b/tests/SmokeTests/MongoDB.Driver.SmokeTests.Sdk/LoggingTests.cs @@ -23,6 +23,7 @@ namespace MongoDB.Driver.SmokeTests.Sdk { + [Trait("Category", "Integration")] public sealed class LoggingTests { private readonly ITestOutputHelper _output;