diff --git a/bson/src/main/org/bson/AbstractBsonWriter.java b/bson/src/main/org/bson/AbstractBsonWriter.java index a7cc978f8ba..8a8b238e8af 100644 --- a/bson/src/main/org/bson/AbstractBsonWriter.java +++ b/bson/src/main/org/bson/AbstractBsonWriter.java @@ -748,6 +748,15 @@ protected void throwInvalidState(final String methodName, final State... validSt methodName, validStatesString, state)); } + /** + * {@inheritDoc} + *
<p>
+ * The {@link #flush()} method of {@link AbstractBsonWriter} does nothing.</p>
+ */ + @Override + public void flush() { + } + @Override public void close() { closed = true; diff --git a/bson/src/main/org/bson/BSONCallbackAdapter.java b/bson/src/main/org/bson/BSONCallbackAdapter.java index d00a2eaecbf..1d8b5ffe746 100644 --- a/bson/src/main/org/bson/BSONCallbackAdapter.java +++ b/bson/src/main/org/bson/BSONCallbackAdapter.java @@ -39,11 +39,6 @@ protected BSONCallbackAdapter(final BsonWriterSettings settings, final BSONCallb this.bsonCallback = bsonCallback; } - @Override - public void flush() { - //Looks like should be no-op? - } - @Override public void doWriteStartDocument() { BsonContextType contextType = getState() == State.SCOPE_DOCUMENT diff --git a/bson/src/main/org/bson/BsonBinaryWriter.java b/bson/src/main/org/bson/BsonBinaryWriter.java index d9301fd5cb3..e6255ea8478 100644 --- a/bson/src/main/org/bson/BsonBinaryWriter.java +++ b/bson/src/main/org/bson/BsonBinaryWriter.java @@ -108,10 +108,6 @@ public BsonBinaryWriterSettings getBinaryWriterSettings() { return binaryWriterSettings; } - @Override - public void flush() { - } - @Override protected Context getContext() { return (Context) super.getContext(); diff --git a/bson/src/main/org/bson/BsonDocumentWriter.java b/bson/src/main/org/bson/BsonDocumentWriter.java index 7c36a368336..a34188645cd 100644 --- a/bson/src/main/org/bson/BsonDocumentWriter.java +++ b/bson/src/main/org/bson/BsonDocumentWriter.java @@ -194,10 +194,6 @@ public void doWriteUndefined() { write(new BsonUndefined()); } - @Override - public void flush() { - } - @Override protected Context getContext() { return (Context) super.getContext(); diff --git a/bson/src/main/org/bson/io/OutputBuffer.java b/bson/src/main/org/bson/io/OutputBuffer.java index 8793acad9a2..00f88cea706 100644 --- a/bson/src/main/org/bson/io/OutputBuffer.java +++ b/bson/src/main/org/bson/io/OutputBuffer.java @@ -41,6 +41,16 @@ public void write(final byte[] b) { public void close() { } + /** + * {@inheritDoc} + *
<p>
+ * The {@link #flush()} method of {@link OutputBuffer} does nothing.</p>
+ */ + @Override + public void flush() throws IOException { + super.flush(); + } + @Override public void write(final byte[] bytes, final int offset, final int length) { writeBytes(bytes, offset, length); diff --git a/build.gradle b/build.gradle index e846ea53d93..df2f70c49de 100644 --- a/build.gradle +++ b/build.gradle @@ -59,6 +59,7 @@ ext { junitBomVersion = '5.10.2' logbackVersion = '1.3.14' graalSdkVersion = '24.0.0' + reflectionsVersion = '0.9.10' gitVersion = getGitVersion() } @@ -128,7 +129,7 @@ configure(scalaProjects) { testImplementation('org.scalatestplus:junit-4-13_%%:3.2.9.0') testImplementation('org.scalatestplus:mockito-3-12_%%:3.2.10.0') testImplementation("ch.qos.logback:logback-classic:$logbackVersion") - testImplementation('org.reflections:reflections:0.9.10') + testImplementation("org.reflections:reflections:$reflectionsVersion") } test{ diff --git a/config/detekt/detekt.yml b/config/detekt/detekt.yml index 4c083b0bce5..4ac460b0738 100644 --- a/config/detekt/detekt.yml +++ b/config/detekt/detekt.yml @@ -159,7 +159,7 @@ complexity: active: true excludes: ['**/test/**'] thresholdInFiles: 25 - thresholdInClasses: 25 + thresholdInClasses: 27 thresholdInInterfaces: 25 thresholdInObjects: 25 thresholdInEnums: 25 diff --git a/driver-core/build.gradle b/driver-core/build.gradle index 70061ca2b1e..a44f65bbc1b 100644 --- a/driver-core/build.gradle +++ b/driver-core/build.gradle @@ -60,6 +60,7 @@ dependencies { testImplementation project(':bson').sourceSets.test.output testImplementation('org.junit.jupiter:junit-jupiter-api') + testImplementation("org.reflections:reflections:$reflectionsVersion") testRuntimeOnly "io.netty:netty-tcnative-boringssl-static" classifiers.forEach { diff --git a/driver-core/src/main/com/mongodb/ClientBulkWriteException.java b/driver-core/src/main/com/mongodb/ClientBulkWriteException.java new file mode 100644 index 00000000000..fcc23b45f71 --- /dev/null +++ b/driver-core/src/main/com/mongodb/ClientBulkWriteException.java @@ -0,0 +1,154 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb; + +import com.mongodb.bulk.WriteConcernError; +import com.mongodb.client.model.bulk.ClientBulkWriteResult; +import com.mongodb.client.model.bulk.ClientNamespacedWriteModel; +import com.mongodb.lang.Nullable; + +import java.util.List; +import java.util.Map; +import java.util.Optional; + +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.operation.ClientBulkWriteOperation.Exceptions.serverAddressFromException; +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; +import static java.util.Collections.unmodifiableList; +import static java.util.Collections.unmodifiableMap; +import static java.util.Optional.ofNullable; + +/** + * The result of an unsuccessful or partially unsuccessful client-level bulk write operation. 
+ * Note that the {@linkplain #getCode() code} and {@linkplain #getErrorLabels() labels} from this exception are not useful. + * An application should use those from the {@linkplain #getCause() top-level error}. + * + * @see ClientBulkWriteResult + * @since 5.3 + * @serial exclude + */ +public final class ClientBulkWriteException extends MongoServerException { + private static final long serialVersionUID = 1; + + private final List writeConcernErrors; + private final Map writeErrors; + @Nullable + private final ClientBulkWriteResult partialResult; + + /** + * Constructs a new instance. + * + * @param error The {@linkplain #getCause() top-level error}. + * @param writeConcernErrors The {@linkplain #getWriteConcernErrors() write concern errors}. + * @param writeErrors The {@linkplain #getWriteErrors() write errors}. + * @param partialResult The {@linkplain #getPartialResult() partial result}. + * @param serverAddress The {@linkplain MongoServerException#getServerAddress() server address}. + * If {@code error} is a {@link MongoServerException} or a {@link MongoSocketException}, then {@code serverAddress} + * must be equal to the {@link ServerAddress} they bear. + */ + public ClientBulkWriteException( + @Nullable final MongoException error, + @Nullable final List writeConcernErrors, + @Nullable final Map writeErrors, + @Nullable final ClientBulkWriteResult partialResult, + final ServerAddress serverAddress) { + super( + message( + error, writeConcernErrors, writeErrors, partialResult, + notNull("serverAddress", serverAddress)), + validateServerAddress(error, serverAddress)); + initCause(error); + isTrueArgument("At least one of `writeConcernErrors`, `writeErrors`, `partialResult` must be non-null or non-empty", + !(writeConcernErrors == null || writeConcernErrors.isEmpty()) + || !(writeErrors == null || writeErrors.isEmpty()) + || partialResult != null); + this.writeConcernErrors = writeConcernErrors == null ? emptyList() : unmodifiableList(writeConcernErrors); + this.writeErrors = writeErrors == null ? emptyMap() : unmodifiableMap(writeErrors); + this.partialResult = partialResult; + } + + private static String message( + @Nullable final MongoException error, + @Nullable final List writeConcernErrors, + @Nullable final Map writeErrors, + @Nullable final ClientBulkWriteResult partialResult, + final ServerAddress serverAddress) { + return "Client-level bulk write operation error on server " + serverAddress + "." + + (error == null ? "" : " Top-level error: " + error + ".") + + (writeErrors == null || writeErrors.isEmpty() ? "" : " Write errors: " + writeErrors + ".") + + (writeConcernErrors == null || writeConcernErrors.isEmpty() ? "" : " Write concern errors: " + writeConcernErrors + ".") + + (partialResult == null ? "" : " Partial result: " + partialResult + "."); + } + + private static ServerAddress validateServerAddress(@Nullable final MongoException error, final ServerAddress serverAddress) { + serverAddressFromException(error).ifPresent(serverAddressFromError -> + isTrueArgument("`serverAddress` must be equal to that of the `error`", serverAddressFromError.equals(serverAddress))); + return error instanceof MongoServerException + ? ((MongoServerException) error).getServerAddress() + : serverAddress; + } + + /** + * The top-level error. That is an error that is neither a {@linkplain #getWriteConcernErrors() write concern error}, + * nor is an {@linkplain #getWriteErrors() error of an individual write operation}. + * + * @return The top-level error. 
Non-{@code null} only if a top-level error occurred. + */ + @Override + @Nullable + public MongoException getCause() { + return (MongoException) super.getCause(); + } + + /** + * The {@link WriteConcernError}s that occurred while executing the client-level bulk write operation. + *
<p>
+ * There are no guarantees on mutability of the {@link List} returned.</p>
+ * + * @return The {@link WriteConcernError}s. + */ + public List getWriteConcernErrors() { + return writeConcernErrors; + } + + /** + * The indexed {@link WriteError}s. + * The {@linkplain Map#keySet() keys} are the indexes of the corresponding {@link ClientNamespacedWriteModel}s + * in the corresponding client-level bulk write operation. + *
<p>
+ * There are no guarantees on mutability or iteration order of the {@link Map} returned.</p>
+ * + * @return The indexed {@link WriteError}s. + * @see ClientBulkWriteResult.VerboseResults#getInsertResults() + * @see ClientBulkWriteResult.VerboseResults#getUpdateResults() + * @see ClientBulkWriteResult.VerboseResults#getDeleteResults() + */ + public Map getWriteErrors() { + return writeErrors; + } + + /** + * The result of the part of a client-level bulk write operation that is known to be successful. + * + * @return The successful partial result. {@linkplain Optional#isPresent() Present} only if the client received a response indicating success + * of at least one {@linkplain ClientNamespacedWriteModel individual write operation}. + */ + public Optional getPartialResult() { + return ofNullable(partialResult); + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/BaseClientDeleteOptions.java b/driver-core/src/main/com/mongodb/client/model/bulk/BaseClientDeleteOptions.java new file mode 100644 index 00000000000..8c0a74406ef --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/BaseClientDeleteOptions.java @@ -0,0 +1,23 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.bulk; + +/** + * The methods declared in this interface are part of the public API of subclasses or sub-interfaces. + */ +interface BaseClientDeleteOptions extends BaseClientWriteModelOptions { +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/BaseClientUpdateOptions.java b/driver-core/src/main/com/mongodb/client/model/bulk/BaseClientUpdateOptions.java new file mode 100644 index 00000000000..10b97e2f570 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/BaseClientUpdateOptions.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.bulk; + +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +/** + * The methods declared in this interface are part of the public API of subclasses or sub-interfaces. 
+ */ +interface BaseClientUpdateOptions extends BaseClientWriteModelOptions, BaseClientUpsertableWriteModelOptions { + + BaseClientUpdateOptions arrayFilters(@Nullable Iterable arrayFilters); +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/BaseClientUpsertableWriteModelOptions.java b/driver-core/src/main/com/mongodb/client/model/bulk/BaseClientUpsertableWriteModelOptions.java new file mode 100644 index 00000000000..d26a96e1ba5 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/BaseClientUpsertableWriteModelOptions.java @@ -0,0 +1,26 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.bulk; + +import com.mongodb.lang.Nullable; + +/** + * The methods declared in this interface are part of the public API of subclasses or sub-interfaces. + */ +interface BaseClientUpsertableWriteModelOptions { + BaseClientUpsertableWriteModelOptions upsert(@Nullable Boolean upsert); +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/BaseClientWriteModelOptions.java b/driver-core/src/main/com/mongodb/client/model/bulk/BaseClientWriteModelOptions.java new file mode 100644 index 00000000000..f7cd4e7a491 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/BaseClientWriteModelOptions.java @@ -0,0 +1,32 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.bulk; + +import com.mongodb.client.model.Collation; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +/** + * The methods declared in this interface are part of the public API of subclasses or sub-interfaces. + */ +interface BaseClientWriteModelOptions { + BaseClientWriteModelOptions collation(@Nullable Collation collation); + + BaseClientWriteModelOptions hint(@Nullable Bson hint); + + BaseClientWriteModelOptions hintString(@Nullable String hintString); +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/ClientBulkWriteOptions.java b/driver-core/src/main/com/mongodb/client/model/bulk/ClientBulkWriteOptions.java new file mode 100644 index 00000000000..942a37c43df --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/ClientBulkWriteOptions.java @@ -0,0 +1,90 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.bulk; + +import com.mongodb.annotations.Sealed; +import com.mongodb.client.model.Filters; +import com.mongodb.internal.client.model.bulk.ConcreteClientBulkWriteOptions; +import com.mongodb.lang.Nullable; +import org.bson.BsonValue; +import org.bson.conversions.Bson; + +/** + * The options to apply when executing a client-level bulk write operation. + * + * @since 5.3 + */ +@Sealed +public interface ClientBulkWriteOptions { + /** + * Creates the default options. + * + * @return The default options. + */ + static ClientBulkWriteOptions clientBulkWriteOptions() { + return new ConcreteClientBulkWriteOptions(); + } + + /** + * Enables or disables ordered execution of {@linkplain ClientNamespacedWriteModel individual write operations}. + * In an ordered execution a failure of an individual operation prevents the rest of them + * from being executed. + * In an unordered execution failures of individual operations do not prevent the rest of them + * from being executed. + * + * @param ordered The ordered flag. If {@code null}, the client defaults to {@code true}. + * @return {@code this}. + */ + ClientBulkWriteOptions ordered(@Nullable Boolean ordered); + + /** + * Disables or enables checking against document validation rules, a.k.a., schema validation. + * + * @param bypassDocumentValidation The flag specifying whether to bypass the document validation rules. + * {@code null} represents the server default. + * @return {@code this}. + */ + ClientBulkWriteOptions bypassDocumentValidation(@Nullable Boolean bypassDocumentValidation); + + /** + * Sets variables that can be referenced from {@linkplain ClientNamespacedWriteModel individual write operations} + * with the {@code "$$"} syntax, which in turn requires using {@link Filters#expr(Object)} when specifying filters. + * Values must be constants or expressions that do not reference fields. + * + * @param let The variables. {@code null} represents the server default. + * @return {@code this}. + * @mongodb.driver.manual reference/aggregation-variables/ Variables in Aggregation Expressions + */ + ClientBulkWriteOptions let(@Nullable Bson let); + + /** + * Sets the comment to attach to the {@code bulkWrite} administration command. + * + * @param comment The comment. {@code null} represents the server default. + * @return {@code this}. + */ + ClientBulkWriteOptions comment(@Nullable BsonValue comment); + + /** + * Enables or disables requesting {@linkplain ClientBulkWriteResult#getVerboseResults() verbose results}. + * + * @param verboseResults The flag specifying whether to request verbose results. + * If {@code null}, the client defaults to {@code false}. + * This value corresponds inversely to the {@code errorsOnly} field of the {@code bulkWrite} administration command. + * @return {@code this}. 
+ */ + ClientBulkWriteOptions verboseResults(@Nullable Boolean verboseResults); +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/ClientBulkWriteResult.java b/driver-core/src/main/com/mongodb/client/model/bulk/ClientBulkWriteResult.java new file mode 100644 index 00000000000..04257cb8460 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/ClientBulkWriteResult.java @@ -0,0 +1,136 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.bulk; + +import com.mongodb.ClientBulkWriteException; +import com.mongodb.WriteConcern; +import com.mongodb.annotations.Evolving; + +import java.util.Map; +import java.util.Optional; + +/** + * The result of a successful or partially successful client-level bulk write operation. + * Note that if a client-level bulk write operation fails while some of the + * {@linkplain ClientNamespacedWriteModel individual write operations} are known to be successful, + * then the successful partial result is still accessible via {@link ClientBulkWriteException#getPartialResult()}. + * + * @see ClientBulkWriteException + * @since 5.3 + */ +@Evolving +public interface ClientBulkWriteResult { + /** + * Indicates whether this result was {@linkplain WriteConcern#isAcknowledged() acknowledged}. + * If not, then all other methods throw {@link UnsupportedOperationException}. + * + * @return Whether this result was acknowledged. + */ + boolean isAcknowledged(); + + /** + * The number of documents that were inserted across all insert operations. + * + * @return The number of documents that were inserted. + * @throws UnsupportedOperationException If this result is not {@linkplain #isAcknowledged() acknowledged}. + */ + long getInsertedCount(); + + /** + * The number of documents that were upserted across all update and replace operations. + * + * @return The number of documents that were upserted. + * @throws UnsupportedOperationException If this result is not {@linkplain #isAcknowledged() acknowledged}. + */ + long getUpsertedCount(); + + /** + * The number of documents that matched the filters across all operations with filters. + * + * @return The number of documents that were matched. + * @throws UnsupportedOperationException If this result is not {@linkplain #isAcknowledged() acknowledged}. + */ + long getMatchedCount(); + + /** + * The number of documents that were modified across all update and replace operations. + * + * @return The number of documents that were modified. + * @throws UnsupportedOperationException If this result is not {@linkplain #isAcknowledged() acknowledged}. + */ + long getModifiedCount(); + + /** + * The number of documents that were deleted across all delete operations. + * + * @return The number of documents that were deleted. + * @throws UnsupportedOperationException If this result is not {@linkplain #isAcknowledged() acknowledged}. + */ + long getDeletedCount(); + + /** + * The verbose results of individual operations. 
+ * + * @return {@link Optional} verbose results of individual operations. + * @throws UnsupportedOperationException If this result is not {@linkplain #isAcknowledged() acknowledged}. + * @see ClientBulkWriteOptions#verboseResults(Boolean) + */ + Optional getVerboseResults(); + + /** + * The {@linkplain ClientBulkWriteResult#getVerboseResults() verbose results} of individual operations. + * + * @since 5.3 + */ + @Evolving + interface VerboseResults { + /** + * The indexed {@link ClientInsertOneResult}s. + * The {@linkplain Map#keySet() keys} are the indexes of the corresponding {@link ClientNamespacedWriteModel}s + * in the client-level bulk write operation. + *
<p>
+ * There are no guarantees on mutability or iteration order of the {@link Map} returned.</p>
+ * + * @return The indexed {@link ClientInsertOneResult}s. + * @see ClientBulkWriteException#getWriteErrors() + */ + Map getInsertResults(); + + /** + * The indexed {@link ClientUpdateResult}s. + * The {@linkplain Map#keySet() keys} are the indexes of the corresponding {@link ClientNamespacedWriteModel}s + * in the client-level bulk write operation. + *
<p>
+ * There are no guarantees on mutability or iteration order of the {@link Map} returned.</p>
+ * + * @return The indexed {@link ClientUpdateResult}s. + * @see ClientBulkWriteException#getWriteErrors() + */ + Map getUpdateResults(); + + /** + * The indexed {@link ClientDeleteResult}s. + * The {@linkplain Map#keySet() keys} are the indexes of the corresponding {@link ClientNamespacedWriteModel}s + * in the client-level bulk write operation. + *
<p>
+ * There are no guarantees on mutability or iteration order of the {@link Map} returned.</p>
+ * + * @return The indexed {@link ClientDeleteResult}s. + * @see ClientBulkWriteException#getWriteErrors() + */ + Map getDeleteResults(); + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/ClientDeleteManyOptions.java b/driver-core/src/main/com/mongodb/client/model/bulk/ClientDeleteManyOptions.java new file mode 100644 index 00000000000..f899c5244c3 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/ClientDeleteManyOptions.java @@ -0,0 +1,68 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.bulk; + +import com.mongodb.annotations.Sealed; +import com.mongodb.client.model.Collation; +import com.mongodb.internal.client.model.bulk.ConcreteClientDeleteManyOptions; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +/** + * The options to apply when deleting documents. + * + * @since 5.3 + */ +@Sealed +public interface ClientDeleteManyOptions extends BaseClientDeleteOptions { + /** + * Creates the default options. + * + * @return The default options. + */ + static ClientDeleteManyOptions clientDeleteManyOptions() { + return new ConcreteClientDeleteManyOptions(); + } + + /** + * Sets the collation. + * + * @param collation The collation. {@code null} represents the server default. + * @return {@code this}. + */ + @Override + ClientDeleteManyOptions collation(@Nullable Collation collation); + + /** + * Sets the index specification, + * {@code null}-ifies {@linkplain #hintString(String) hint string}. + * + * @param hint The index specification. {@code null} represents the server default. + * @return {@code this}. + */ + @Override + ClientDeleteManyOptions hint(@Nullable Bson hint); + + /** + * Sets the index name, + * {@code null}-ifies {@linkplain #hint(Bson) hint}. + * + * @param hintString The index name. {@code null} represents the server default. + * @return {@code this}. + */ + @Override + ClientDeleteManyOptions hintString(@Nullable String hintString); +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/ClientDeleteOneOptions.java b/driver-core/src/main/com/mongodb/client/model/bulk/ClientDeleteOneOptions.java new file mode 100644 index 00000000000..0c515c7960b --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/ClientDeleteOneOptions.java @@ -0,0 +1,68 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.client.model.bulk; + +import com.mongodb.annotations.Sealed; +import com.mongodb.client.model.Collation; +import com.mongodb.internal.client.model.bulk.ConcreteClientDeleteOneOptions; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +/** + * The options to apply when deleting a document. + * + * @since 5.3 + */ +@Sealed +public interface ClientDeleteOneOptions extends BaseClientDeleteOptions { + /** + * Creates the default options. + * + * @return The default options. + */ + static ClientDeleteOneOptions clientDeleteOneOptions() { + return new ConcreteClientDeleteOneOptions(); + } + + /** + * Sets the collation. + * + * @param collation The collation. {@code null} represents the server default. + * @return {@code this}. + */ + @Override + ClientDeleteOneOptions collation(@Nullable Collation collation); + + /** + * Sets the index specification, + * {@code null}-ifies {@linkplain #hintString(String) hint string}. + * + * @param hint The index specification. {@code null} represents the server default. + * @return {@code this}. + */ + @Override + ClientDeleteOneOptions hint(@Nullable Bson hint); + + /** + * Sets the index name, + * {@code null}-ifies {@linkplain #hint(Bson) hint}. + * + * @param hintString The index name. {@code null} represents the server default. + * @return {@code this}. + */ + @Override + ClientDeleteOneOptions hintString(@Nullable String hintString); +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/ClientDeleteResult.java b/driver-core/src/main/com/mongodb/client/model/bulk/ClientDeleteResult.java new file mode 100644 index 00000000000..fcf66488114 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/ClientDeleteResult.java @@ -0,0 +1,35 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.bulk; + +import com.mongodb.annotations.Evolving; +import com.mongodb.bulk.WriteConcernError; + +/** + * The result of a successful {@linkplain ClientNamespacedWriteModel individual delete operation}. + * Note that {@link WriteConcernError}s are not considered as making individual operations unsuccessful. + * + * @since 5.3 + */ +@Evolving +public interface ClientDeleteResult { + /** + * The number of documents that were deleted. + * + * @return The number of documents that were deleted. + */ + long getDeletedCount(); +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/ClientInsertOneResult.java b/driver-core/src/main/com/mongodb/client/model/bulk/ClientInsertOneResult.java new file mode 100644 index 00000000000..960078c6be5 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/ClientInsertOneResult.java @@ -0,0 +1,42 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.bulk; + +import com.mongodb.annotations.Evolving; +import com.mongodb.bulk.WriteConcernError; +import org.bson.BsonValue; +import org.bson.RawBsonDocument; + +import java.util.Optional; + +/** + * The result of a successful {@linkplain ClientNamespacedWriteModel individual insert one operation}. + * Note that {@link WriteConcernError}s are not considered as making individual operations unsuccessful. + * + * @since 5.3 + */ +@Evolving +public interface ClientInsertOneResult { + /** + * The {@code "_id"} of the inserted document. + * + * @return The {@code "_id"} of the inserted document. + * {@linkplain Optional#isPresent() Present} unless a {@link RawBsonDocument} is inserted, + * because the driver neither generates the missing {@code "_id"} field for a {@link RawBsonDocument}, + * nor does it read the {@code "_id"} field from a {@link RawBsonDocument} when inserting it. + */ + Optional getInsertedId(); +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/ClientNamespacedDeleteManyModel.java b/driver-core/src/main/com/mongodb/client/model/bulk/ClientNamespacedDeleteManyModel.java new file mode 100644 index 00000000000..a4e445e5e86 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/ClientNamespacedDeleteManyModel.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.bulk; + +import com.mongodb.annotations.Sealed; + +/** + * A model for deleting all documents matching a filter. + * + * @since 5.3 + */ +@Sealed +public interface ClientNamespacedDeleteManyModel extends ClientNamespacedWriteModel { +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/ClientNamespacedDeleteOneModel.java b/driver-core/src/main/com/mongodb/client/model/bulk/ClientNamespacedDeleteOneModel.java new file mode 100644 index 00000000000..0ba508007a6 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/ClientNamespacedDeleteOneModel.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.bulk; + +import com.mongodb.annotations.Sealed; + +/** + * A model for deleting at most one document matching a filter. + * + * @since 5.3 + */ +@Sealed +public interface ClientNamespacedDeleteOneModel extends ClientNamespacedWriteModel { +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/ClientNamespacedInsertOneModel.java b/driver-core/src/main/com/mongodb/client/model/bulk/ClientNamespacedInsertOneModel.java new file mode 100644 index 00000000000..66d9f39c74d --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/ClientNamespacedInsertOneModel.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.bulk; + +import com.mongodb.annotations.Sealed; + +/** + * A model for inserting a document. + * + * @since 5.3 + */ +@Sealed +public interface ClientNamespacedInsertOneModel extends ClientNamespacedWriteModel { +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/ClientNamespacedReplaceOneModel.java b/driver-core/src/main/com/mongodb/client/model/bulk/ClientNamespacedReplaceOneModel.java new file mode 100644 index 00000000000..a4edf9b716a --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/ClientNamespacedReplaceOneModel.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.bulk; + +import com.mongodb.annotations.Sealed; + +/** + * A model for replacing at most one document matching a filter. + * + * @since 5.3 + */ +@Sealed +public interface ClientNamespacedReplaceOneModel extends ClientNamespacedWriteModel { +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/ClientNamespacedUpdateManyModel.java b/driver-core/src/main/com/mongodb/client/model/bulk/ClientNamespacedUpdateManyModel.java new file mode 100644 index 00000000000..3900c8779f7 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/ClientNamespacedUpdateManyModel.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.bulk; + +import com.mongodb.annotations.Sealed; + +/** + * A model for updating all documents matching a filter. + * + * @since 5.3 + */ +@Sealed +public interface ClientNamespacedUpdateManyModel extends ClientNamespacedWriteModel { +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/ClientNamespacedUpdateOneModel.java b/driver-core/src/main/com/mongodb/client/model/bulk/ClientNamespacedUpdateOneModel.java new file mode 100644 index 00000000000..3d9e785004f --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/ClientNamespacedUpdateOneModel.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.bulk; + +import com.mongodb.annotations.Sealed; + +/** + * A model for updating at most one document matching a filter. + * + * @since 5.3 + */ +@Sealed +public interface ClientNamespacedUpdateOneModel extends ClientNamespacedWriteModel { +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/ClientNamespacedWriteModel.java b/driver-core/src/main/com/mongodb/client/model/bulk/ClientNamespacedWriteModel.java new file mode 100644 index 00000000000..3673c35a9de --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/ClientNamespacedWriteModel.java @@ -0,0 +1,325 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model.bulk; + +import com.mongodb.MongoNamespace; +import com.mongodb.annotations.Sealed; +import com.mongodb.client.model.Aggregates; +import com.mongodb.client.model.Filters; +import com.mongodb.client.model.Updates; +import com.mongodb.internal.client.model.bulk.ConcreteClientDeleteManyModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientDeleteOneModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientInsertOneModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientNamespacedDeleteManyModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientNamespacedDeleteOneModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientNamespacedInsertOneModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientNamespacedReplaceOneModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientNamespacedUpdateManyModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientNamespacedUpdateOneModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientReplaceOneModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientUpdateManyModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientUpdateOneModel; +import org.bson.Document; +import org.bson.conversions.Bson; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A combination of an individual write operation and a {@linkplain MongoNamespace namespace} + * the operation is targeted at. + * + * @since 5.3 + */ +@Sealed +public interface ClientNamespacedWriteModel { + /** + * Creates a model for inserting the {@code document} into the {@code namespace}. + * + * @param namespace The namespace. + * @param document The document. + * @return The requested {@link ClientNamespacedInsertOneModel}. + * @param The document type, for example {@link Document}. + */ + static ClientNamespacedInsertOneModel insertOne(final MongoNamespace namespace, final TDocument document) { + notNull("namespace", namespace); + notNull("document", document); + return new ConcreteClientNamespacedInsertOneModel(namespace, new ConcreteClientInsertOneModel(document)); + } + + /** + * Creates a model for updating at most one document in the {@code namespace} matching the {@code filter}. + * This method is functionally equivalent to {@link #updateOne(MongoNamespace, Bson, Bson, ClientUpdateOneOptions)} + * with the {@linkplain ClientUpdateOneOptions#clientUpdateOneOptions() default options}. + * + * @param namespace The namespace. + * @param filter The filter. + * @param update The update. + * @return The requested {@link ClientNamespacedUpdateOneModel}. + * @see Filters + * @see Updates + */ + static ClientNamespacedUpdateOneModel updateOne(final MongoNamespace namespace, final Bson filter, final Bson update) { + notNull("namespace", namespace); + notNull("filter", filter); + notNull("update", update); + return new ConcreteClientNamespacedUpdateOneModel(namespace, new ConcreteClientUpdateOneModel(filter, update, null, null)); + } + + /** + * Creates a model for updating at most one document in the {@code namespace} matching the {@code filter}. + * + * @param namespace The namespace. + * @param filter The filter. + * @param update The update. + * @param options The options. + * @return The requested {@link ClientNamespacedUpdateOneModel}. 
+ * @see Filters + * @see Updates + */ + static ClientNamespacedUpdateOneModel updateOne( + final MongoNamespace namespace, final Bson filter, final Bson update, final ClientUpdateOneOptions options) { + notNull("namespace", namespace); + notNull("filter", filter); + notNull("update", update); + notNull("options", options); + return new ConcreteClientNamespacedUpdateOneModel(namespace, new ConcreteClientUpdateOneModel(filter, update, null, options)); + } + + /** + * Creates a model for updating at most one document in the {@code namespace} matching the {@code filter}. + * This method is functionally equivalent to {@link #updateOne(MongoNamespace, Bson, Iterable, ClientUpdateOneOptions)} + * with the {@linkplain ClientUpdateOneOptions#clientUpdateOneOptions() default options}. + * + * @param namespace The namespace. + * @param filter The filter. + * @param updatePipeline The update pipeline. + * @return The requested {@link ClientNamespacedUpdateOneModel}. + * @see Filters + * @see Aggregates + */ + static ClientNamespacedUpdateOneModel updateOne( + final MongoNamespace namespace, final Bson filter, final Iterable updatePipeline) { + notNull("namespace", namespace); + notNull("filter", filter); + notNull("updatePipeline", updatePipeline); + return new ConcreteClientNamespacedUpdateOneModel(namespace, new ConcreteClientUpdateOneModel(filter, null, updatePipeline, null)); + } + + /** + * Creates a model for updating at most one document in the {@code namespace} matching the {@code filter}. + * + * @param namespace The namespace. + * @param filter The filter. + * @param updatePipeline The update pipeline. + * @param options The options. + * @return The requested {@link ClientNamespacedUpdateOneModel}. + * @see Filters + * @see Aggregates + */ + static ClientNamespacedUpdateOneModel updateOne( + final MongoNamespace namespace, final Bson filter, final Iterable updatePipeline, final ClientUpdateOneOptions options) { + notNull("namespace", namespace); + notNull("filter", filter); + notNull("updatePipeline", updatePipeline); + notNull("options", options); + return new ConcreteClientNamespacedUpdateOneModel(namespace, new ConcreteClientUpdateOneModel(filter, null, updatePipeline, options)); + } + + /** + * Creates a model for updating all documents in the {@code namespace} matching the {@code filter}. + * This method is functionally equivalent to {@link #updateMany(MongoNamespace, Bson, Bson, ClientUpdateManyOptions)} + * with the {@linkplain ClientUpdateManyOptions#clientUpdateManyOptions() default}. + * + * @param namespace The namespace. + * @param filter The filter. + * @param update The update. + * @return The requested {@link ClientNamespacedUpdateManyModel}. + * @see Filters + * @see Updates + */ + static ClientNamespacedUpdateManyModel updateMany(final MongoNamespace namespace, final Bson filter, final Bson update) { + notNull("namespace", namespace); + notNull("filter", filter); + notNull("update", update); + return new ConcreteClientNamespacedUpdateManyModel(namespace, new ConcreteClientUpdateManyModel(filter, update, null, null)); + } + + /** + * Creates a model for updating all documents in the {@code namespace} matching the {@code filter}. + * + * @param namespace The namespace. + * @param filter The filter. + * @param update The update. + * @param options The options. + * @return The requested {@link ClientNamespacedUpdateManyModel}. 
+ * @see Filters + * @see Updates + */ + static ClientNamespacedUpdateManyModel updateMany( + final MongoNamespace namespace, final Bson filter, final Bson update, final ClientUpdateManyOptions options) { + notNull("namespace", namespace); + notNull("filter", filter); + notNull("update", update); + notNull("options", options); + return new ConcreteClientNamespacedUpdateManyModel(namespace, new ConcreteClientUpdateManyModel(filter, update, null, options)); + } + + /** + * Creates a model for updating all documents in the {@code namespace} matching the {@code filter}. + * This method is functionally equivalent to {@link #updateMany(MongoNamespace, Bson, Iterable, ClientUpdateManyOptions)} + * with the {@linkplain ClientUpdateManyOptions#clientUpdateManyOptions() default options}. + * + * @param namespace The namespace. + * @param filter The filter. + * @param updatePipeline The update pipeline. + * @return The requested {@link ClientNamespacedUpdateManyModel}. + * @see Filters + * @see Aggregates + */ + static ClientNamespacedUpdateManyModel updateMany( + final MongoNamespace namespace, final Bson filter, final Iterable updatePipeline) { + notNull("namespace", namespace); + notNull("filter", filter); + notNull("updatePipeline", updatePipeline); + return new ConcreteClientNamespacedUpdateManyModel(namespace, new ConcreteClientUpdateManyModel(filter, null, updatePipeline, null)); + } + + /** + * Creates a model for updating all documents in the {@code namespace} matching the {@code filter}. + * + * @param namespace The namespace. + * @param filter The filter. + * @param updatePipeline The update pipeline. + * @param options The options. + * @return The requested {@link ClientNamespacedUpdateManyModel}. + * @see Filters + * @see Aggregates + */ + static ClientNamespacedUpdateManyModel updateMany( + final MongoNamespace namespace, final Bson filter, final Iterable updatePipeline, final ClientUpdateManyOptions options) { + notNull("namespace", namespace); + notNull("filter", filter); + notNull("updatePipeline", updatePipeline); + notNull("options", options); + return new ConcreteClientNamespacedUpdateManyModel(namespace, new ConcreteClientUpdateManyModel(filter, null, updatePipeline, options)); + } + + /** + * Creates a model for replacing at most one document in the {@code namespace} matching the {@code filter}. + * This method is functionally equivalent to {@link #replaceOne(MongoNamespace, Bson, Object, ClientReplaceOneOptions)} + * with the {@linkplain ClientReplaceOneOptions#clientReplaceOneOptions() default options}. + * + * @param namespace The namespace. + * @param filter The filter. + * @param replacement The replacement. + * The keys of this document must not start with {@code $}, unless they express a {@linkplain com.mongodb.DBRef database reference}. + * @return The requested {@link ClientNamespacedReplaceOneModel}. + * @param The document type, for example {@link Document}. + * @see Filters + */ + static ClientNamespacedReplaceOneModel replaceOne(final MongoNamespace namespace, final Bson filter, final TDocument replacement) { + notNull("namespace", namespace); + notNull("filter", filter); + notNull("replacement", replacement); + return new ConcreteClientNamespacedReplaceOneModel(namespace, new ConcreteClientReplaceOneModel(filter, replacement, null)); + } + + /** + * Creates a model for replacing at most one document in the {@code namespace} matching the {@code filter}. + * + * @param namespace The namespace. + * @param filter The filter. + * @param replacement The replacement. 
+ * The keys of this document must not start with {@code $}, unless they express a {@linkplain com.mongodb.DBRef database reference}. + * @param options The options. + * @return The requested {@link ClientNamespacedReplaceOneModel}. + * @param The document type, for example {@link Document}. + * @see Filters + */ + static ClientNamespacedReplaceOneModel replaceOne( + final MongoNamespace namespace, final Bson filter, final TDocument replacement, final ClientReplaceOneOptions options) { + notNull("namespace", namespace); + notNull("filter", filter); + notNull("replacement", replacement); + notNull("options", options); + return new ConcreteClientNamespacedReplaceOneModel(namespace, new ConcreteClientReplaceOneModel(filter, replacement, options)); + } + + /** + * Creates a model for deleting at most one document from the {@code namespace} matching the {@code filter}. + * This method is functionally equivalent to {@link #deleteOne(MongoNamespace, Bson, ClientDeleteOneOptions)} + * with the {@linkplain ClientDeleteOneOptions#clientDeleteOneOptions() default options}. + * + * @param namespace The namespace. + * @param filter The filter. + * @return The requested {@link ClientNamespacedDeleteOneModel}. + * @see Filters + */ + static ClientNamespacedDeleteOneModel deleteOne(final MongoNamespace namespace, final Bson filter) { + notNull("namespace", namespace); + notNull("filter", filter); + return new ConcreteClientNamespacedDeleteOneModel(namespace, new ConcreteClientDeleteOneModel(filter, null)); + } + + /** + * Creates a model for deleting at most one document from the {@code namespace} matching the {@code filter}. + * + * @param namespace The namespace. + * @param filter The filter. + * @param options The options. + * @return The requested {@link ClientNamespacedDeleteOneModel}. + * @see Filters + */ + static ClientNamespacedDeleteOneModel deleteOne(final MongoNamespace namespace, final Bson filter, final ClientDeleteOneOptions options) { + notNull("namespace", namespace); + notNull("filter", filter); + notNull("options", options); + return new ConcreteClientNamespacedDeleteOneModel(namespace, new ConcreteClientDeleteOneModel(filter, options)); + } + + /** + * Creates a model for deleting all documents from the {@code namespace} matching the {@code filter}. + * This method is functionally equivalent to {@link #deleteMany(MongoNamespace, Bson, ClientDeleteManyOptions)} + * with the {@linkplain ClientDeleteManyOptions#clientDeleteManyOptions() default options}. + * + * @param namespace The namespace. + * @param filter The filter. + * @return The requested {@link ClientNamespacedDeleteManyModel}. + * @see Filters + */ + static ClientNamespacedDeleteManyModel deleteMany(final MongoNamespace namespace, final Bson filter) { + notNull("namespace", namespace); + notNull("filter", filter); + return new ConcreteClientNamespacedDeleteManyModel(namespace, new ConcreteClientDeleteManyModel(filter, null)); + } + + /** + * Creates a model for deleting all documents from the {@code namespace} matching the {@code filter}. + * + * @param namespace The namespace. + * @param filter The filter. + * @param options The options. + * @return The requested {@link ClientNamespacedDeleteManyModel}. 
+ * @see Filters + */ + static ClientNamespacedDeleteManyModel deleteMany(final MongoNamespace namespace, final Bson filter, final ClientDeleteManyOptions options) { + notNull("namespace", namespace); + notNull("filter", filter); + notNull("options", options); + return new ConcreteClientNamespacedDeleteManyModel(namespace, new ConcreteClientDeleteManyModel(filter, options)); + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/ClientReplaceOneOptions.java b/driver-core/src/main/com/mongodb/client/model/bulk/ClientReplaceOneOptions.java new file mode 100644 index 00000000000..2142d736f60 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/ClientReplaceOneOptions.java @@ -0,0 +1,77 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.bulk; + +import com.mongodb.annotations.Sealed; +import com.mongodb.client.model.Collation; +import com.mongodb.internal.client.model.bulk.ConcreteClientReplaceOneOptions; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +/** + * The options to apply when replacing a document. + * + * @since 5.3 + */ +@Sealed +public interface ClientReplaceOneOptions extends BaseClientWriteModelOptions, BaseClientUpsertableWriteModelOptions { + /** + * Creates the default options. + * + * @return The default options. + */ + static ClientReplaceOneOptions clientReplaceOneOptions() { + return new ConcreteClientReplaceOneOptions(); + } + + /** + * Sets the collation. + * + * @param collation The collation. {@code null} represents the server default. + * @return {@code this}. + */ + @Override + ClientReplaceOneOptions collation(@Nullable Collation collation); + + /** + * Sets the index specification, + * {@code null}-ifies {@linkplain #hintString(String) hint string}. + * + * @param hint The index specification. {@code null} represents the server default. + * @return {@code this}. + */ + @Override + ClientReplaceOneOptions hint(@Nullable Bson hint); + + /** + * Sets the index name, + * {@code null}-ifies {@linkplain #hint(Bson) hint}. + * + * @param hintString The index name. {@code null} represents the server default. + * @return {@code this}. + */ + @Override + ClientReplaceOneOptions hintString(@Nullable String hintString); + + /** + * Enables or disables creation of a document if no documents match the filter. + * + * @param upsert The upsert flag. {@code null} represents the server default. + * @return {@code this}. + */ + @Override + ClientReplaceOneOptions upsert(@Nullable Boolean upsert); +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/ClientUpdateManyOptions.java b/driver-core/src/main/com/mongodb/client/model/bulk/ClientUpdateManyOptions.java new file mode 100644 index 00000000000..fd0b0d12f08 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/ClientUpdateManyOptions.java @@ -0,0 +1,88 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.bulk; + +import com.mongodb.annotations.Sealed; +import com.mongodb.client.model.Collation; +import com.mongodb.client.model.Filters; +import com.mongodb.internal.client.model.bulk.ConcreteClientUpdateManyOptions; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +/** + * The options to apply when updating documents. + * + * @since 5.3 + */ +@Sealed +public interface ClientUpdateManyOptions extends BaseClientUpdateOptions { + /** + * Creates the default options. + * + * @return The default options. + */ + static ClientUpdateManyOptions clientUpdateManyOptions() { + return new ConcreteClientUpdateManyOptions(); + } + + /** + * Sets the filters specifying to which array elements an update should apply. + * + * @param arrayFilters The array filters. {@code null} represents the server default. + * @return {@code this}. + * @see Filters + */ + @Override + ClientUpdateManyOptions arrayFilters(@Nullable Iterable arrayFilters); + + /** + * Sets the collation. + * + * @param collation The collation. {@code null} represents the server default. + * @return {@code this}. + */ + @Override + ClientUpdateManyOptions collation(@Nullable Collation collation); + + /** + * Sets the index specification, + * {@code null}-ifies {@linkplain #hintString(String) hint string}. + * + * @param hint The index specification. {@code null} represents the server default. + * @return {@code this}. + */ + @Override + ClientUpdateManyOptions hint(@Nullable Bson hint); + + /** + * Sets the index name, + * {@code null}-ifies {@linkplain #hint(Bson) hint}. + * + * @param hintString The index name. {@code null} represents the server default. + * @return {@code this}. + */ + @Override + ClientUpdateManyOptions hintString(@Nullable String hintString); + + /** + * Enables or disables creation of a document if no documents match the filter. + * + * @param upsert The upsert flag. {@code null} represents the server default. + * @return {@code this}. + */ + @Override + ClientUpdateManyOptions upsert(@Nullable Boolean upsert); +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/ClientUpdateOneOptions.java b/driver-core/src/main/com/mongodb/client/model/bulk/ClientUpdateOneOptions.java new file mode 100644 index 00000000000..9b04ec6ef15 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/ClientUpdateOneOptions.java @@ -0,0 +1,88 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.bulk; + +import com.mongodb.annotations.Sealed; +import com.mongodb.client.model.Collation; +import com.mongodb.client.model.Filters; +import com.mongodb.internal.client.model.bulk.ConcreteClientUpdateOneOptions; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +/** + * The options to apply when updating a document. + * + * @since 5.3 + */ +@Sealed +public interface ClientUpdateOneOptions extends BaseClientUpdateOptions { + /** + * Creates the default options. + * + * @return The default options. + */ + static ClientUpdateOneOptions clientUpdateOneOptions() { + return new ConcreteClientUpdateOneOptions(); + } + + /** + * Sets the filters specifying to which array elements an update should apply. + * + * @param arrayFilters The array filters. {@code null} represents the server default. + * @return {@code this}. + * @see Filters + */ + @Override + ClientUpdateOneOptions arrayFilters(@Nullable Iterable arrayFilters); + + /** + * Sets the collation. + * + * @param collation The collation. {@code null} represents the server default. + * @return {@code this}. + */ + @Override + ClientUpdateOneOptions collation(@Nullable Collation collation); + + /** + * Sets the index specification, + * {@code null}-ifies {@linkplain #hintString(String) hint string}. + * + * @param hint The index specification. {@code null} represents the server default. + * @return {@code this}. + */ + @Override + ClientUpdateOneOptions hint(@Nullable Bson hint); + + /** + * Sets the index name, + * {@code null}-ifies {@linkplain #hint(Bson) hint}. + * + * @param hintString The index name. {@code null} represents the server default. + * @return {@code this}. + */ + @Override + ClientUpdateOneOptions hintString(@Nullable String hintString); + + /** + * Enables or disables creation of a document if no documents match the filter. + * + * @param upsert The upsert flag. {@code null} represents the server default. + * @return {@code this}. + */ + @Override + ClientUpdateOneOptions upsert(@Nullable Boolean upsert); +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/ClientUpdateResult.java b/driver-core/src/main/com/mongodb/client/model/bulk/ClientUpdateResult.java new file mode 100644 index 00000000000..c667db97c9e --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/ClientUpdateResult.java @@ -0,0 +1,53 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.bulk; + +import com.mongodb.annotations.Evolving; +import com.mongodb.bulk.WriteConcernError; +import org.bson.BsonValue; + +import java.util.Optional; + +/** + * The result of a successful {@linkplain ClientNamespacedWriteModel individual update or replace operation}. + * Note that {@link WriteConcernError}s are not considered as making individual operations unsuccessful. 
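As a usage sketch only (not part of this patch): the snippet below builds a single-document update for a client-level bulk write using the options interface above. It assumes an updateOne(...) factory on ClientNamespacedWriteModel analogous to the replaceOne/deleteOne factories shown earlier; the namespace db.coll is hypothetical.

import com.mongodb.MongoNamespace;
import com.mongodb.client.model.Filters;
import com.mongodb.client.model.Updates;
import com.mongodb.client.model.bulk.ClientNamespacedUpdateOneModel;
import com.mongodb.client.model.bulk.ClientNamespacedWriteModel;
import com.mongodb.client.model.bulk.ClientUpdateOneOptions;

final class UpdateOneOptionsSketch {
    // updateOne(...) is assumed to mirror the replaceOne/deleteOne factories in this interface.
    static ClientNamespacedUpdateOneModel archive(final Object id) {
        return ClientNamespacedWriteModel.updateOne(
                new MongoNamespace("db", "coll"),
                Filters.eq("_id", id),
                Updates.set("status", "archived"),
                ClientUpdateOneOptions.clientUpdateOneOptions().upsert(true));
    }
}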
+ * + * @since 5.3 + */ +@Evolving +public interface ClientUpdateResult { + /** + * The number of documents that matched the filter. + * + * @return The number of documents that matched the filter. + */ + long getMatchedCount(); + + /** + * The number of documents that were modified. + * + * @return The number of documents that were modified. + */ + long getModifiedCount(); + + /** + * The {@code "_id"} of the upserted document if and only if an upsert occurred. + * + * @return The {@code "_id"} of the upserted. + * {@linkplain Optional#isPresent() Present} if and only if an upsert occurred. + */ + Optional getUpsertedId(); +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/package-info.java b/driver-core/src/main/com/mongodb/client/model/bulk/package-info.java new file mode 100644 index 00000000000..b9cb98f41a7 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/package-info.java @@ -0,0 +1,23 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Models, options, results for the client-level bulk write operation. + */ +@NonNullApi +package com.mongodb.client.model.bulk; + +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/internal/async/AsyncBatchCursor.java b/driver-core/src/main/com/mongodb/internal/async/AsyncBatchCursor.java index 89260ac7b52..bd8d6c64a3f 100644 --- a/driver-core/src/main/com/mongodb/internal/async/AsyncBatchCursor.java +++ b/driver-core/src/main/com/mongodb/internal/async/AsyncBatchCursor.java @@ -19,8 +19,11 @@ import com.mongodb.internal.operation.BatchCursor; import java.io.Closeable; +import java.util.ArrayList; import java.util.List; +import static com.mongodb.internal.async.AsyncRunnable.beginAsync; + /** * MongoDB returns query results as batches, and this interface provides an asynchronous iterator over those batches. 
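A short sketch of reading the ClientUpdateResult getters defined above: getUpsertedId() is present only when an upsert occurred; otherwise the matched and modified counts describe the outcome.

import com.mongodb.client.model.bulk.ClientUpdateResult;

final class UpdateResultSketch {
    static String describe(final ClientUpdateResult result) {
        // Present if and only if an upsert occurred, per the contract above.
        return result.getUpsertedId()
                .map(id -> "upserted " + id)
                .orElseGet(() -> "matched " + result.getMatchedCount()
                        + ", modified " + result.getModifiedCount());
    }
}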
The first call to * the {@code next} method will return the first batch, and subsequent calls will trigger an asynchronous request to get the next batch @@ -72,4 +75,22 @@ public interface AsyncBatchCursor extends Closeable { */ @Override void close(); + + default void exhaust(final SingleResultCallback>> finalCallback) { + List> results = new ArrayList<>(); + + beginAsync().thenRunDoWhileLoop(iterationCallback -> { + beginAsync().>thenSupply(c -> { + next(c); + }).thenConsume((batch, c) -> { + if (!batch.isEmpty()) { + results.add(batch); + } + c.complete(c); + }).finish(iterationCallback); + }, () -> !this.isClosed() + ).>>thenSupply(c -> { + c.complete(results); + }).finish(finalCallback); + } } diff --git a/driver-core/src/main/com/mongodb/internal/async/AsyncRunnable.java b/driver-core/src/main/com/mongodb/internal/async/AsyncRunnable.java index d4ead3c5b96..e404e2b8152 100644 --- a/driver-core/src/main/com/mongodb/internal/async/AsyncRunnable.java +++ b/driver-core/src/main/com/mongodb/internal/async/AsyncRunnable.java @@ -17,9 +17,12 @@ package com.mongodb.internal.async; import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.async.function.AsyncCallbackLoop; +import com.mongodb.internal.async.function.LoopState; import com.mongodb.internal.async.function.RetryState; import com.mongodb.internal.async.function.RetryingAsyncCallbackSupplier; +import java.util.function.BooleanSupplier; import java.util.function.Predicate; import java.util.function.Supplier; @@ -120,49 +123,6 @@ static AsyncRunnable beginAsync() { return (c) -> c.complete(c); } - /** - * Must be invoked at end of async chain - * @param runnable the sync code to invoke (under non-exceptional flow) - * prior to the callback - * @param callback the callback provided by the method the chain is used in - */ - default void thenRunAndFinish(final Runnable runnable, final SingleResultCallback callback) { - this.finish((r, e) -> { - if (e != null) { - callback.completeExceptionally(e); - return; - } - try { - runnable.run(); - } catch (Throwable t) { - callback.completeExceptionally(t); - return; - } - callback.complete(callback); - }); - } - - /** - * See {@link #thenRunAndFinish(Runnable, SingleResultCallback)}, but the runnable - * will always be executed, including on the exceptional path. - * @param runnable the runnable - * @param callback the callback - */ - default void thenAlwaysRunAndFinish(final Runnable runnable, final SingleResultCallback callback) { - this.finish((r, e) -> { - try { - runnable.run(); - } catch (Throwable t) { - if (e != null) { - t.addSuppressed(e); - } - callback.completeExceptionally(t); - return; - } - callback.onResult(r, e); - }); - } - /** * @param runnable The async runnable to run after this runnable * @return the composition of this runnable and the runnable, a runnable @@ -282,4 +242,33 @@ default AsyncRunnable thenRunRetryingWhile( ).get(callback); }); } + + /** + * This method is equivalent to a do-while loop, where the loop body is executed first and + * then the condition is checked to determine whether the loop should continue. 
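To make the control flow of the exhaust(...) chain above easier to follow, here is a blocking sketch of the same logic; BatchCursorLike and nextBatch() are hypothetical stand-ins, not driver API. The body runs first and the condition is checked afterwards, which is exactly the do-while shape provided by thenRunDoWhileLoop below.

import java.util.ArrayList;
import java.util.List;

// Hypothetical blocking counterpart, used only for illustration.
interface BatchCursorLike<T> {
    List<T> nextBatch();
    boolean isClosed();
}

final class ExhaustSketch {
    static <T> List<List<T>> exhaustBlocking(final BatchCursorLike<T> cursor) {
        List<List<T>> results = new ArrayList<>();
        do {
            List<T> batch = cursor.nextBatch();   // loop body executes first
            if (!batch.isEmpty()) {
                results.add(batch);
            }
        } while (!cursor.isClosed());             // condition is checked afterwards
        return results;
    }
}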
+ * + * @param loopBodyRunnable the asynchronous task to be executed in each iteration of the loop + * @param whileCheck a condition to check after each iteration; the loop continues as long as this condition returns true + * @return the composition of this and the looping branch + * @see AsyncCallbackLoop + */ + default AsyncRunnable thenRunDoWhileLoop(final AsyncRunnable loopBodyRunnable, final BooleanSupplier whileCheck) { + return thenRun(finalCallback -> { + LoopState loopState = new LoopState(); + new AsyncCallbackLoop(loopState, iterationCallback -> { + + loopBodyRunnable.finish((result, t) -> { + if (t != null) { + iterationCallback.completeExceptionally(t); + return; + } + if (loopState.breakAndCompleteIf(() -> !whileCheck.getAsBoolean(), iterationCallback)) { + return; + } + iterationCallback.complete(iterationCallback); + }); + + }).run(finalCallback); + }); + } } diff --git a/driver-core/src/main/com/mongodb/internal/async/AsyncSupplier.java b/driver-core/src/main/com/mongodb/internal/async/AsyncSupplier.java index 77c289c8723..6dd89e4d9b0 100644 --- a/driver-core/src/main/com/mongodb/internal/async/AsyncSupplier.java +++ b/driver-core/src/main/com/mongodb/internal/async/AsyncSupplier.java @@ -81,6 +81,49 @@ default void finish(final SingleResultCallback callback) { } } + /** + * Must be invoked at end of async chain + * @param runnable the sync code to invoke (under non-exceptional flow) + * prior to the callback + * @param callback the callback provided by the method the chain is used in + */ + default void thenRunAndFinish(final Runnable runnable, final SingleResultCallback callback) { + this.finish((r, e) -> { + if (e != null) { + callback.completeExceptionally(e); + return; + } + try { + runnable.run(); + } catch (Throwable t) { + callback.completeExceptionally(t); + return; + } + callback.onResult(r, null); + }); + } + + /** + * See {@link #thenRunAndFinish(Runnable, SingleResultCallback)}, but the runnable + * will always be executed, including on the exceptional path. + * @param runnable the runnable + * @param callback the callback + */ + default void thenAlwaysRunAndFinish(final Runnable runnable, final SingleResultCallback callback) { + this.finish((r, e) -> { + try { + runnable.run(); + } catch (Throwable t) { + if (e != null) { + t.addSuppressed(e); + } + callback.completeExceptionally(t); + return; + } + callback.onResult(r, e); + }); + } + /** * @param function The async function to run after this supplier * @return the composition of this supplier and the function, a supplier diff --git a/driver-core/src/main/com/mongodb/internal/async/MutableValue.java b/driver-core/src/main/com/mongodb/internal/async/MutableValue.java new file mode 100644 index 00000000000..0ee793788ea --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/async/MutableValue.java @@ -0,0 +1,47 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
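A minimal sketch of the relocated thenAlwaysRunAndFinish helper as the asynchronous analogue of try/finally; it assumes AsyncRunnable still extends AsyncSupplier<Void>, so the moved methods remain reachable from a runnable chain, and the cleanup action is a hypothetical placeholder.

import com.mongodb.internal.async.SingleResultCallback;

import static com.mongodb.internal.async.AsyncRunnable.beginAsync;

final class AlwaysRunSketch {
    static void runThenAlwaysCleanup(final Runnable cleanup, final SingleResultCallback<Void> callback) {
        beginAsync().thenRun(c -> {
            // ... asynchronous work would go here ...
            c.complete(c);
        }).thenAlwaysRunAndFinish(cleanup, callback);   // cleanup runs on both normal and exceptional paths
    }
}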
+ */ +package com.mongodb.internal.async; + +import com.mongodb.annotations.NotThreadSafe; +import com.mongodb.lang.Nullable; + +import static com.mongodb.assertions.Assertions.assertNotNull; + +@NotThreadSafe +public final class MutableValue { + private T value; + + public MutableValue(@Nullable final T value) { + this.value = value; + } + + public MutableValue() { + this(null); + } + + public T get() { + return assertNotNull(value); + } + + @Nullable + public T getNullable() { + return value; + } + + public void set(@Nullable final T value) { + this.value = value; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/async/function/AsyncCallbackSupplier.java b/driver-core/src/main/com/mongodb/internal/async/function/AsyncCallbackSupplier.java index 40bfd34de3d..1d98fb91a83 100644 --- a/driver-core/src/main/com/mongodb/internal/async/function/AsyncCallbackSupplier.java +++ b/driver-core/src/main/com/mongodb/internal/async/function/AsyncCallbackSupplier.java @@ -15,7 +15,7 @@ */ package com.mongodb.internal.async.function; -import com.mongodb.annotations.NotThreadSafe; +import com.mongodb.internal.async.MutableValue; import com.mongodb.internal.async.SingleResultCallback; import java.util.function.Supplier; @@ -68,16 +68,12 @@ public interface AsyncCallbackSupplier { * This is a price we have to pay to provide a guarantee similar to that of the {@code finally} block. */ default AsyncCallbackSupplier whenComplete(final Runnable after) { - @NotThreadSafe - final class MutableBoolean { - private boolean value; - } - MutableBoolean afterExecuted = new MutableBoolean(); + MutableValue afterExecuted = new MutableValue<>(false); Runnable trackableAfter = () -> { try { after.run(); } finally { - afterExecuted.value = true; + afterExecuted.set(true); } }; return callback -> { @@ -103,7 +99,7 @@ final class MutableBoolean { primaryUnexpectedException = unexpectedException; throw unexpectedException; } finally { - if (primaryUnexpectedException != null && !afterExecuted.value) { + if (primaryUnexpectedException != null && !afterExecuted.get()) { try { trackableAfter.run(); } catch (Throwable afterException) { diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/AbstractClientDeleteModel.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/AbstractClientDeleteModel.java new file mode 100644 index 00000000000..f7cc0dd4e66 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/AbstractClientDeleteModel.java @@ -0,0 +1,39 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import org.bson.conversions.Bson; + +/** + * This class is not part of the public API and may be removed or changed at any time. 
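A small sketch of MutableValue as used in the whenComplete change above: it replaces the old local MutableBoolean with a reusable holder whose get() asserts a non-null value and whose getNullable() does not.

import com.mongodb.internal.async.MutableValue;

final class MutableValueSketch {
    static boolean example() {
        MutableValue<Boolean> afterExecuted = new MutableValue<>(false);
        Runnable trackableAfter = () -> afterExecuted.set(true);
        trackableAfter.run();
        // get() asserts the value is non-null; getNullable() would return it as-is.
        return afterExecuted.get();
    }
}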
+ */ +public abstract class AbstractClientDeleteModel implements ClientWriteModel { + private final Bson filter; + private final O options; + + AbstractClientDeleteModel(final Bson filter, final O options) { + this.filter = filter; + this.options = options; + } + + public final Bson getFilter() { + return filter; + } + + public final O getOptions() { + return options; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/AbstractClientDeleteOptions.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/AbstractClientDeleteOptions.java new file mode 100644 index 00000000000..fdacf540073 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/AbstractClientDeleteOptions.java @@ -0,0 +1,82 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.client.model.Collation; +import com.mongodb.client.model.bulk.ClientDeleteManyOptions; +import com.mongodb.client.model.bulk.ClientDeleteOneOptions; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +import java.util.Optional; + +import static java.util.Optional.ofNullable; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public abstract class AbstractClientDeleteOptions { + @Nullable + private Collation collation; + @Nullable + private Bson hint; + @Nullable + private String hintString; + + AbstractClientDeleteOptions() { + } + + public AbstractClientDeleteOptions collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } + + /** + * @see ClientDeleteOneOptions#collation(Collation) + * @see ClientDeleteManyOptions#collation(Collation) + */ + public Optional getCollation() { + return ofNullable(collation); + } + + public AbstractClientDeleteOptions hint(@Nullable final Bson hint) { + this.hint = hint; + this.hintString = null; + return this; + } + + /** + * @see ClientDeleteOneOptions#hint(Bson) + * @see ClientDeleteManyOptions#hint(Bson) + */ + public Optional getHint() { + return ofNullable(hint); + } + + public AbstractClientDeleteOptions hintString(@Nullable final String hintString) { + this.hintString = hintString; + this.hint = null; + return this; + } + + /** + * @see ClientDeleteOneOptions#hintString(String) + * @see ClientDeleteManyOptions#hintString(String) + */ + public Optional getHintString() { + return ofNullable(hintString); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/AbstractClientNamespacedWriteModel.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/AbstractClientNamespacedWriteModel.java new file mode 100644 index 00000000000..25daa0bea15 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/AbstractClientNamespacedWriteModel.java @@ -0,0 +1,48 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.MongoNamespace; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public abstract class AbstractClientNamespacedWriteModel { + private final MongoNamespace namespace; + private final ClientWriteModel model; + + AbstractClientNamespacedWriteModel(final MongoNamespace namespace, final ClientWriteModel model) { + this.namespace = namespace; + this.model = model; + } + + public final MongoNamespace getNamespace() { + return namespace; + } + + public final ClientWriteModel getModel() { + return model; + } + + @Override + public final String toString() { + return "ClientNamespacedWriteModel{" + + "namespace=" + namespace + + ", model=" + model + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/AbstractClientUpdateModel.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/AbstractClientUpdateModel.java new file mode 100644 index 00000000000..c55ddfc2def --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/AbstractClientUpdateModel.java @@ -0,0 +1,65 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +import java.util.Optional; + +import static com.mongodb.assertions.Assertions.assertTrue; +import static java.util.Optional.ofNullable; + +/** + * This class is not part of the public API and may be removed or changed at any time. 
+ */ +public abstract class AbstractClientUpdateModel implements ClientWriteModel { + private final Bson filter; + @Nullable + private final Bson update; + @Nullable + private final Iterable updatePipeline; + private final O options; + + AbstractClientUpdateModel( + final Bson filter, + @Nullable + final Bson update, + @Nullable final Iterable updatePipeline, + final O options) { + this.filter = filter; + assertTrue(update == null ^ updatePipeline == null); + this.update = update; + this.updatePipeline = updatePipeline; + this.options = options; + } + + public final Bson getFilter() { + return filter; + } + + public final Optional getUpdate() { + return ofNullable(update); + } + + public final Optional> getUpdatePipeline() { + return ofNullable(updatePipeline); + } + + public final O getOptions() { + return options; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/AbstractClientUpdateOptions.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/AbstractClientUpdateOptions.java new file mode 100644 index 00000000000..508330bd8b7 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/AbstractClientUpdateOptions.java @@ -0,0 +1,112 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.client.model.Collation; +import com.mongodb.client.model.bulk.ClientUpdateManyOptions; +import com.mongodb.client.model.bulk.ClientUpdateOneOptions; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +import java.util.Optional; + +import static java.util.Optional.ofNullable; + +/** + * This class is not part of the public API and may be removed or changed at any time. 
+ */ +public abstract class AbstractClientUpdateOptions { + @Nullable + private Iterable arrayFilters; + @Nullable + private Collation collation; + @Nullable + private Bson hint; + @Nullable + private String hintString; + @Nullable + private Boolean upsert; + + AbstractClientUpdateOptions() { + } + + public AbstractClientUpdateOptions arrayFilters(@Nullable final Iterable arrayFilters) { + this.arrayFilters = arrayFilters; + return this; + } + + /** + * @see ClientUpdateOneOptions#arrayFilters(Iterable) + * @see ClientUpdateManyOptions#arrayFilters(Iterable) + */ + public Optional> getArrayFilters() { + return ofNullable(arrayFilters); + } + + public AbstractClientUpdateOptions collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } + + /** + * @see ClientUpdateOneOptions#collation(Collation) + * @see ClientUpdateManyOptions#collation(Collation) + */ + public Optional getCollation() { + return ofNullable(collation); + } + + public AbstractClientUpdateOptions hint(@Nullable final Bson hint) { + this.hint = hint; + this.hintString = null; + return this; + } + + /** + * @see ClientUpdateOneOptions#hint(Bson) + * @see ClientUpdateManyOptions#hint(Bson) + */ + public Optional getHint() { + return ofNullable(hint); + } + + public AbstractClientUpdateOptions hintString(@Nullable final String hintString) { + this.hintString = hintString; + this.hint = null; + return this; + } + + /** + * @see ClientUpdateOneOptions#hintString(String) + * @see ClientUpdateManyOptions#hintString(String) + */ + public Optional getHintString() { + return ofNullable(hintString); + } + + public AbstractClientUpdateOptions upsert(@Nullable final Boolean upsert) { + this.upsert = upsert; + return this; + } + + /** + * @see ClientUpdateOneOptions#upsert(Boolean) + * @see ClientUpdateManyOptions#upsert(Boolean) + */ + public Optional isUpsert() { + return ofNullable(upsert); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/AcknowledgedSummaryClientBulkWriteResult.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/AcknowledgedSummaryClientBulkWriteResult.java new file mode 100644 index 00000000000..fb088c662ae --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/AcknowledgedSummaryClientBulkWriteResult.java @@ -0,0 +1,114 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.client.model.bulk.ClientBulkWriteResult; + +import java.util.Objects; +import java.util.Optional; + +import static java.util.Optional.empty; + +/** + * This class is not part of the public API and may be removed or changed at any time. 
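A brief sketch of the hint behaviour implemented above: setting hint(...) clears any previously supplied hintString, and vice versa, so only the form supplied last is retained.

import com.mongodb.client.model.bulk.ClientUpdateOneOptions;
import org.bson.Document;

final class HintSketch {
    static ClientUpdateOneOptions lastHintWins() {
        return ClientUpdateOneOptions.clientUpdateOneOptions()
                .hint(new Document("status", 1))   // sets the index specification
                .hintString("status_1");           // replaces it with the index name
    }
}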
+ */ +public final class AcknowledgedSummaryClientBulkWriteResult implements ClientBulkWriteResult { + private final long insertedCount; + private final long upsertedCount; + private final long matchedCount; + private final long modifiedCount; + private final long deletedCount; + + public AcknowledgedSummaryClientBulkWriteResult( + final long insertedCount, + final long upsertedCount, + final long matchedCount, + final long modifiedCount, + final long deletedCount) { + this.insertedCount = insertedCount; + this.upsertedCount = upsertedCount; + this.matchedCount = matchedCount; + this.modifiedCount = modifiedCount; + this.deletedCount = deletedCount; + } + + @Override + public boolean isAcknowledged() { + return true; + } + + @Override + public long getInsertedCount() { + return insertedCount; + } + + @Override + public long getUpsertedCount() { + return upsertedCount; + } + + @Override + public long getMatchedCount() { + return matchedCount; + } + + @Override + public long getModifiedCount() { + return modifiedCount; + } + + @Override + public long getDeletedCount() { + return deletedCount; + } + + @Override + public Optional getVerboseResults() { + return empty(); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final AcknowledgedSummaryClientBulkWriteResult that = (AcknowledgedSummaryClientBulkWriteResult) o; + return insertedCount == that.insertedCount + && upsertedCount == that.upsertedCount + && matchedCount == that.matchedCount + && modifiedCount == that.modifiedCount + && deletedCount == that.deletedCount; + } + + @Override + public int hashCode() { + return Objects.hash(insertedCount, upsertedCount, matchedCount, modifiedCount, deletedCount); + } + + @Override + public String toString() { + return "AcknowledgedSummaryClientBulkWriteResult{" + + "insertedCount=" + insertedCount + + ", upsertedCount=" + upsertedCount + + ", matchedCount=" + matchedCount + + ", modifiedCount=" + modifiedCount + + ", deletedCount=" + deletedCount + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/AcknowledgedVerboseClientBulkWriteResult.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/AcknowledgedVerboseClientBulkWriteResult.java new file mode 100644 index 00000000000..14e9b016d07 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/AcknowledgedVerboseClientBulkWriteResult.java @@ -0,0 +1,170 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
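A usage sketch for the summary result implemented above: with the client default of verboseResults == false, only the counts are available and getVerboseResults() is empty, so callers read the summary fields directly.

import com.mongodb.client.model.bulk.ClientBulkWriteResult;

final class SummaryResultSketch {
    static long modifiedOrMinusOne(final ClientBulkWriteResult result) {
        // Counts are only meaningful for acknowledged writes.
        return result.isAcknowledged() ? result.getModifiedCount() : -1;
    }
}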
+ */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.client.model.bulk.ClientBulkWriteResult; +import com.mongodb.client.model.bulk.ClientDeleteResult; +import com.mongodb.client.model.bulk.ClientInsertOneResult; +import com.mongodb.client.model.bulk.ClientUpdateResult; + +import java.util.Map; +import java.util.Objects; +import java.util.Optional; + +import static java.util.Optional.of; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public final class AcknowledgedVerboseClientBulkWriteResult implements ClientBulkWriteResult { + private final AcknowledgedSummaryClientBulkWriteResult summaryResults; + private final AcknowledgedVerboseClientBulkWriteResult.VerboseResults verboseResults; + + public AcknowledgedVerboseClientBulkWriteResult( + final AcknowledgedSummaryClientBulkWriteResult summaryResults, + final Map insertResults, + final Map updateResults, + final Map deleteResults) { + this.summaryResults = summaryResults; + this.verboseResults = new AcknowledgedVerboseClientBulkWriteResult.VerboseResults(insertResults, updateResults, deleteResults); + } + + @Override + public boolean isAcknowledged() { + return true; + } + + @Override + public long getInsertedCount() { + return summaryResults.getInsertedCount(); + } + + @Override + public long getUpsertedCount() { + return summaryResults.getUpsertedCount(); + } + + @Override + public long getMatchedCount() { + return summaryResults.getMatchedCount(); + } + + @Override + public long getModifiedCount() { + return summaryResults.getModifiedCount(); + } + + @Override + public long getDeletedCount() { + return summaryResults.getDeletedCount(); + } + + @Override + public Optional getVerboseResults() { + return of(verboseResults); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final AcknowledgedVerboseClientBulkWriteResult that = (AcknowledgedVerboseClientBulkWriteResult) o; + return Objects.equals(summaryResults, that.summaryResults) + && Objects.equals(verboseResults, that.verboseResults); + } + + @Override + public int hashCode() { + return Objects.hash(summaryResults, verboseResults); + } + + @Override + public String toString() { + return "AcknowledgedVerboseClientBulkWriteResult{" + + "insertedCount=" + summaryResults.getInsertedCount() + + ", upsertedCount=" + summaryResults.getUpsertedCount() + + ", matchedCount=" + summaryResults.getMatchedCount() + + ", modifiedCount=" + summaryResults.getModifiedCount() + + ", deletedCount=" + summaryResults.getDeletedCount() + + ", insertResults=" + verboseResults.insertResults + + ", updateResults=" + verboseResults.updateResults + + ", deleteResults=" + verboseResults.deleteResults + + '}'; + } + + private static final class VerboseResults implements ClientBulkWriteResult.VerboseResults { + private final Map insertResults; + private final Map updateResults; + private final Map deleteResults; + + VerboseResults( + final Map insertResults, + final Map updateResults, + final Map deleteResults) { + this.insertResults = insertResults; + this.updateResults = updateResults; + this.deleteResults = deleteResults; + } + + @Override + public Map getInsertResults() { + return insertResults; + } + + @Override + public Map getUpdateResults() { + return updateResults; + } + + @Override + public Map getDeleteResults() { + return deleteResults; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { 
+ return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final AcknowledgedVerboseClientBulkWriteResult.VerboseResults verbose = + (AcknowledgedVerboseClientBulkWriteResult.VerboseResults) o; + return Objects.equals(insertResults, verbose.insertResults) + && Objects.equals(updateResults, verbose.updateResults) + && Objects.equals(deleteResults, verbose.deleteResults); + } + + @Override + public int hashCode() { + return Objects.hash(insertResults, updateResults, deleteResults); + } + + @Override + public String toString() { + return "AcknowledgedVerboseClientBulkWriteResult.VerboseResults{" + + "insertResults=" + insertResults + + ", updateResults=" + updateResults + + ", deleteResults=" + deleteResults + + '}'; + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ClientWriteModel.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ClientWriteModel.java new file mode 100644 index 00000000000..56d431ec0e8 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ClientWriteModel.java @@ -0,0 +1,24 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +/** + * An individual write operation to be executed as part of a client-level bulk write operation. + *

+ * This class is not part of the public API and may be removed or changed at any time.

+ */ +public interface ClientWriteModel { +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientBulkWriteOptions.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientBulkWriteOptions.java new file mode 100644 index 00000000000..9599e1750bf --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientBulkWriteOptions.java @@ -0,0 +1,123 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.client.model.bulk.ClientBulkWriteOptions; +import com.mongodb.lang.Nullable; +import org.bson.BsonValue; +import org.bson.conversions.Bson; + +import java.util.Optional; + +import static java.util.Optional.ofNullable; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public final class ConcreteClientBulkWriteOptions implements ClientBulkWriteOptions { + private static final Boolean CLIENT_DEFAULT_ORDERED = true; + private static final Boolean CLIENT_DEFAULT_VERBOSE_RESULTS = false; + + @Nullable + private Boolean ordered; + @Nullable + private Boolean bypassDocumentValidation; + @Nullable + private Bson let; + @Nullable + private BsonValue comment; + @Nullable + private Boolean verboseResults; + + public ConcreteClientBulkWriteOptions() { + } + + @Override + public ClientBulkWriteOptions ordered(@Nullable final Boolean ordered) { + this.ordered = ordered; + return this; + } + + /** + * @see #ordered(Boolean) + */ + public boolean isOrdered() { + return ordered == null ? CLIENT_DEFAULT_ORDERED : ordered; + } + + @Override + public ClientBulkWriteOptions bypassDocumentValidation(@Nullable final Boolean bypassDocumentValidation) { + this.bypassDocumentValidation = bypassDocumentValidation; + return this; + } + + /** + * @see #bypassDocumentValidation(Boolean) + */ + public Optional isBypassDocumentValidation() { + return ofNullable(bypassDocumentValidation); + } + + @Override + public ClientBulkWriteOptions let(@Nullable final Bson let) { + this.let = let; + return this; + } + + /** + * @see #let(Bson) + */ + public Optional getLet() { + return ofNullable(let); + } + + @Override + public ClientBulkWriteOptions comment(@Nullable final BsonValue comment) { + this.comment = comment; + return this; + } + + /** + * @see #comment(BsonValue) + */ + public Optional getComment() { + return ofNullable(comment); + } + + @Override + public ClientBulkWriteOptions verboseResults(@Nullable final Boolean verboseResults) { + this.verboseResults = verboseResults; + return this; + } + + /** + * @see #verboseResults(Boolean) + */ + public boolean isVerboseResults() { + return verboseResults == null ? 
CLIENT_DEFAULT_VERBOSE_RESULTS : verboseResults; + } + + @Override + public String toString() { + return "ClientBulkWriteOptions{" + + "ordered=" + ordered + + ", bypassDocumentValidation=" + bypassDocumentValidation + + ", let=" + let + + ", comment=" + comment + + ", verboseResults=" + verboseResults + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientDeleteManyModel.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientDeleteManyModel.java new file mode 100644 index 00000000000..7db1a47d053 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientDeleteManyModel.java @@ -0,0 +1,37 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.client.model.bulk.ClientDeleteManyOptions; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public final class ConcreteClientDeleteManyModel extends AbstractClientDeleteModel implements ClientWriteModel { + public ConcreteClientDeleteManyModel(final Bson filter, @Nullable final ClientDeleteManyOptions options) { + super(filter, options == null ? ConcreteClientDeleteManyOptions.MUTABLE_EMPTY : (ConcreteClientDeleteManyOptions) options); + } + + @Override + public String toString() { + return "ClientDeleteManyModel{" + + "filter=" + getFilter() + + ", options=" + getOptions() + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientDeleteManyOptions.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientDeleteManyOptions.java new file mode 100644 index 00000000000..381cd84fa50 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientDeleteManyOptions.java @@ -0,0 +1,55 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.client.model.Collation; +import com.mongodb.client.model.bulk.ClientDeleteManyOptions; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +/** + * This class is not part of the public API and may be removed or changed at any time. 
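A configuration sketch for the options implemented above (client defaults: ordered == true, verboseResults == false). It assumes a clientBulkWriteOptions() factory on the public ClientBulkWriteOptions interface analogous to the other factories in this package; that factory is not visible in this hunk, and the comment value is hypothetical.

import com.mongodb.client.model.bulk.ClientBulkWriteOptions;
import org.bson.BsonString;

final class BulkWriteOptionsSketch {
    static ClientBulkWriteOptions unorderedVerbose() {
        return ClientBulkWriteOptions.clientBulkWriteOptions()
                .ordered(false)          // client default is true
                .verboseResults(true)    // client default is false
                .comment(new BsonString("nightly-migration"));
    }
}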
+ */ +public final class ConcreteClientDeleteManyOptions extends AbstractClientDeleteOptions implements ClientDeleteManyOptions { + static final ConcreteClientDeleteManyOptions MUTABLE_EMPTY = new ConcreteClientDeleteManyOptions(); + + public ConcreteClientDeleteManyOptions() { + } + + @Override + public ConcreteClientDeleteManyOptions collation(@Nullable final Collation collation) { + return (ConcreteClientDeleteManyOptions) super.collation(collation); + } + + @Override + public ConcreteClientDeleteManyOptions hint(@Nullable final Bson hint) { + return (ConcreteClientDeleteManyOptions) super.hint(hint); + } + + @Override + public ConcreteClientDeleteManyOptions hintString(@Nullable final String hintString) { + return (ConcreteClientDeleteManyOptions) super.hintString(hintString); + } + + @Override + public String toString() { + return "ClientDeleteManyOptions{" + + "collation=" + getCollation().orElse(null) + + ", hint=" + getHint().orElse(null) + + ", hintString=" + getHintString().map(s -> '\'' + s + '\'') .orElse(null) + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientDeleteOneModel.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientDeleteOneModel.java new file mode 100644 index 00000000000..9e969ba9eeb --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientDeleteOneModel.java @@ -0,0 +1,37 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.client.model.bulk.ClientDeleteOneOptions; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public final class ConcreteClientDeleteOneModel extends AbstractClientDeleteModel implements ClientWriteModel { + public ConcreteClientDeleteOneModel(final Bson filter, @Nullable final ClientDeleteOneOptions options) { + super(filter, options == null ? ConcreteClientDeleteOneOptions.MUTABLE_EMPTY : (ConcreteClientDeleteOneOptions) options); + } + + @Override + public String toString() { + return "ClientDeleteOneModel{" + + "filter=" + getFilter() + + ", options=" + getOptions() + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientDeleteOneOptions.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientDeleteOneOptions.java new file mode 100644 index 00000000000..3126903cfc5 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientDeleteOneOptions.java @@ -0,0 +1,55 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.client.model.Collation; +import com.mongodb.client.model.bulk.ClientDeleteOneOptions; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public final class ConcreteClientDeleteOneOptions extends AbstractClientDeleteOptions implements ClientDeleteOneOptions { + static final ConcreteClientDeleteOneOptions MUTABLE_EMPTY = new ConcreteClientDeleteOneOptions(); + + public ConcreteClientDeleteOneOptions() { + } + + @Override + public ConcreteClientDeleteOneOptions collation(@Nullable final Collation collation) { + return (ConcreteClientDeleteOneOptions) super.collation(collation); + } + + @Override + public ConcreteClientDeleteOneOptions hint(@Nullable final Bson hint) { + return (ConcreteClientDeleteOneOptions) super.hint(hint); + } + + @Override + public ConcreteClientDeleteOneOptions hintString(@Nullable final String hintString) { + return (ConcreteClientDeleteOneOptions) super.hintString(hintString); + } + + @Override + public String toString() { + return "ClientDeleteOneOptions{" + + "collation=" + getCollation().orElse(null) + + ", hint=" + getHint().orElse(null) + + ", hintString=" + getHintString().map(s -> '\'' + s + '\'') .orElse(null) + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientDeleteResult.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientDeleteResult.java new file mode 100644 index 00000000000..a82b8ee8b62 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientDeleteResult.java @@ -0,0 +1,58 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.client.model.bulk.ClientDeleteResult; + +/** + * This class is not part of the public API and may be removed or changed at any time. 
+ */ +public final class ConcreteClientDeleteResult implements ClientDeleteResult { + private final long deletedCount; + + public ConcreteClientDeleteResult(final long deletedCount) { + this.deletedCount = deletedCount; + } + + @Override + public long getDeletedCount() { + return deletedCount; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final ConcreteClientDeleteResult that = (ConcreteClientDeleteResult) o; + return deletedCount == that.deletedCount; + } + + @Override + public int hashCode() { + return Long.hashCode(deletedCount); + } + + @Override + public String toString() { + return "ClientDeleteResult{" + + "deletedCount=" + deletedCount + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientInsertOneModel.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientInsertOneModel.java new file mode 100644 index 00000000000..660944fc202 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientInsertOneModel.java @@ -0,0 +1,38 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public final class ConcreteClientInsertOneModel implements ClientWriteModel { + private final Object document; + + public ConcreteClientInsertOneModel(final Object document) { + this.document = document; + } + + public Object getDocument() { + return document; + } + + @Override + public String toString() { + return "ClientInsertOneModel{" + + "document=" + document + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientInsertOneResult.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientInsertOneResult.java new file mode 100644 index 00000000000..cc755e2c62d --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientInsertOneResult.java @@ -0,0 +1,66 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
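A sketch of reading a per-operation delete result when verbose results were requested; it assumes the verbose result maps are keyed by the index of the corresponding write model (the map type parameters are not visible in this rendering of the patch).

import com.mongodb.client.model.bulk.ClientBulkWriteResult;
import com.mongodb.client.model.bulk.ClientDeleteResult;

final class VerboseResultSketch {
    static long deletedByFirstModel(final ClientBulkWriteResult result) {
        return result.getVerboseResults()
                .map(ClientBulkWriteResult.VerboseResults::getDeleteResults)
                .map(deletes -> deletes.get(0))            // assumed index-keyed map
                .map(ClientDeleteResult::getDeletedCount)
                .orElse(0L);
    }
}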
+ */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.client.model.bulk.ClientInsertOneResult; +import com.mongodb.lang.Nullable; +import org.bson.BsonValue; + +import java.util.Objects; +import java.util.Optional; + +import static java.util.Optional.ofNullable; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public final class ConcreteClientInsertOneResult implements ClientInsertOneResult { + @Nullable + private final BsonValue insertedId; + + public ConcreteClientInsertOneResult(@Nullable final BsonValue insertedId) { + this.insertedId = insertedId; + } + + @Override + public Optional getInsertedId() { + return ofNullable(insertedId); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final ConcreteClientInsertOneResult that = (ConcreteClientInsertOneResult) o; + return Objects.equals(insertedId, that.insertedId); + } + + @Override + public int hashCode() { + return Objects.hashCode(insertedId); + } + + @Override + public String toString() { + return "ClientInsertOneResult{" + + "insertedId=" + insertedId + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientNamespacedDeleteManyModel.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientNamespacedDeleteManyModel.java new file mode 100644 index 00000000000..4deb566cff1 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientNamespacedDeleteManyModel.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.MongoNamespace; +import com.mongodb.client.model.bulk.ClientNamespacedDeleteManyModel; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public final class ConcreteClientNamespacedDeleteManyModel extends AbstractClientNamespacedWriteModel implements ClientNamespacedDeleteManyModel { + public ConcreteClientNamespacedDeleteManyModel(final MongoNamespace namespace, final ClientWriteModel model) { + super(namespace, model); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientNamespacedDeleteOneModel.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientNamespacedDeleteOneModel.java new file mode 100644 index 00000000000..db8a7ad9fde --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientNamespacedDeleteOneModel.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.MongoNamespace; +import com.mongodb.client.model.bulk.ClientNamespacedDeleteOneModel; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public final class ConcreteClientNamespacedDeleteOneModel extends AbstractClientNamespacedWriteModel implements ClientNamespacedDeleteOneModel { + public ConcreteClientNamespacedDeleteOneModel(final MongoNamespace namespace, final ClientWriteModel model) { + super(namespace, model); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientNamespacedInsertOneModel.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientNamespacedInsertOneModel.java new file mode 100644 index 00000000000..e80861b947e --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientNamespacedInsertOneModel.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.MongoNamespace; +import com.mongodb.client.model.bulk.ClientNamespacedInsertOneModel; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public final class ConcreteClientNamespacedInsertOneModel extends AbstractClientNamespacedWriteModel implements ClientNamespacedInsertOneModel { + public ConcreteClientNamespacedInsertOneModel(final MongoNamespace namespace, final ClientWriteModel model) { + super(namespace, model); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientNamespacedReplaceOneModel.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientNamespacedReplaceOneModel.java new file mode 100644 index 00000000000..96ea786169e --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientNamespacedReplaceOneModel.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.MongoNamespace; +import com.mongodb.client.model.bulk.ClientNamespacedReplaceOneModel; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public final class ConcreteClientNamespacedReplaceOneModel extends AbstractClientNamespacedWriteModel implements ClientNamespacedReplaceOneModel { + public ConcreteClientNamespacedReplaceOneModel(final MongoNamespace namespace, final ClientWriteModel model) { + super(namespace, model); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientNamespacedUpdateManyModel.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientNamespacedUpdateManyModel.java new file mode 100644 index 00000000000..28f281287e0 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientNamespacedUpdateManyModel.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.MongoNamespace; +import com.mongodb.client.model.bulk.ClientNamespacedUpdateManyModel; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public final class ConcreteClientNamespacedUpdateManyModel extends AbstractClientNamespacedWriteModel implements ClientNamespacedUpdateManyModel { + public ConcreteClientNamespacedUpdateManyModel(final MongoNamespace namespace, final ClientWriteModel model) { + super(namespace, model); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientNamespacedUpdateOneModel.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientNamespacedUpdateOneModel.java new file mode 100644 index 00000000000..ad3aa0853ab --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientNamespacedUpdateOneModel.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.MongoNamespace; +import com.mongodb.client.model.bulk.ClientNamespacedUpdateOneModel; + +/** + * This class is not part of the public API and may be removed or changed at any time. 
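Illustrative sketch (not part of the patch): the namespaced model classes above are thin wrappers that pair a MongoNamespace with a ClientWriteModel, so a factory for them plausibly reduces to the following; the public ClientNamespacedWriteModel entry point is assumed, not shown here.

import com.mongodb.MongoNamespace;
import com.mongodb.client.model.bulk.ClientNamespacedInsertOneModel;
import com.mongodb.internal.client.model.bulk.ConcreteClientInsertOneModel;
import com.mongodb.internal.client.model.bulk.ConcreteClientNamespacedInsertOneModel;
import org.bson.Document;

final class NamespacedModelSketch {
    // hypothetical helper mirroring what a public factory would presumably do
    static ClientNamespacedInsertOneModel insertOne(final MongoNamespace namespace, final Document document) {
        // the namespaced wrapper simply carries the target namespace together with the write model
        return new ConcreteClientNamespacedInsertOneModel(namespace, new ConcreteClientInsertOneModel(document));
    }
}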
+ */ +public final class ConcreteClientNamespacedUpdateOneModel extends AbstractClientNamespacedWriteModel implements ClientNamespacedUpdateOneModel { + public ConcreteClientNamespacedUpdateOneModel(final MongoNamespace namespace, final ClientWriteModel model) { + super(namespace, model); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientReplaceOneModel.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientReplaceOneModel.java new file mode 100644 index 00000000000..7102fe6257d --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientReplaceOneModel.java @@ -0,0 +1,56 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.client.model.bulk.ClientReplaceOneOptions; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public final class ConcreteClientReplaceOneModel implements ClientWriteModel { + private final Bson filter; + private final Object replacement; + private final ConcreteClientReplaceOneOptions options; + + public ConcreteClientReplaceOneModel(final Bson filter, final Object replacement, @Nullable final ClientReplaceOneOptions options) { + this.filter = filter; + this.replacement = replacement; + this.options = options == null ? ConcreteClientReplaceOneOptions.MUTABLE_EMPTY : (ConcreteClientReplaceOneOptions) options; + } + + public Bson getFilter() { + return filter; + } + + public Object getReplacement() { + return replacement; + } + + public ConcreteClientReplaceOneOptions getOptions() { + return options; + } + + @Override + public String toString() { + return "ClientReplaceOneModel{" + + "filter=" + filter + + ", replacement=" + replacement + + ", options=" + options + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientReplaceOneOptions.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientReplaceOneOptions.java new file mode 100644 index 00000000000..18e9d060763 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientReplaceOneOptions.java @@ -0,0 +1,108 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.client.model.Collation; +import com.mongodb.client.model.bulk.ClientReplaceOneOptions; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +import java.util.Optional; + +import static java.util.Optional.ofNullable; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public final class ConcreteClientReplaceOneOptions implements ClientReplaceOneOptions { + static final ConcreteClientReplaceOneOptions MUTABLE_EMPTY = new ConcreteClientReplaceOneOptions(); + + @Nullable + private Collation collation; + @Nullable + private Bson hint; + @Nullable + private String hintString; + @Nullable + private Boolean upsert; + + public ConcreteClientReplaceOneOptions() { + } + + @Override + public ClientReplaceOneOptions collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } + + /** + * @see #collation(Collation) + */ + public Optional getCollation() { + return ofNullable(collation); + } + + @Override + public ClientReplaceOneOptions hint(@Nullable final Bson hint) { + this.hint = hint; + this.hintString = null; + return this; + } + + /** + * @see #hint(Bson) + */ + public Optional getHint() { + return ofNullable(hint); + } + + @Override + public ClientReplaceOneOptions hintString(@Nullable final String hintString) { + this.hintString = hintString; + this.hint = null; + return this; + } + + /** + * @see #hintString(String) + */ + public Optional getHintString() { + return ofNullable(hintString); + } + + @Override + public ClientReplaceOneOptions upsert(@Nullable final Boolean upsert) { + this.upsert = upsert; + return this; + } + + /** + * @see #upsert(Boolean) + */ + public Optional isUpsert() { + return ofNullable(upsert); + } + + @Override + public String toString() { + return "ClientReplaceOneOptions{" + + "collation=" + collation + + ", hint=" + hint + + ", hintString='" + hintString + '\'' + + ", upsert=" + upsert + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientUpdateManyModel.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientUpdateManyModel.java new file mode 100644 index 00000000000..83d72e937f6 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientUpdateManyModel.java @@ -0,0 +1,47 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.assertions.Assertions; +import com.mongodb.client.model.bulk.ClientUpdateManyOptions; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +/** + * This class is not part of the public API and may be removed or changed at any time. 
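A small usage sketch (illustrative only) of ConcreteClientReplaceOneOptions above, showing that hint(Bson) and hintString(String) clear each other, exactly as the setters do; the index name is hypothetical.

import com.mongodb.internal.client.model.bulk.ConcreteClientReplaceOneOptions;
import org.bson.BsonDocument;
import org.bson.BsonInt32;

final class ReplaceOneOptionsSketch {
    public static void main(final String[] args) {
        ConcreteClientReplaceOneOptions options = new ConcreteClientReplaceOneOptions();
        options.hint(new BsonDocument("x", new BsonInt32(1)));
        options.hintString("x_1");                                 // setting the string form clears the Bson hint
        System.out.println(options.getHint().isPresent());         // false
        System.out.println(options.getHintString().orElse(null));  // x_1
        System.out.println(options);
    }
}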
+ */ +public final class ConcreteClientUpdateManyModel extends AbstractClientUpdateModel implements ClientWriteModel { + public ConcreteClientUpdateManyModel( + final Bson filter, + @Nullable + final Bson update, + @Nullable + final Iterable updatePipeline, + @Nullable final ClientUpdateManyOptions options) { + super(filter, update, updatePipeline, + options == null ? ConcreteClientUpdateManyOptions.MUTABLE_EMPTY : (ConcreteClientUpdateManyOptions) options); + } + + @Override + public String toString() { + return "ClientUpdateManyModel{" + + "filter=" + getFilter() + + ", update=" + getUpdate().map(Object::toString).orElseGet(() -> + getUpdatePipeline().map(Object::toString).orElseThrow(Assertions::fail)) + + ", options=" + getOptions() + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientUpdateManyOptions.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientUpdateManyOptions.java new file mode 100644 index 00000000000..755b6fb56d7 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientUpdateManyOptions.java @@ -0,0 +1,67 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.client.model.Collation; +import com.mongodb.client.model.bulk.ClientUpdateManyOptions; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +/** + * This class is not part of the public API and may be removed or changed at any time. 
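For ConcreteClientUpdateManyModel above, a minimal construction sketch (illustrative; field names hypothetical): the model carries either an update document or an update pipeline, and a null options argument falls back to the package-private MUTABLE_EMPTY defaults.

import com.mongodb.internal.client.model.bulk.ConcreteClientUpdateManyModel;
import org.bson.BsonDocument;
import org.bson.BsonString;

final class UpdateManyModelSketch {
    public static void main(final String[] args) {
        BsonDocument filter = new BsonDocument("status", new BsonString("pending"));
        BsonDocument update = new BsonDocument("$set", new BsonDocument("status", new BsonString("done")));
        // update document supplied, update pipeline and options left null
        ConcreteClientUpdateManyModel model = new ConcreteClientUpdateManyModel(filter, update, null, null);
        System.out.println(model);  // ClientUpdateManyModel{filter=..., update=..., options=...}
    }
}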
+ */ +public final class ConcreteClientUpdateManyOptions extends AbstractClientUpdateOptions implements ClientUpdateManyOptions { + static final ConcreteClientUpdateManyOptions MUTABLE_EMPTY = new ConcreteClientUpdateManyOptions(); + + public ConcreteClientUpdateManyOptions() { + } + + @Override + public ConcreteClientUpdateManyOptions arrayFilters(@Nullable final Iterable arrayFilters) { + return (ConcreteClientUpdateManyOptions) super.arrayFilters(arrayFilters); + } + + @Override + public ConcreteClientUpdateManyOptions collation(@Nullable final Collation collation) { + return (ConcreteClientUpdateManyOptions) super.collation(collation); + } + + @Override + public ConcreteClientUpdateManyOptions hint(@Nullable final Bson hint) { + return (ConcreteClientUpdateManyOptions) super.hint(hint); + } + + @Override + public ConcreteClientUpdateManyOptions hintString(@Nullable final String hintString) { + return (ConcreteClientUpdateManyOptions) super.hintString(hintString); + } + + @Override + public ConcreteClientUpdateManyOptions upsert(@Nullable final Boolean upsert) { + return (ConcreteClientUpdateManyOptions) super.upsert(upsert); + } + + @Override + public String toString() { + return "ClientUpdateManyOptions{" + + "arrayFilters=" + getArrayFilters().orElse(null) + + ", collation=" + getCollation().orElse(null) + + ", hint=" + getHint().orElse(null) + + ", hintString=" + getHintString().map(s -> '\'' + s + '\'') .orElse(null) + + ", upsert=" + isUpsert().orElse(null) + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientUpdateOneModel.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientUpdateOneModel.java new file mode 100644 index 00000000000..83d02669514 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientUpdateOneModel.java @@ -0,0 +1,47 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.assertions.Assertions; +import com.mongodb.client.model.bulk.ClientUpdateOneOptions; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public final class ConcreteClientUpdateOneModel extends AbstractClientUpdateModel implements ClientWriteModel { + public ConcreteClientUpdateOneModel( + final Bson filter, + @Nullable + final Bson update, + @Nullable + final Iterable updatePipeline, + @Nullable final ClientUpdateOneOptions options) { + super(filter, update, updatePipeline, + options == null ? 
ConcreteClientUpdateOneOptions.MUTABLE_EMPTY : (ConcreteClientUpdateOneOptions) options); + } + + @Override + public String toString() { + return "ClientUpdateOneModel{" + + "filter=" + getFilter() + + ", update=" + getUpdate().map(Object::toString).orElseGet(() -> + getUpdatePipeline().map(Object::toString).orElseThrow(Assertions::fail)) + + ", options=" + getOptions() + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientUpdateOneOptions.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientUpdateOneOptions.java new file mode 100644 index 00000000000..fdf960ed1df --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientUpdateOneOptions.java @@ -0,0 +1,67 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.client.model.Collation; +import com.mongodb.client.model.bulk.ClientUpdateOneOptions; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public final class ConcreteClientUpdateOneOptions extends AbstractClientUpdateOptions implements ClientUpdateOneOptions { + static final ConcreteClientUpdateOneOptions MUTABLE_EMPTY = new ConcreteClientUpdateOneOptions(); + + public ConcreteClientUpdateOneOptions() { + } + + @Override + public ConcreteClientUpdateOneOptions arrayFilters(@Nullable final Iterable arrayFilters) { + return (ConcreteClientUpdateOneOptions) super.arrayFilters(arrayFilters); + } + + @Override + public ConcreteClientUpdateOneOptions collation(@Nullable final Collation collation) { + return (ConcreteClientUpdateOneOptions) super.collation(collation); + } + + @Override + public ConcreteClientUpdateOneOptions hint(@Nullable final Bson hint) { + return (ConcreteClientUpdateOneOptions) super.hint(hint); + } + + @Override + public ConcreteClientUpdateOneOptions hintString(@Nullable final String hintString) { + return (ConcreteClientUpdateOneOptions) super.hintString(hintString); + } + + @Override + public ConcreteClientUpdateOneOptions upsert(@Nullable final Boolean upsert) { + return (ConcreteClientUpdateOneOptions) super.upsert(upsert); + } + + @Override + public String toString() { + return "ClientUpdateOneOptions{" + + "arrayFilters=" + getArrayFilters().orElse(null) + + ", collation=" + getCollation().orElse(null) + + ", hint=" + getHint().orElse(null) + + ", hintString=" + getHintString().map(s -> '\'' + s + '\'') .orElse(null) + + ", upsert=" + isUpsert().orElse(null) + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientUpdateResult.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientUpdateResult.java new file mode 100644 index 00000000000..54075b792f1 --- /dev/null +++ 
b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientUpdateResult.java @@ -0,0 +1,87 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.client.model.bulk.ClientUpdateResult; +import com.mongodb.lang.Nullable; +import org.bson.BsonValue; + +import java.util.Objects; +import java.util.Optional; + +import static java.util.Optional.ofNullable; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public final class ConcreteClientUpdateResult implements ClientUpdateResult { + private final long matchedCount; + private final long modifiedCount; + @Nullable + private final BsonValue upsertedId; + + public ConcreteClientUpdateResult( + final long matchedCount, + final long modifiedCount, + @Nullable final BsonValue upsertedId) { + this.matchedCount = matchedCount; + this.modifiedCount = modifiedCount; + this.upsertedId = upsertedId; + } + + @Override + public long getMatchedCount() { + return matchedCount; + } + + @Override + public long getModifiedCount() { + return modifiedCount; + } + + @Override + public Optional getUpsertedId() { + return ofNullable(upsertedId); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final ConcreteClientUpdateResult that = (ConcreteClientUpdateResult) o; + return matchedCount == that.matchedCount + && modifiedCount == that.modifiedCount + && Objects.equals(upsertedId, that.upsertedId); + } + + @Override + public int hashCode() { + return Objects.hash(matchedCount, modifiedCount, upsertedId); + } + + @Override + public String toString() { + return "ClientUpdateResult{" + + "matchedCount=" + matchedCount + + ", modifiedCount=" + modifiedCount + + ", upsertedId=" + upsertedId + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/UnacknowledgedClientBulkWriteResult.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/UnacknowledgedClientBulkWriteResult.java new file mode 100644 index 00000000000..cdd649b3389 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/UnacknowledgedClientBulkWriteResult.java @@ -0,0 +1,76 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.annotations.Immutable; +import com.mongodb.client.model.bulk.ClientBulkWriteResult; + +import java.util.Optional; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +@Immutable +public final class UnacknowledgedClientBulkWriteResult implements ClientBulkWriteResult { + public static final UnacknowledgedClientBulkWriteResult INSTANCE = new UnacknowledgedClientBulkWriteResult(); + + private UnacknowledgedClientBulkWriteResult() { + } + + @Override + public boolean isAcknowledged() { + return false; + } + + @Override + public long getInsertedCount() throws UnsupportedOperationException { + throw createUnacknowledgedResultsException(); + } + + @Override + public long getUpsertedCount() throws UnsupportedOperationException { + throw createUnacknowledgedResultsException(); + } + + @Override + public long getMatchedCount() throws UnsupportedOperationException { + throw createUnacknowledgedResultsException(); + } + + @Override + public long getModifiedCount() throws UnsupportedOperationException { + throw createUnacknowledgedResultsException(); + } + + @Override + public long getDeletedCount() throws UnsupportedOperationException { + throw createUnacknowledgedResultsException(); + } + + @Override + public Optional getVerboseResults() throws UnsupportedOperationException { + throw createUnacknowledgedResultsException(); + } + + private static UnsupportedOperationException createUnacknowledgedResultsException() { + return new UnsupportedOperationException("Cannot get information about an unacknowledged write"); + } + + @Override + public String toString() { + return "UnacknowledgedClientBulkWriteResult{}"; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/package-info.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/package-info.java new file mode 100644 index 00000000000..2d66f44646b --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/package-info.java @@ -0,0 +1,23 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Internal program elements related to {@link com.mongodb.client.model.bulk}. 
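A short sketch (illustrative only) of the unacknowledged-result contract above: isAcknowledged() reports false and every count accessor throws.

import com.mongodb.client.model.bulk.ClientBulkWriteResult;
import com.mongodb.internal.client.model.bulk.UnacknowledgedClientBulkWriteResult;

final class UnacknowledgedResultSketch {
    public static void main(final String[] args) {
        ClientBulkWriteResult result = UnacknowledgedClientBulkWriteResult.INSTANCE;
        System.out.println(result.isAcknowledged());   // false
        try {
            result.getInsertedCount();                 // any count accessor behaves the same way
        } catch (UnsupportedOperationException e) {
            System.out.println(e.getMessage());        // Cannot get information about an unacknowledged write
        }
    }
}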
+ */ +@NonNullApi +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/internal/connection/AsyncConnection.java b/driver-core/src/main/com/mongodb/internal/connection/AsyncConnection.java index 2891bc28732..befc0d9aac2 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/AsyncConnection.java +++ b/driver-core/src/main/com/mongodb/internal/connection/AsyncConnection.java @@ -50,8 +50,7 @@ void commandAsync(String database, BsonDocument command, FieldNameValidator void commandAsync(String database, BsonDocument command, FieldNameValidator commandFieldNameValidator, @Nullable ReadPreference readPreference, Decoder commandResultDecoder, - OperationContext operationContext, boolean responseExpected, @Nullable SplittablePayload payload, - @Nullable FieldNameValidator payloadFieldNameValidator, SingleResultCallback callback); + OperationContext operationContext, boolean responseExpected, MessageSequences sequences, SingleResultCallback callback); void markAsPinned(Connection.PinningMode pinningMode); } diff --git a/driver-core/src/main/com/mongodb/internal/connection/BsonWriterHelper.java b/driver-core/src/main/com/mongodb/internal/connection/BsonWriterHelper.java index 12c006c7b97..63ccbf62a04 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/BsonWriterHelper.java +++ b/driver-core/src/main/com/mongodb/internal/connection/BsonWriterHelper.java @@ -16,31 +16,56 @@ package com.mongodb.internal.connection; +import com.mongodb.internal.connection.DualMessageSequences.EncodeDocumentsResult; +import com.mongodb.internal.connection.DualMessageSequences.WritersProviderAndLimitsChecker; +import com.mongodb.lang.Nullable; +import org.bson.BsonBinaryWriter; +import org.bson.BsonBinaryWriterSettings; +import org.bson.BsonContextType; import org.bson.BsonDocument; import org.bson.BsonElement; import org.bson.BsonMaximumSizeExceededException; import org.bson.BsonValue; import org.bson.BsonWriter; +import org.bson.BsonWriterSettings; +import org.bson.FieldNameValidator; import org.bson.codecs.BsonValueCodecProvider; -import org.bson.codecs.Codec; +import org.bson.codecs.Encoder; import org.bson.codecs.EncoderContext; import org.bson.codecs.configuration.CodecRegistry; import org.bson.io.BsonOutput; import java.util.List; +import static com.mongodb.assertions.Assertions.assertTrue; +import static com.mongodb.internal.connection.DualMessageSequences.WritersProviderAndLimitsChecker.WriteResult.FAIL_LIMIT_EXCEEDED; +import static com.mongodb.internal.connection.DualMessageSequences.WritersProviderAndLimitsChecker.WriteResult.OK_LIMIT_NOT_REACHED; +import static com.mongodb.internal.connection.DualMessageSequences.WritersProviderAndLimitsChecker.WriteResult.OK_LIMIT_REACHED; +import static com.mongodb.internal.connection.MessageSettings.DOCUMENT_HEADROOM_SIZE; import static java.lang.String.format; import static org.bson.codecs.configuration.CodecRegistries.fromProviders; -final class BsonWriterHelper { - private static final int DOCUMENT_HEADROOM = 1024 * 16; +/** + * This class is not part of the public API and may be removed or changed at any time. 
+ */ +public final class BsonWriterHelper { private static final CodecRegistry REGISTRY = fromProviders(new BsonValueCodecProvider()); private static final EncoderContext ENCODER_CONTEXT = EncoderContext.builder().build(); - static void writeElements(final BsonWriter writer, final List bsonElements) { - for (BsonElement bsonElement : bsonElements) { - writer.writeName(bsonElement.getName()); - getCodec(bsonElement.getValue()).encode(writer, bsonElement.getValue(), ENCODER_CONTEXT); + static void appendElementsToDocument( + final BsonOutput bsonOutputWithDocument, + final int documentStartPosition, + @Nullable final List bsonElements) { + if ((bsonElements == null) || bsonElements.isEmpty()) { + return; + } + try (AppendingBsonWriter writer = new AppendingBsonWriter(bsonOutputWithDocument, documentStartPosition)) { + for (BsonElement element : bsonElements) { + String name = element.getName(); + BsonValue value = element.getValue(); + writer.writeName(name); + encodeUsingRegistry(writer, value); + } } } @@ -65,16 +90,86 @@ static void writePayload(final BsonWriter writer, final BsonOutput bsonOutput, f } if (payload.getPosition() == 0) { - throw new BsonMaximumSizeExceededException(format("Payload document size is larger than maximum of %d.", - payloadSettings.getMaxDocumentSize())); + throw createBsonMaximumSizeExceededException(payloadSettings.getMaxDocumentSize()); } } + /** + * @return See {@link DualMessageSequences#encodeDocuments(WritersProviderAndLimitsChecker)}. + */ + static EncodeDocumentsResult writeDocumentsOfDualMessageSequences( + final DualMessageSequences dualMessageSequences, + final int commandDocumentSizeInBytes, + final BsonOutput firstOutput, + final BsonOutput secondOutput, + final MessageSettings messageSettings) { + BsonBinaryWriter firstWriter = createBsonBinaryWriter(firstOutput, dualMessageSequences.getFirstFieldNameValidator(), null); + BsonBinaryWriter secondWriter = createBsonBinaryWriter(secondOutput, dualMessageSequences.getSecondFieldNameValidator(), null); + // the size of operation-agnostic command fields (a.k.a. extra elements) is counted towards `messageOverheadInBytes` + int messageOverheadInBytes = 1000; + int maxSizeInBytes = messageSettings.getMaxMessageSize() - (messageOverheadInBytes + commandDocumentSizeInBytes); + int firstStart = firstOutput.getPosition(); + int secondStart = secondOutput.getPosition(); + int maxBatchCount = messageSettings.getMaxBatchCount(); + return dualMessageSequences.encodeDocuments(writeAction -> { + int firstBeforeWritePosition = firstOutput.getPosition(); + int secondBeforeWritePosition = secondOutput.getPosition(); + int batchCountAfterWrite = writeAction.doAndGetBatchCount(firstWriter, secondWriter); + assertTrue(batchCountAfterWrite <= maxBatchCount); + int writtenSizeInBytes = + firstOutput.getPosition() - firstStart + + secondOutput.getPosition() - secondStart; + if (writtenSizeInBytes < maxSizeInBytes && batchCountAfterWrite < maxBatchCount) { + return OK_LIMIT_NOT_REACHED; + } else if (writtenSizeInBytes > maxSizeInBytes) { + firstOutput.truncateToPosition(firstBeforeWritePosition); + secondOutput.truncateToPosition(secondBeforeWritePosition); + if (batchCountAfterWrite == 1) { + // we have failed to write a single document + throw createBsonMaximumSizeExceededException(messageSettings.getMaxDocumentSize()); + } + return FAIL_LIMIT_EXCEEDED; + } else { + return OK_LIMIT_REACHED; + } + }); + } + + /** + * @param messageSettings Non-{@code null} iff the document size limit must be validated. 
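To illustrate appendElementsToDocument above: a rough sketch, assuming same-package access (the helper is package-private) and hypothetical command and database names, that first writes a complete command document and then splices an extra top-level element into it.

package com.mongodb.internal.connection;

import org.bson.BsonBinaryWriter;
import org.bson.BsonDocument;
import org.bson.BsonElement;
import org.bson.BsonString;
import org.bson.codecs.BsonDocumentCodec;
import org.bson.codecs.EncoderContext;

import static java.util.Collections.singletonList;

final class AppendElementsSketch {
    static void appendDbField(final ByteBufferBsonOutput bsonOutput) {
        int documentStartPosition = bsonOutput.getPosition();
        // write a complete command document first
        new BsonDocumentCodec().encode(
                new BsonBinaryWriter(bsonOutput),
                new BsonDocument("insert", new BsonString("coll")),
                EncoderContext.builder().build());
        // then append an extra top-level element to the already-serialized document
        BsonWriterHelper.appendElementsToDocument(bsonOutput, documentStartPosition,
                singletonList(new BsonElement("$db", new BsonString("db"))));
    }
}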
+ */ + static BsonBinaryWriter createBsonBinaryWriter( + final BsonOutput out, + final FieldNameValidator validator, + @Nullable final MessageSettings messageSettings) { + return new BsonBinaryWriter( + new BsonWriterSettings(), + messageSettings == null + ? new BsonBinaryWriterSettings() + : new BsonBinaryWriterSettings(messageSettings.getMaxDocumentSize() + DOCUMENT_HEADROOM_SIZE), + out, + validator); + } + + /** + * Backpatches the document/message/sequence length into the beginning of the document/message/sequence. + * + * @param startPosition The start position of the document/message/sequence in {@code bsonOutput}. + */ + static void backpatchLength(final int startPosition, final BsonOutput bsonOutput) { + int messageLength = bsonOutput.getPosition() - startPosition; + bsonOutput.writeInt32(startPosition, messageLength); + } + + private static BsonMaximumSizeExceededException createBsonMaximumSizeExceededException(final int maxSize) { + return new BsonMaximumSizeExceededException(format("Payload document size is larger than maximum of %d.", maxSize)); + } + private static boolean writeDocument(final BsonWriter writer, final BsonOutput bsonOutput, final MessageSettings settings, final BsonDocument document, final int messageStartPosition, final int batchItemCount, final int maxSplittableDocumentSize) { int currentPosition = bsonOutput.getPosition(); - getCodec(document).encode(writer, document, ENCODER_CONTEXT); + encodeUsingRegistry(writer, document); int messageSize = bsonOutput.getPosition() - messageStartPosition; int documentSize = bsonOutput.getPosition() - currentPosition; if (exceedsLimits(settings, messageSize, documentSize, batchItemCount) @@ -85,16 +180,17 @@ private static boolean writeDocument(final BsonWriter writer, final BsonOutput b return true; } - @SuppressWarnings({"unchecked"}) - private static Codec getCodec(final BsonValue bsonValue) { - return (Codec) REGISTRY.get(bsonValue.getClass()); + static void encodeUsingRegistry(final BsonWriter writer, final BsonValue value) { + @SuppressWarnings("unchecked") + Encoder encoder = (Encoder) REGISTRY.get(value.getClass()); + encoder.encode(writer, value, ENCODER_CONTEXT); } private static MessageSettings getPayloadMessageSettings(final SplittablePayload.Type type, final MessageSettings settings) { MessageSettings payloadMessageSettings = settings; if (type != SplittablePayload.Type.INSERT) { payloadMessageSettings = createMessageSettingsBuilder(settings) - .maxDocumentSize(settings.getMaxDocumentSize() + DOCUMENT_HEADROOM) + .maxDocumentSize(settings.getMaxDocumentSize() + DOCUMENT_HEADROOM_SIZE) .build(); } return payloadMessageSettings; @@ -102,7 +198,7 @@ private static MessageSettings getPayloadMessageSettings(final SplittablePayload private static MessageSettings getDocumentMessageSettings(final MessageSettings settings) { return createMessageSettingsBuilder(settings) - .maxMessageSize(settings.getMaxDocumentSize() + DOCUMENT_HEADROOM) + .maxMessageSize(settings.getMaxDocumentSize() + DOCUMENT_HEADROOM_SIZE) .build(); } @@ -126,8 +222,50 @@ private static boolean exceedsLimits(final MessageSettings settings, final int m return false; } + /** + * A {@link BsonWriter} that allows appending key/value pairs to a document that has been fully written to a {@link BsonOutput}. 
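backpatchLength supports the reserve-then-patch pattern used for OP_MSG document sequences later in this patch. A minimal sketch, assuming same-package access and a hypothetical sequence identifier:

package com.mongodb.internal.connection;

import org.bson.io.BsonOutput;

final class BackpatchSketch {
    static void writeDocumentSequenceHeader(final BsonOutput bsonOutput) {
        int sequenceStart = bsonOutput.getPosition();
        bsonOutput.writeInt32(0);              // length placeholder, patched below
        bsonOutput.writeCString("documents");  // hypothetical sequence identifier
        // ... the BSON documents of the sequence would be written here ...
        BsonWriterHelper.backpatchLength(sequenceStart, bsonOutput);  // writes (current position - sequenceStart) at sequenceStart
    }
}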
+ */ + private static final class AppendingBsonWriter extends LevelCountingBsonWriter implements AutoCloseable { + private static final int INITIAL_LEVEL = DEFAULT_INITIAL_LEVEL + 1; - private BsonWriterHelper() { + /** + * @param bsonOutputWithDocument A {@link BsonOutput} {@linkplain BsonOutput#getPosition() positioned} + * immediately after the end of the document. + * @param documentStartPosition The {@linkplain BsonOutput#getPosition() position} of the start of the document + * in {@code bsonOutputWithDocument}. + */ + AppendingBsonWriter(final BsonOutput bsonOutputWithDocument, final int documentStartPosition) { + super( + new InternalAppendingBsonBinaryWriter(bsonOutputWithDocument, documentStartPosition), + INITIAL_LEVEL); + } + + @Override + public void writeEndDocument() { + assertTrue(getCurrentLevel() > INITIAL_LEVEL); + super.writeEndDocument(); + } + + @Override + public void close() { + try (InternalAppendingBsonBinaryWriter writer = (InternalAppendingBsonBinaryWriter) getBsonWriter()) { + writer.writeEndDocument(); + } + } + + private static final class InternalAppendingBsonBinaryWriter extends BsonBinaryWriter { + InternalAppendingBsonBinaryWriter(final BsonOutput bsonOutputWithDocument, final int documentStartPosition) { + super(bsonOutputWithDocument); + int documentEndPosition = bsonOutputWithDocument.getPosition(); + int bsonDocumentEndingSize = 1; + int appendFromPosition = documentEndPosition - bsonDocumentEndingSize; + bsonOutputWithDocument.truncateToPosition(appendFromPosition); + setState(State.NAME); + setContext(new Context(null, BsonContextType.DOCUMENT, documentStartPosition)); + } + } } + private BsonWriterHelper() { + } } diff --git a/driver-core/src/main/com/mongodb/internal/connection/ByteBufferBsonOutput.java b/driver-core/src/main/com/mongodb/internal/connection/ByteBufferBsonOutput.java index 5cd2000d879..40df1b867fd 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/ByteBufferBsonOutput.java +++ b/driver-core/src/main/com/mongodb/internal/connection/ByteBufferBsonOutput.java @@ -25,6 +25,7 @@ import java.util.ArrayList; import java.util.List; +import static com.mongodb.assertions.Assertions.assertTrue; import static com.mongodb.assertions.Assertions.notNull; /** @@ -52,6 +53,19 @@ public ByteBufferBsonOutput(final BufferProvider bufferProvider) { this.bufferProvider = notNull("bufferProvider", bufferProvider); } + /** + * Creates a new empty {@link ByteBufferBsonOutput.Branch}, + * which gets merged into this {@link ByteBufferBsonOutput} on {@link ByteBufferBsonOutput.Branch#close()} + * by appending its data without copying it. + * If multiple branches are created, they are merged in the order they are {@linkplain ByteBufferBsonOutput.Branch#close() closed}. + * {@linkplain #close() Closing} this {@link ByteBufferBsonOutput} does not {@linkplain ByteBufferBsonOutput.Branch#close() close} the branch. + * + * @return A new {@link ByteBufferBsonOutput.Branch}. 
+ */ + public ByteBufferBsonOutput.Branch branch() { + return new ByteBufferBsonOutput.Branch(this); + } + @Override public void writeBytes(final byte[] bytes, final int offset, final int length) { ensureOpen(); @@ -156,7 +170,9 @@ public int pipe(final OutputStream out) throws IOException { @Override public void truncateToPosition(final int newPosition) { ensureOpen(); - + if (newPosition == position) { + return; + } if (newPosition > position || newPosition < 0) { throw new IllegalArgumentException(); } @@ -174,36 +190,89 @@ public void truncateToPosition(final int newPosition) { position = newPosition; } + /** + * The {@link #flush()} method of {@link ByteBufferBsonOutput} and of its subclasses does nothing.
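A usage sketch (illustrative only) of the branch/merge contract described above: each branch buffers its bytes independently and is appended to the parent in the order it is closed.

import com.mongodb.connection.BufferProvider;
import com.mongodb.internal.connection.ByteBufferBsonOutput;

final class BranchSketch {
    static void mergeInCloseOrder(final BufferProvider bufferProvider) {
        try (ByteBufferBsonOutput parent = new ByteBufferBsonOutput(bufferProvider)) {
            parent.writeByte(0);                                     // written directly to the parent
            ByteBufferBsonOutput.Branch first = parent.branch();
            ByteBufferBsonOutput.Branch second = parent.branch();
            first.writeByte(1);
            second.writeByte(2);
            second.close();   // parent now holds 0, 2
            first.close();    // parent now holds 0, 2, 1 (branches merge in close order)
        }
    }
}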

+ */ + @Override + public final void flush() throws IOException { + } + + /** + * {@inheritDoc} + * <p> + * Idempotent.</p>
+ */ @Override public void close() { - for (final ByteBuf cur : bufferList) { - cur.release(); + if (isOpen()) { + for (final ByteBuf cur : bufferList) { + cur.release(); + } + bufferList.clear(); + closed = true; } - bufferList.clear(); - closed = true; } private BufferPositionPair getBufferPositionPair(final int absolutePosition) { int positionInBuffer = absolutePosition; int bufferIndex = 0; - int bufferSize = INITIAL_BUFFER_SIZE; + int bufferSize = bufferList.get(bufferIndex).position(); int startPositionOfBuffer = 0; while (startPositionOfBuffer + bufferSize <= absolutePosition) { bufferIndex++; startPositionOfBuffer += bufferSize; positionInBuffer -= bufferSize; - bufferSize = bufferList.get(bufferIndex).limit(); + bufferSize = bufferList.get(bufferIndex).position(); } return new BufferPositionPair(bufferIndex, positionInBuffer); } private void ensureOpen() { - if (closed) { + if (!isOpen()) { throw new IllegalStateException("The output is closed"); } } + boolean isOpen() { + return !closed; + } + + /** + * @see #branch() + */ + private void merge(final ByteBufferBsonOutput branch) { + assertTrue(branch instanceof ByteBufferBsonOutput.Branch); + branch.bufferList.forEach(ByteBuf::retain); + bufferList.addAll(branch.bufferList); + curBufferIndex += branch.curBufferIndex + 1; + position += branch.position; + } + + public static final class Branch extends ByteBufferBsonOutput { + private final ByteBufferBsonOutput parent; + + private Branch(final ByteBufferBsonOutput parent) { + super(parent.bufferProvider); + this.parent = parent; + } + + /** + * @see #branch() + */ + @Override + public void close() { + if (isOpen()) { + try { + assertTrue(parent.isOpen()); + parent.merge(this); + } finally { + super.close(); + } + } + } + } + private static final class BufferPositionPair { private final int bufferIndex; private int position; diff --git a/driver-core/src/main/com/mongodb/internal/connection/CommandMessage.java b/driver-core/src/main/com/mongodb/internal/connection/CommandMessage.java index bac2a86e61d..46eabab21bb 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/CommandMessage.java +++ b/driver-core/src/main/com/mongodb/internal/connection/CommandMessage.java @@ -23,6 +23,7 @@ import com.mongodb.ServerApi; import com.mongodb.connection.ClusterConnectionMode; import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.connection.MessageSequences.EmptyMessageSequences; import com.mongodb.internal.session.SessionContext; import com.mongodb.lang.Nullable; import org.bson.BsonArray; @@ -34,7 +35,6 @@ import org.bson.BsonString; import org.bson.ByteBuf; import org.bson.FieldNameValidator; -import org.bson.io.BsonOutput; import java.io.ByteArrayOutputStream; import java.io.UnsupportedEncodingException; @@ -45,12 +45,17 @@ import static com.mongodb.ReadPreference.primary; import static com.mongodb.ReadPreference.primaryPreferred; import static com.mongodb.assertions.Assertions.assertFalse; +import static com.mongodb.assertions.Assertions.assertNotNull; import static com.mongodb.assertions.Assertions.assertTrue; +import static com.mongodb.assertions.Assertions.fail; import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.connection.ClusterConnectionMode.LOAD_BALANCED; import static com.mongodb.connection.ClusterConnectionMode.SINGLE; import static com.mongodb.connection.ServerType.SHARD_ROUTER; import static com.mongodb.connection.ServerType.STANDALONE; +import static 
com.mongodb.internal.connection.BsonWriterHelper.appendElementsToDocument; +import static com.mongodb.internal.connection.BsonWriterHelper.backpatchLength; +import static com.mongodb.internal.connection.BsonWriterHelper.writeDocumentsOfDualMessageSequences; import static com.mongodb.internal.connection.BsonWriterHelper.writePayload; import static com.mongodb.internal.connection.ByteBufBsonDocument.createList; import static com.mongodb.internal.connection.ByteBufBsonDocument.createOne; @@ -64,43 +69,57 @@ *

* <p>This class is not part of the public API and may be removed or changed at any time</p>

*/ public final class CommandMessage extends RequestMessage { + /** + * Specifies that the `OP_MSG` section payload is a BSON document. + */ + private static final byte PAYLOAD_TYPE_0_DOCUMENT = 0; + /** + * Specifies that the `OP_MSG` section payload is a sequence of BSON documents. + */ + private static final byte PAYLOAD_TYPE_1_DOCUMENT_SEQUENCE = 1; + private final MongoNamespace namespace; private final BsonDocument command; private final FieldNameValidator commandFieldNameValidator; private final ReadPreference readPreference; private final boolean exhaustAllowed; - private final SplittablePayload payload; - private final FieldNameValidator payloadFieldNameValidator; + private final MessageSequences sequences; private final boolean responseExpected; + /** + * {@code null} iff either {@link #sequences} is not of the {@link DualMessageSequences} type, + * or it is of that type, but it has not been {@linkplain #encodeMessageBodyWithMetadata(ByteBufferBsonOutput, OperationContext) encoded}. + */ + @Nullable + private Boolean dualMessageSequencesRequireResponse; private final ClusterConnectionMode clusterConnectionMode; private final ServerApi serverApi; CommandMessage(final MongoNamespace namespace, final BsonDocument command, final FieldNameValidator commandFieldNameValidator, final ReadPreference readPreference, final MessageSettings settings, final ClusterConnectionMode clusterConnectionMode, @Nullable final ServerApi serverApi) { - this(namespace, command, commandFieldNameValidator, readPreference, settings, true, null, null, + this(namespace, command, commandFieldNameValidator, readPreference, settings, true, EmptyMessageSequences.INSTANCE, clusterConnectionMode, serverApi); } CommandMessage(final MongoNamespace namespace, final BsonDocument command, final FieldNameValidator commandFieldNameValidator, final ReadPreference readPreference, final MessageSettings settings, final boolean exhaustAllowed, final ClusterConnectionMode clusterConnectionMode, @Nullable final ServerApi serverApi) { - this(namespace, command, commandFieldNameValidator, readPreference, settings, true, exhaustAllowed, null, null, + this(namespace, command, commandFieldNameValidator, readPreference, settings, true, exhaustAllowed, EmptyMessageSequences.INSTANCE, clusterConnectionMode, serverApi); } CommandMessage(final MongoNamespace namespace, final BsonDocument command, final FieldNameValidator commandFieldNameValidator, final ReadPreference readPreference, final MessageSettings settings, final boolean responseExpected, - @Nullable final SplittablePayload payload, @Nullable final FieldNameValidator payloadFieldNameValidator, + final MessageSequences sequences, final ClusterConnectionMode clusterConnectionMode, @Nullable final ServerApi serverApi) { - this(namespace, command, commandFieldNameValidator, readPreference, settings, responseExpected, false, payload, - payloadFieldNameValidator, clusterConnectionMode, serverApi); + this(namespace, command, commandFieldNameValidator, readPreference, settings, responseExpected, false, + sequences, clusterConnectionMode, serverApi); } CommandMessage(final MongoNamespace namespace, final BsonDocument command, final FieldNameValidator commandFieldNameValidator, final ReadPreference readPreference, final MessageSettings settings, final boolean responseExpected, final boolean exhaustAllowed, - @Nullable final SplittablePayload payload, @Nullable final FieldNameValidator payloadFieldNameValidator, + final MessageSequences sequences, final ClusterConnectionMode 
clusterConnectionMode, @Nullable final ServerApi serverApi) { super(namespace.getFullName(), getOpCode(settings, clusterConnectionMode, serverApi), settings); this.namespace = namespace; @@ -108,9 +127,9 @@ public final class CommandMessage extends RequestMessage { this.commandFieldNameValidator = commandFieldNameValidator; this.readPreference = readPreference; this.responseExpected = responseExpected; + this.dualMessageSequencesRequireResponse = null; this.exhaustAllowed = exhaustAllowed; - this.payload = payload; - this.payloadFieldNameValidator = payloadFieldNameValidator; + this.sequences = sequences; this.clusterConnectionMode = notNull("clusterConnectionMode", clusterConnectionMode); this.serverApi = serverApi; assertTrue(useOpMsg() || responseExpected); @@ -119,8 +138,8 @@ public final class CommandMessage extends RequestMessage { /** * Create a BsonDocument representing the logical document encoded by an OP_MSG. *

- * The returned document will contain all the fields from the Body (Kind 0) Section, as well as all fields represented by - * OP_MSG Document Sequence (Kind 1) Sections. + * The returned document will contain all the fields from the `PAYLOAD_TYPE_0_DOCUMENT` section, as well as all fields represented by + * `PAYLOAD_TYPE_1_DOCUMENT_SEQUENCE` sections. */ BsonDocument getCommandDocument(final ByteBufferBsonOutput bsonOutput) { List byteBuffers = bsonOutput.getByteBuffers(); @@ -130,14 +149,14 @@ BsonDocument getCommandDocument(final ByteBufferBsonOutput bsonOutput) { byteBuf.position(getEncodingMetadata().getFirstDocumentPosition()); ByteBufBsonDocument byteBufBsonDocument = createOne(byteBuf); - // If true, it means there is at least one Kind 1:Document Sequence in the OP_MSG + // If true, it means there is at least one `PAYLOAD_TYPE_1_DOCUMENT_SEQUENCE` section in the OP_MSG if (byteBuf.hasRemaining()) { BsonDocument commandBsonDocument = byteBufBsonDocument.toBaseBsonDocument(); // Each loop iteration processes one Document Sequence // When there are no more bytes remaining, there are no more Document Sequences while (byteBuf.hasRemaining()) { - // skip reading the payload type, we know it is 1 + // skip reading the payload type, we know it is `PAYLOAD_TYPE_1` byteBuf.position(byteBuf.position() + 1); int sequenceStart = byteBuf.position(); int sequenceSizeInBytes = byteBuf.getInt(); @@ -170,7 +189,7 @@ BsonDocument getCommandDocument(final ByteBufferBsonOutput bsonOutput) { /** * Get the field name from a buffer positioned at the start of the document sequence identifier of an OP_MSG Section of type - * Document Sequence (Kind 1). + * `PAYLOAD_TYPE_1_DOCUMENT_SEQUENCE`. *

* Upon normal completion of the method, the buffer will be positioned at the start of the first BSON object in the sequence. */ @@ -192,7 +211,15 @@ boolean isResponseExpected() { if (responseExpected) { return true; } else { - return payload != null && payload.isOrdered() && payload.hasAnotherSplit(); + if (sequences instanceof SplittablePayload) { + SplittablePayload payload = (SplittablePayload) sequences; + return payload.isOrdered() && payload.hasAnotherSplit(); + } else if (sequences instanceof DualMessageSequences) { + return assertNotNull(dualMessageSequencesRequireResponse); + } else if (!(sequences instanceof EmptyMessageSequences)) { + fail(sequences.toString()); + } + return false; } } @@ -201,47 +228,74 @@ MongoNamespace getNamespace() { } @Override - protected EncodingMetadata encodeMessageBodyWithMetadata(final BsonOutput bsonOutput, final OperationContext operationContext) { + protected EncodingMetadata encodeMessageBodyWithMetadata(final ByteBufferBsonOutput bsonOutput, final OperationContext operationContext) { + int commandStartPosition = useOpMsg() ? writeOpMsg(bsonOutput, operationContext) : writeOpQuery(bsonOutput); + return new EncodingMetadata(commandStartPosition); + } + + @SuppressWarnings("try") + private int writeOpMsg(final ByteBufferBsonOutput bsonOutput, final OperationContext operationContext) { int messageStartPosition = bsonOutput.getPosition() - MESSAGE_PROLOGUE_LENGTH; - int commandStartPosition; - if (useOpMsg()) { - int flagPosition = bsonOutput.getPosition(); - bsonOutput.writeInt32(0); // flag bits - bsonOutput.writeByte(0); // payload type - commandStartPosition = bsonOutput.getPosition(); - - addDocument(command, bsonOutput, commandFieldNameValidator, getExtraElements(operationContext)); - - if (payload != null) { - bsonOutput.writeByte(1); // payload type - int payloadBsonOutputStartPosition = bsonOutput.getPosition(); - bsonOutput.writeInt32(0); // size - bsonOutput.writeCString(payload.getPayloadName()); - writePayload(new BsonBinaryWriter(bsonOutput, payloadFieldNameValidator), bsonOutput, getSettings(), - messageStartPosition, payload, getSettings().getMaxDocumentSize()); - - int payloadBsonOutputLength = bsonOutput.getPosition() - payloadBsonOutputStartPosition; - bsonOutput.writeInt32(payloadBsonOutputStartPosition, payloadBsonOutputLength); + int flagPosition = bsonOutput.getPosition(); + bsonOutput.writeInt32(0); // flag bits + bsonOutput.writeByte(PAYLOAD_TYPE_0_DOCUMENT); + int commandStartPosition = bsonOutput.getPosition(); + List extraElements = getExtraElements(operationContext); + + int commandDocumentSizeInBytes = writeDocument(command, bsonOutput, commandFieldNameValidator); + if (sequences instanceof SplittablePayload) { + appendElementsToDocument(bsonOutput, commandStartPosition, extraElements); + SplittablePayload payload = (SplittablePayload) sequences; + try (FinishOpMsgSectionWithPayloadType1 finishSection = startOpMsgSectionWithPayloadType1( + bsonOutput, payload.getPayloadName())) { + writePayload( + new BsonBinaryWriter(bsonOutput, payload.getFieldNameValidator()), + bsonOutput, getSettings(), messageStartPosition, payload, getSettings().getMaxDocumentSize()); } - - // Write the flag bits - bsonOutput.writeInt32(flagPosition, getOpMsgFlagBits()); + } else if (sequences instanceof DualMessageSequences) { + DualMessageSequences dualMessageSequences = (DualMessageSequences) sequences; + try (ByteBufferBsonOutput.Branch bsonOutputBranch2 = bsonOutput.branch(); + ByteBufferBsonOutput.Branch bsonOutputBranch1 = 
bsonOutput.branch()) { + DualMessageSequences.EncodeDocumentsResult encodeDocumentsResult; + try (FinishOpMsgSectionWithPayloadType1 finishSection1 = startOpMsgSectionWithPayloadType1( + bsonOutputBranch1, dualMessageSequences.getFirstSequenceId()); + FinishOpMsgSectionWithPayloadType1 finishSection2 = startOpMsgSectionWithPayloadType1( + bsonOutputBranch2, dualMessageSequences.getSecondSequenceId())) { + encodeDocumentsResult = writeDocumentsOfDualMessageSequences( + dualMessageSequences, commandDocumentSizeInBytes, bsonOutputBranch1, + bsonOutputBranch2, getSettings()); + } + dualMessageSequencesRequireResponse = encodeDocumentsResult.isServerResponseRequired(); + extraElements.addAll(encodeDocumentsResult.getExtraElements()); + appendElementsToDocument(bsonOutput, commandStartPosition, extraElements); + } + } else if (sequences instanceof EmptyMessageSequences) { + appendElementsToDocument(bsonOutput, commandStartPosition, extraElements); } else { - bsonOutput.writeInt32(0); - bsonOutput.writeCString(namespace.getFullName()); - bsonOutput.writeInt32(0); - bsonOutput.writeInt32(-1); + fail(sequences.toString()); + } + + // Write the flag bits + bsonOutput.writeInt32(flagPosition, getOpMsgFlagBits()); + return commandStartPosition; + } - commandStartPosition = bsonOutput.getPosition(); + private int writeOpQuery(final ByteBufferBsonOutput bsonOutput) { + bsonOutput.writeInt32(0); + bsonOutput.writeCString(namespace.getFullName()); + bsonOutput.writeInt32(0); + bsonOutput.writeInt32(-1); - List elements = null; - if (serverApi != null) { - elements = new ArrayList<>(3); - addServerApiElements(elements); - } - addDocument(command, bsonOutput, commandFieldNameValidator, elements); + int commandStartPosition = bsonOutput.getPosition(); + + List elements = null; + if (serverApi != null) { + elements = new ArrayList<>(3); + addServerApiElements(elements); } - return new EncodingMetadata(commandStartPosition); + writeDocument(command, bsonOutput, commandFieldNameValidator); + appendElementsToDocument(bsonOutput, commandStartPosition, elements); + return commandStartPosition; } private int getOpMsgFlagBits() { @@ -269,7 +323,7 @@ private List getExtraElements(final OperationContext operationConte SessionContext sessionContext = operationContext.getSessionContext(); TimeoutContext timeoutContext = operationContext.getTimeoutContext(); - List extraElements = new ArrayList<>(); + ArrayList extraElements = new ArrayList<>(); if (!getSettings().isCryptd()) { timeoutContext.runMaxTimeMS(maxTimeMS -> extraElements.add(new BsonElement("maxTimeMS", new BsonInt64(maxTimeMS))) @@ -341,6 +395,19 @@ private void addReadConcernDocument(final List extraElements, final } } + /** + * @param sequenceId The identifier of the sequence contained in the {@code OP_MSG} section to be written. 
+ * @see OP_MSG + */ + private FinishOpMsgSectionWithPayloadType1 startOpMsgSectionWithPayloadType1(final ByteBufferBsonOutput bsonOutput, final String sequenceId) { + bsonOutput.writeByte(PAYLOAD_TYPE_1_DOCUMENT_SEQUENCE); + int sequenceStart = bsonOutput.getPosition(); + // size to be patched back later + bsonOutput.writeInt32(0); + bsonOutput.writeCString(sequenceId); + return () -> backpatchLength(sequenceStart, bsonOutput); + } + private static OpCode getOpCode(final MessageSettings settings, final ClusterConnectionMode clusterConnectionMode, @Nullable final ServerApi serverApi) { return isServerVersionKnown(settings) || clusterConnectionMode == LOAD_BALANCED || serverApi != null @@ -351,4 +418,9 @@ private static OpCode getOpCode(final MessageSettings settings, final ClusterCon private static boolean isServerVersionKnown(final MessageSettings settings) { return settings.getMaxWireVersion() >= FOUR_DOT_ZERO_WIRE_VERSION; } + + @FunctionalInterface + private interface FinishOpMsgSectionWithPayloadType1 extends AutoCloseable { + void close(); + } } diff --git a/driver-core/src/main/com/mongodb/internal/connection/CommandProtocolImpl.java b/driver-core/src/main/com/mongodb/internal/connection/CommandProtocolImpl.java index de9e0666d40..eb4d6d49516 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/CommandProtocolImpl.java +++ b/driver-core/src/main/com/mongodb/internal/connection/CommandProtocolImpl.java @@ -26,17 +26,15 @@ import org.bson.FieldNameValidator; import org.bson.codecs.Decoder; -import static com.mongodb.assertions.Assertions.isTrueArgument; import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.internal.connection.ProtocolHelper.getMessageSettings; class CommandProtocolImpl implements CommandProtocol { private final MongoNamespace namespace; private final BsonDocument command; - private final SplittablePayload payload; + private final MessageSequences sequences; private final ReadPreference readPreference; private final FieldNameValidator commandFieldNameValidator; - private final FieldNameValidator payloadFieldNameValidator; private final Decoder commandResultDecoder; private final boolean responseExpected; private final ClusterConnectionMode clusterConnectionMode; @@ -44,8 +42,7 @@ class CommandProtocolImpl implements CommandProtocol { CommandProtocolImpl(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator, @Nullable final ReadPreference readPreference, final Decoder commandResultDecoder, final boolean responseExpected, - @Nullable final SplittablePayload payload, @Nullable final FieldNameValidator payloadFieldNameValidator, - final ClusterConnectionMode clusterConnectionMode, final OperationContext operationContext) { + final MessageSequences sequences, final ClusterConnectionMode clusterConnectionMode, final OperationContext operationContext) { notNull("database", database); this.namespace = new MongoNamespace(notNull("database", database), MongoNamespace.COMMAND_COLLECTION_NAME); this.command = notNull("command", command); @@ -53,13 +50,9 @@ class CommandProtocolImpl implements CommandProtocol { this.readPreference = readPreference; this.commandResultDecoder = notNull("commandResultDecoder", commandResultDecoder); this.responseExpected = responseExpected; - this.payload = payload; - this.payloadFieldNameValidator = payloadFieldNameValidator; + this.sequences = sequences; this.clusterConnectionMode = notNull("clusterConnectionMode", clusterConnectionMode); this.operationContext 
= operationContext; - - isTrueArgument("payloadFieldNameValidator cannot be null if there is a payload.", - payload == null || payloadFieldNameValidator != null); } @Nullable @@ -87,13 +80,13 @@ public void executeAsync(final InternalConnection connection, final SingleResult @Override public CommandProtocolImpl withSessionContext(final SessionContext sessionContext) { return new CommandProtocolImpl<>(namespace.getDatabaseName(), command, commandFieldNameValidator, readPreference, - commandResultDecoder, responseExpected, payload, payloadFieldNameValidator, clusterConnectionMode, + commandResultDecoder, responseExpected, sequences, clusterConnectionMode, operationContext.withSessionContext(sessionContext)); } private CommandMessage getCommandMessage(final InternalConnection connection) { return new CommandMessage(namespace, command, commandFieldNameValidator, readPreference, - getMessageSettings(connection.getDescription(), connection.getInitialServerDescription()), responseExpected, payload, - payloadFieldNameValidator, clusterConnectionMode, operationContext.getServerApi()); + getMessageSettings(connection.getDescription(), connection.getInitialServerDescription()), responseExpected, + sequences, clusterConnectionMode, operationContext.getServerApi()); } } diff --git a/driver-core/src/main/com/mongodb/internal/connection/CompressedMessage.java b/driver-core/src/main/com/mongodb/internal/connection/CompressedMessage.java index 9880ef3fb0b..6764135daa1 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/CompressedMessage.java +++ b/driver-core/src/main/com/mongodb/internal/connection/CompressedMessage.java @@ -17,7 +17,6 @@ package com.mongodb.internal.connection; import org.bson.ByteBuf; -import org.bson.io.BsonOutput; import java.util.List; @@ -37,7 +36,7 @@ class CompressedMessage extends RequestMessage { } @Override - protected EncodingMetadata encodeMessageBodyWithMetadata(final BsonOutput bsonOutput, final OperationContext operationContext) { + protected EncodingMetadata encodeMessageBodyWithMetadata(final ByteBufferBsonOutput bsonOutput, final OperationContext operationContext) { bsonOutput.writeInt32(wrappedOpcode.getValue()); bsonOutput.writeInt32(getWrappedMessageSize(wrappedMessageBuffers) - MESSAGE_HEADER_LENGTH); bsonOutput.writeByte(compressor.getId()); diff --git a/driver-core/src/main/com/mongodb/internal/connection/Connection.java b/driver-core/src/main/com/mongodb/internal/connection/Connection.java index 95094b240c1..219fb9ae6b9 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/Connection.java +++ b/driver-core/src/main/com/mongodb/internal/connection/Connection.java @@ -51,7 +51,7 @@ T command(String database, BsonDocument command, FieldNameValidator fieldNam @Nullable T command(String database, BsonDocument command, FieldNameValidator commandFieldNameValidator, @Nullable ReadPreference readPreference, Decoder commandResultDecoder, OperationContext operationContext, - boolean responseExpected, @Nullable SplittablePayload payload, @Nullable FieldNameValidator payloadFieldNameValidator); + boolean responseExpected, MessageSequences sequences); enum PinningMode { diff --git a/driver-core/src/main/com/mongodb/internal/connection/DefaultServer.java b/driver-core/src/main/com/mongodb/internal/connection/DefaultServer.java index 8f3d0f09fd9..008cdbefcb7 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/DefaultServer.java +++ b/driver-core/src/main/com/mongodb/internal/connection/DefaultServer.java @@ -302,9 +302,9 @@ public T 
command(final String database, final BsonDocument command, final Fi public T command(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator, @Nullable final ReadPreference readPreference, final Decoder commandResultDecoder, final OperationContext operationContext, final boolean responseExpected, - @Nullable final SplittablePayload payload, @Nullable final FieldNameValidator payloadFieldNameValidator) { + final MessageSequences sequences) { return wrapped.command(database, command, commandFieldNameValidator, readPreference, commandResultDecoder, operationContext, - responseExpected, payload, payloadFieldNameValidator); + responseExpected, sequences); } @Override @@ -364,10 +364,10 @@ public void commandAsync(final String database, final BsonDocument command, @Override public void commandAsync(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator, @Nullable final ReadPreference readPreference, final Decoder commandResultDecoder, - final OperationContext operationContext, final boolean responseExpected, @Nullable final SplittablePayload payload, - @Nullable final FieldNameValidator payloadFieldNameValidator, final SingleResultCallback callback) { + final OperationContext operationContext, final boolean responseExpected, final MessageSequences sequences, + final SingleResultCallback callback) { wrapped.commandAsync(database, command, commandFieldNameValidator, readPreference, commandResultDecoder, - operationContext, responseExpected, payload, payloadFieldNameValidator, callback); + operationContext, responseExpected, sequences, callback); } @Override diff --git a/driver-core/src/main/com/mongodb/internal/connection/DefaultServerConnection.java b/driver-core/src/main/com/mongodb/internal/connection/DefaultServerConnection.java index 01d5f587fdc..143ef5b76ae 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/DefaultServerConnection.java +++ b/driver-core/src/main/com/mongodb/internal/connection/DefaultServerConnection.java @@ -20,6 +20,7 @@ import com.mongodb.connection.ClusterConnectionMode; import com.mongodb.connection.ConnectionDescription; import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.connection.MessageSequences.EmptyMessageSequences; import com.mongodb.internal.diagnostics.logging.Logger; import com.mongodb.internal.diagnostics.logging.Loggers; import com.mongodb.internal.session.SessionContext; @@ -70,18 +71,17 @@ public ConnectionDescription getDescription() { @Override public T command(final String database, final BsonDocument command, final FieldNameValidator fieldNameValidator, @Nullable final ReadPreference readPreference, final Decoder commandResultDecoder, final OperationContext operationContext) { - return command(database, command, fieldNameValidator, readPreference, commandResultDecoder, operationContext, true, null, null); + return command(database, command, fieldNameValidator, readPreference, commandResultDecoder, operationContext, true, EmptyMessageSequences.INSTANCE); } @Nullable @Override public T command(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator, @Nullable final ReadPreference readPreference, final Decoder commandResultDecoder, - final OperationContext operationContext, final boolean responseExpected, - @Nullable final SplittablePayload payload, @Nullable final FieldNameValidator payloadFieldNameValidator) { + final OperationContext operationContext, final boolean 
responseExpected, final MessageSequences sequences) { return executeProtocol( new CommandProtocolImpl<>(database, command, commandFieldNameValidator, readPreference, commandResultDecoder, - responseExpected, payload, payloadFieldNameValidator, clusterConnectionMode, operationContext), + responseExpected, sequences, clusterConnectionMode, operationContext), operationContext.getSessionContext()); } @@ -90,16 +90,15 @@ public void commandAsync(final String database, final BsonDocument command, @Nullable final ReadPreference readPreference, final Decoder commandResultDecoder, final OperationContext operationContext, final SingleResultCallback callback) { commandAsync(database, command, fieldNameValidator, readPreference, commandResultDecoder, - operationContext, true, null, null, callback); + operationContext, true, EmptyMessageSequences.INSTANCE, callback); } @Override public void commandAsync(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator, @Nullable final ReadPreference readPreference, final Decoder commandResultDecoder, final OperationContext operationContext, - final boolean responseExpected, @Nullable final SplittablePayload payload, - @Nullable final FieldNameValidator payloadFieldNameValidator, final SingleResultCallback callback) { + final boolean responseExpected, final MessageSequences sequences, final SingleResultCallback callback) { executeProtocolAsync(new CommandProtocolImpl<>(database, command, commandFieldNameValidator, readPreference, - commandResultDecoder, responseExpected, payload, payloadFieldNameValidator, clusterConnectionMode, operationContext), + commandResultDecoder, responseExpected, sequences, clusterConnectionMode, operationContext), operationContext.getSessionContext(), callback); } diff --git a/driver-core/src/main/com/mongodb/internal/connection/DualMessageSequences.java b/driver-core/src/main/com/mongodb/internal/connection/DualMessageSequences.java new file mode 100644 index 00000000000..0c5a3430c22 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/DualMessageSequences.java @@ -0,0 +1,122 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import org.bson.BsonBinaryWriter; +import org.bson.BsonElement; +import org.bson.FieldNameValidator; + +import java.util.List; + +/** + * Two sequences that may either be coupled or independent. + *
<p>
+ * This class is not part of the public API and may be removed or changed at any time.</p>
+ */ +public abstract class DualMessageSequences extends MessageSequences { + + private final String firstSequenceId; + private final FieldNameValidator firstFieldNameValidator; + private final String secondSequenceId; + private final FieldNameValidator secondFieldNameValidator; + + protected DualMessageSequences( + final String firstSequenceId, + final FieldNameValidator firstFieldNameValidator, + final String secondSequenceId, + final FieldNameValidator secondFieldNameValidator) { + this.firstSequenceId = firstSequenceId; + this.firstFieldNameValidator = firstFieldNameValidator; + this.secondSequenceId = secondSequenceId; + this.secondFieldNameValidator = secondFieldNameValidator; + } + + FieldNameValidator getFirstFieldNameValidator() { + return firstFieldNameValidator; + } + + FieldNameValidator getSecondFieldNameValidator() { + return secondFieldNameValidator; + } + + String getFirstSequenceId() { + return firstSequenceId; + } + + String getSecondSequenceId() { + return secondSequenceId; + } + + protected abstract EncodeDocumentsResult encodeDocuments(WritersProviderAndLimitsChecker writersProviderAndLimitsChecker); + + /** + * @see #tryWrite(WriteAction) + */ + public interface WritersProviderAndLimitsChecker { + /** + * Provides writers to the specified {@link WriteAction}, + * {@linkplain WriteAction#doAndGetBatchCount(BsonBinaryWriter, BsonBinaryWriter) executes} it, + * checks the {@linkplain MessageSettings limits}. + *
<p>
+ * May be called multiple times per {@link #encodeDocuments(WritersProviderAndLimitsChecker)}.</p>
+ */ + WriteResult tryWrite(WriteAction write); + + /** + * @see #doAndGetBatchCount(BsonBinaryWriter, BsonBinaryWriter) + */ + interface WriteAction { + /** + * Writes documents to the sequences using the provided writers. + * + * @return The resulting batch count since the beginning of {@link #encodeDocuments(WritersProviderAndLimitsChecker)}. + * It is generally allowed to be greater than {@link MessageSettings#getMaxBatchCount()}. + */ + int doAndGetBatchCount(BsonBinaryWriter firstWriter, BsonBinaryWriter secondWriter); + } + + enum WriteResult { + FAIL_LIMIT_EXCEEDED, + OK_LIMIT_REACHED, + OK_LIMIT_NOT_REACHED + } + } + + public static final class EncodeDocumentsResult { + private final boolean serverResponseRequired; + private final List extraElements; + + /** + * @param extraElements See {@link #getExtraElements()}. + */ + public EncodeDocumentsResult(final boolean serverResponseRequired, final List extraElements) { + this.serverResponseRequired = serverResponseRequired; + this.extraElements = extraElements; + } + + boolean isServerResponseRequired() { + return serverResponseRequired; + } + + /** + * {@linkplain BsonElement Key/value pairs} to be added to the document contained in the {@code OP_MSG} section with payload type 0. + */ + List getExtraElements() { + return extraElements; + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/ElementExtendingBsonWriter.java b/driver-core/src/main/com/mongodb/internal/connection/ElementExtendingBsonWriter.java deleted file mode 100644 index d0ed5234d50..00000000000 --- a/driver-core/src/main/com/mongodb/internal/connection/ElementExtendingBsonWriter.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2008-present MongoDB, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.internal.connection; - -import org.bson.BsonBinaryWriter; -import org.bson.BsonElement; -import org.bson.BsonReader; - -import java.util.List; - -import static com.mongodb.internal.connection.BsonWriterHelper.writeElements; - -/** - *
<p>This class is not part of the public API and may be removed or changed at any time</p>
- */ -public class ElementExtendingBsonWriter extends LevelCountingBsonWriter { - private final BsonBinaryWriter writer; - private final List extraElements; - - - public ElementExtendingBsonWriter(final BsonBinaryWriter writer, final List extraElements) { - super(writer); - this.writer = writer; - this.extraElements = extraElements; - } - - @Override - public void writeEndDocument() { - if (getCurrentLevel() == 0) { - writeElements(writer, extraElements); - } - super.writeEndDocument(); - } - - @Override - public void pipe(final BsonReader reader) { - if (getCurrentLevel() == -1) { - writer.pipe(reader, extraElements); - } else { - writer.pipe(reader); - } - } -} diff --git a/driver-core/src/main/com/mongodb/internal/connection/IdHoldingBsonWriter.java b/driver-core/src/main/com/mongodb/internal/connection/IdHoldingBsonWriter.java index 4120dbdfb17..c73a5d4fd86 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/IdHoldingBsonWriter.java +++ b/driver-core/src/main/com/mongodb/internal/connection/IdHoldingBsonWriter.java @@ -92,11 +92,11 @@ public void writeStartDocument() { @Override public void writeEndDocument() { if (isWritingId()) { - if (getIdBsonWriterCurrentLevel() >= 0) { + if (getIdBsonWriterCurrentLevel() > DEFAULT_INITIAL_LEVEL) { getIdBsonWriter().writeEndDocument(); } - if (getIdBsonWriterCurrentLevel() == -1) { + if (getIdBsonWriterCurrentLevel() == DEFAULT_INITIAL_LEVEL) { if (id != null && id.isJavaScriptWithScope()) { id = new BsonJavaScriptWithScope(id.asJavaScriptWithScope().getCode(), new RawBsonDocument(getBytes())); } else if (id == null) { @@ -105,7 +105,7 @@ public void writeEndDocument() { } } - if (getCurrentLevel() == 0 && id == null) { + if (getCurrentLevel() == DEFAULT_INITIAL_LEVEL + 1 && id == null) { id = fallbackId == null ? 
new BsonObjectId() : fallbackId; writeObjectId(ID_FIELD_NAME, id.asObjectId().getValue()); } @@ -115,7 +115,7 @@ public void writeEndDocument() { @Override public void writeStartArray() { if (isWritingId()) { - if (getIdBsonWriterCurrentLevel() == -1) { + if (getIdBsonWriterCurrentLevel() == DEFAULT_INITIAL_LEVEL) { idFieldIsAnArray = true; getIdBsonWriter().writeStartDocument(); getIdBsonWriter().writeName(ID_FIELD_NAME); @@ -129,7 +129,7 @@ public void writeStartArray() { public void writeStartArray(final String name) { setCurrentFieldName(name); if (isWritingId()) { - if (getIdBsonWriterCurrentLevel() == -1) { + if (getIdBsonWriterCurrentLevel() == DEFAULT_INITIAL_LEVEL) { getIdBsonWriter().writeStartDocument(); } getIdBsonWriter().writeStartArray(name); @@ -141,7 +141,7 @@ public void writeStartArray(final String name) { public void writeEndArray() { if (isWritingId()) { getIdBsonWriter().writeEndArray(); - if (getIdBsonWriterCurrentLevel() == 0 && idFieldIsAnArray) { + if (getIdBsonWriterCurrentLevel() == DEFAULT_INITIAL_LEVEL + 1 && idFieldIsAnArray) { getIdBsonWriter().writeEndDocument(); id = new RawBsonDocument(getBytes()).get(ID_FIELD_NAME); } @@ -308,7 +308,7 @@ public void writeMinKey() { @Override public void writeName(final String name) { setCurrentFieldName(name); - if (getIdBsonWriterCurrentLevel() >= 0) { + if (getIdBsonWriterCurrentLevel() > DEFAULT_INITIAL_LEVEL) { getIdBsonWriter().writeName(name); } super.writeName(name); @@ -433,13 +433,13 @@ private void setCurrentFieldName(final String name) { } private boolean isWritingId() { - return getIdBsonWriterCurrentLevel() >= 0 || (getCurrentLevel() == 0 && currentFieldName != null + return getIdBsonWriterCurrentLevel() > DEFAULT_INITIAL_LEVEL || (getCurrentLevel() == DEFAULT_INITIAL_LEVEL + 1 && currentFieldName != null && currentFieldName.equals(ID_FIELD_NAME)); } private void addBsonValue(final Supplier value, final Runnable writeValue) { if (isWritingId()) { - if (getIdBsonWriterCurrentLevel() >= 0) { + if (getIdBsonWriterCurrentLevel() > DEFAULT_INITIAL_LEVEL) { writeValue.run(); } else { id = value.get(); @@ -448,7 +448,7 @@ private void addBsonValue(final Supplier value, final Runnable writeV } private int getIdBsonWriterCurrentLevel() { - return idBsonBinaryWriter == null ? -1 : idBsonBinaryWriter.getCurrentLevel(); + return idBsonBinaryWriter == null ? DEFAULT_INITIAL_LEVEL : idBsonBinaryWriter.getCurrentLevel(); } private LevelCountingBsonWriter getIdBsonWriter() { diff --git a/driver-core/src/main/com/mongodb/internal/connection/LevelCountingBsonWriter.java b/driver-core/src/main/com/mongodb/internal/connection/LevelCountingBsonWriter.java index 44889765fbf..3e9d0324bd7 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/LevelCountingBsonWriter.java +++ b/driver-core/src/main/com/mongodb/internal/connection/LevelCountingBsonWriter.java @@ -20,10 +20,21 @@ abstract class LevelCountingBsonWriter extends BsonWriterDecorator { - private int level = -1; + static final int DEFAULT_INITIAL_LEVEL = -1; + + private int level; LevelCountingBsonWriter(final BsonWriter bsonWriter) { + this(bsonWriter, DEFAULT_INITIAL_LEVEL); + } + + /** + * @param initialLevel This parameter allows initializing the {@linkplain #getCurrentLevel() current level} + * with a value different from {@link #DEFAULT_INITIAL_LEVEL}. 
+ */ + LevelCountingBsonWriter(final BsonWriter bsonWriter, final int initialLevel) { super(bsonWriter); + level = initialLevel; } int getCurrentLevel() { diff --git a/driver-core/src/main/com/mongodb/internal/connection/MessageSequences.java b/driver-core/src/main/com/mongodb/internal/connection/MessageSequences.java new file mode 100644 index 00000000000..19600007404 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/MessageSequences.java @@ -0,0 +1,31 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.connection; + +/** + * Zero or more identifiable sequences contained in the {@code OP_MSG} section with payload type 1. + *
<p>
+ * This class is not part of the public API and may be removed or changed at any time.</p>
+ * @see OP_MSG + */ +public abstract class MessageSequences { + public static final class EmptyMessageSequences extends MessageSequences { + public static final EmptyMessageSequences INSTANCE = new EmptyMessageSequences(); + + private EmptyMessageSequences() { + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/MessageSettings.java b/driver-core/src/main/com/mongodb/internal/connection/MessageSettings.java index 7a5734bc140..91fd862ce4b 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/MessageSettings.java +++ b/driver-core/src/main/com/mongodb/internal/connection/MessageSettings.java @@ -42,6 +42,13 @@ public final class MessageSettings { * {@code maxWriteBatchSize}. */ private static final int DEFAULT_MAX_BATCH_COUNT = 1000; + /** + * The headroom for documents that are not intended to be stored in a database. + * A command document is an example of such a document. + * This headroom allows a command document to specify a document that is intended to be stored in a database, + * even if the specified document is of the maximum size. + */ + static final int DOCUMENT_HEADROOM_SIZE = 16 * (1 << 10); private final int maxDocumentSize; private final int maxMessageSize; diff --git a/driver-core/src/main/com/mongodb/internal/connection/RequestMessage.java b/driver-core/src/main/com/mongodb/internal/connection/RequestMessage.java index 86e2ebd1dbe..dd09a59f763 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/RequestMessage.java +++ b/driver-core/src/main/com/mongodb/internal/connection/RequestMessage.java @@ -18,24 +18,16 @@ import com.mongodb.lang.Nullable; import org.bson.BsonBinaryWriter; -import org.bson.BsonBinaryWriterSettings; import org.bson.BsonDocument; -import org.bson.BsonElement; -import org.bson.BsonWriter; -import org.bson.BsonWriterSettings; import org.bson.FieldNameValidator; -import org.bson.codecs.BsonValueCodecProvider; -import org.bson.codecs.Codec; -import org.bson.codecs.Encoder; -import org.bson.codecs.EncoderContext; -import org.bson.codecs.configuration.CodecRegistry; import org.bson.io.BsonOutput; -import java.util.List; import java.util.concurrent.atomic.AtomicInteger; import static com.mongodb.assertions.Assertions.notNull; -import static org.bson.codecs.configuration.CodecRegistries.fromProviders; +import static com.mongodb.internal.connection.BsonWriterHelper.backpatchLength; +import static com.mongodb.internal.connection.BsonWriterHelper.createBsonBinaryWriter; +import static com.mongodb.internal.connection.BsonWriterHelper.encodeUsingRegistry; /** * Abstract base class for all MongoDB Wire Protocol request messages. 
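The import hunk above pulls in BsonWriterHelper.backpatchLength, which the later hunks of this file use in place of the removed backpatchMessageLength, and which CommandMessage's startOpMsgSectionWithPayloadType1 uses to finish an OP_MSG payload-type-1 section. The following is a minimal sketch of that write-placeholder-then-patch framing, using java.nio.ByteBuffer as a stand-in for the driver's internal ByteBufferBsonOutput; the class and method names here are illustrative, not driver API.

import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.charset.StandardCharsets;

// Sketch only: mirrors the write-placeholder-then-patch framing applied by backpatchLength
// and startOpMsgSectionWithPayloadType1 in this patch; all names below are illustrative.
final class LengthBackpatchSketch {

    // Writes one OP_MSG payload-type-1 style section: a size placeholder, the sequence
    // identifier as a C string, the documents, and finally the size patched in place.
    static void writeSection(final ByteBuffer out, final String sequenceId, final byte[]... documents) {
        int sectionStart = out.position();
        out.putInt(0);                                  // size placeholder, patched below
        out.put(sequenceId.getBytes(StandardCharsets.UTF_8));
        out.put((byte) 0);                              // C-string terminator
        for (byte[] document : documents) {
            out.put(document);
        }
        int size = out.position() - sectionStart;       // the size field counts its own 4 bytes
        out.putInt(sectionStart, size);                 // back-patch
    }

    public static void main(final String[] args) {
        ByteBuffer out = ByteBuffer.allocate(64).order(ByteOrder.LITTLE_ENDIAN);
        writeSection(out, "documents", new byte[] {5, 0, 0, 0, 0}); // a minimal empty BSON document
        System.out.println("bytes written: " + out.position());
    }
}

In the driver itself the patching targets ByteBufferBsonOutput, and the FinishOpMsgSectionWithPayloadType1 handle returned by startOpMsgSectionWithPayloadType1 performs the same back-patch from its close() method, which is why the section writes in CommandMessage are wrapped in try-with-resources.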
@@ -46,12 +38,6 @@ abstract class RequestMessage { static final int MESSAGE_PROLOGUE_LENGTH = 16; - // Allow an extra 16K to the maximum allowed size of a query or command document, so that, for example, - // a 16M document can be upserted via findAndModify - private static final int DOCUMENT_HEADROOM = 16 * 1024; - - private static final CodecRegistry REGISTRY = fromProviders(new BsonValueCodecProvider()); - private final String collectionName; private final MessageSettings settings; private final int id; @@ -128,12 +114,12 @@ public MessageSettings getSettings() { * @param bsonOutput the output * @param operationContext the session context */ - public void encode(final BsonOutput bsonOutput, final OperationContext operationContext) { + public void encode(final ByteBufferBsonOutput bsonOutput, final OperationContext operationContext) { notNull("operationContext", operationContext); int messageStartPosition = bsonOutput.getPosition(); writeMessagePrologue(bsonOutput); EncodingMetadata encodingMetadata = encodeMessageBodyWithMetadata(bsonOutput, operationContext); - backpatchMessageLength(messageStartPosition, bsonOutput); + backpatchLength(messageStartPosition, bsonOutput); this.encodingMetadata = encodingMetadata; } @@ -165,23 +151,13 @@ protected void writeMessagePrologue(final BsonOutput bsonOutput) { * @param operationContext the session context * @return the encoding metadata */ - protected abstract EncodingMetadata encodeMessageBodyWithMetadata(BsonOutput bsonOutput, OperationContext operationContext); - - protected void addDocument(final BsonDocument document, final BsonOutput bsonOutput, - final FieldNameValidator validator, @Nullable final List extraElements) { - addDocument(document, getCodec(document), EncoderContext.builder().build(), bsonOutput, validator, - settings.getMaxDocumentSize() + DOCUMENT_HEADROOM, extraElements); - } + protected abstract EncodingMetadata encodeMessageBodyWithMetadata(ByteBufferBsonOutput bsonOutput, OperationContext operationContext); - /** - * Backpatches the message length into the beginning of the message. - * - * @param startPosition the start position of the message - * @param bsonOutput the output - */ - protected void backpatchMessageLength(final int startPosition, final BsonOutput bsonOutput) { - int messageLength = bsonOutput.getPosition() - startPosition; - bsonOutput.writeInt32(bsonOutput.getPosition() - messageLength, messageLength); + protected int writeDocument(final BsonDocument document, final BsonOutput bsonOutput, final FieldNameValidator validator) { + BsonBinaryWriter writer = createBsonBinaryWriter(bsonOutput, validator, getSettings()); + int documentStart = bsonOutput.getPosition(); + encodeUsingRegistry(writer, document); + return bsonOutput.getPosition() - documentStart; } /** @@ -192,20 +168,4 @@ protected void backpatchMessageLength(final int startPosition, final BsonOutput protected String getCollectionName() { return collectionName; } - - @SuppressWarnings("unchecked") - Codec getCodec(final BsonDocument document) { - return (Codec) REGISTRY.get(document.getClass()); - } - - private void addDocument(final T obj, final Encoder encoder, final EncoderContext encoderContext, - final BsonOutput bsonOutput, final FieldNameValidator validator, final int maxDocumentSize, - @Nullable final List extraElements) { - BsonBinaryWriter bsonBinaryWriter = new BsonBinaryWriter(new BsonWriterSettings(), new BsonBinaryWriterSettings(maxDocumentSize), - bsonOutput, validator); - BsonWriter bsonWriter = extraElements == null - ? 
bsonBinaryWriter - : new ElementExtendingBsonWriter(bsonBinaryWriter, extraElements); - encoder.encode(bsonWriter, obj, encoderContext); - } } diff --git a/driver-core/src/main/com/mongodb/internal/connection/SplittablePayload.java b/driver-core/src/main/com/mongodb/internal/connection/SplittablePayload.java index 55bbac03b8b..9e52894f720 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/SplittablePayload.java +++ b/driver-core/src/main/com/mongodb/internal/connection/SplittablePayload.java @@ -26,6 +26,7 @@ import org.bson.BsonObjectId; import org.bson.BsonValue; import org.bson.BsonWriter; +import org.bson.FieldNameValidator; import org.bson.codecs.BsonValueCodecProvider; import org.bson.codecs.Codec; import org.bson.codecs.Encoder; @@ -54,8 +55,9 @@ * *
<p>This class is not part of the public API and may be removed or changed at any time</p>
*/ -public final class SplittablePayload { +public final class SplittablePayload extends MessageSequences { private static final CodecRegistry REGISTRY = fromProviders(new BsonValueCodecProvider()); + private final FieldNameValidator fieldNameValidator; private final WriteRequestEncoder writeRequestEncoder = new WriteRequestEncoder(); private final Type payloadType; private final List writeRequestWithIndexes; @@ -94,10 +96,19 @@ public enum Type { * @param payloadType the payload type * @param writeRequestWithIndexes the writeRequests */ - public SplittablePayload(final Type payloadType, final List writeRequestWithIndexes, final boolean ordered) { + public SplittablePayload( + final Type payloadType, + final List writeRequestWithIndexes, + final boolean ordered, + final FieldNameValidator fieldNameValidator) { this.payloadType = notNull("batchType", payloadType); this.writeRequestWithIndexes = notNull("writeRequests", writeRequestWithIndexes); this.ordered = ordered; + this.fieldNameValidator = notNull("fieldNameValidator", fieldNameValidator); + } + + public FieldNameValidator getFieldNameValidator() { + return fieldNameValidator; } /** @@ -175,7 +186,7 @@ boolean isOrdered() { public SplittablePayload getNextSplit() { isTrue("hasAnotherSplit", hasAnotherSplit()); List nextPayLoad = writeRequestWithIndexes.subList(position, writeRequestWithIndexes.size()); - return new SplittablePayload(payloadType, nextPayLoad, ordered); + return new SplittablePayload(payloadType, nextPayLoad, ordered, fieldNameValidator); } /** @@ -204,7 +215,8 @@ public void encode(final BsonWriter writer, final WriteRequestWithIndex writeReq writer, // Reuse `writeRequestDocumentId` if it may have been generated // by `IdHoldingBsonWriter` in a previous attempt. - // If its type is not `BsonObjectId`, we know it could not have been generated. + // If its type is not `BsonObjectId`, which happens only if `_id` was specified by the application, + // we know it could not have been generated. writeRequestDocumentId instanceof BsonObjectId ? 
writeRequestDocumentId.asObjectId() : null); getCodec(document).encode(idHoldingBsonWriter, document, EncoderContext.builder().isEncodingCollectibleDocument(true).build()); diff --git a/driver-core/src/main/com/mongodb/internal/connection/SplittablePayloadBsonWriter.java b/driver-core/src/main/com/mongodb/internal/connection/SplittablePayloadBsonWriter.java index ecff2c95a0c..e679a3b557c 100644 --- a/driver-core/src/main/com/mongodb/internal/connection/SplittablePayloadBsonWriter.java +++ b/driver-core/src/main/com/mongodb/internal/connection/SplittablePayloadBsonWriter.java @@ -63,7 +63,7 @@ public void writeStartDocument() { @Override public void writeEndDocument() { - if (getCurrentLevel() == 0 && payload.hasPayload()) { + if (getCurrentLevel() == DEFAULT_INITIAL_LEVEL + 1 && payload.hasPayload()) { writePayloadArray(writer, bsonOutput, settings, messageStartPosition, payload, maxSplittableDocumentSize); } super.writeEndDocument(); diff --git a/driver-core/src/main/com/mongodb/internal/operation/AsyncOperationHelper.java b/driver-core/src/main/com/mongodb/internal/operation/AsyncOperationHelper.java index b3781fc66ff..f158b3944ae 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/AsyncOperationHelper.java +++ b/driver-core/src/main/com/mongodb/internal/operation/AsyncOperationHelper.java @@ -322,7 +322,7 @@ static AsyncCallbackSupplier decorateReadWithRetriesAsync(final RetryStat static AsyncCallbackSupplier decorateWriteWithRetriesAsync(final RetryState retryState, final OperationContext operationContext, final AsyncCallbackSupplier asyncWriteFunction) { return new RetryingAsyncCallbackSupplier<>(retryState, onRetryableWriteAttemptFailure(operationContext), - CommandOperationHelper::shouldAttemptToRetryWrite, callback -> { + CommandOperationHelper::loggingShouldAttemptToRetryWriteAndAddRetryableLabel, callback -> { logRetryExecute(retryState, operationContext); asyncWriteFunction.get(callback); }); @@ -344,7 +344,7 @@ static CommandReadTransformerAsync> asyncS } static AsyncBatchCursor cursorDocumentToAsyncBatchCursor(final TimeoutMode timeoutMode, final BsonDocument cursorDocument, - final int batchSize, final Decoder decoder, final BsonValue comment, final AsyncConnectionSource source, + final int batchSize, final Decoder decoder, @Nullable final BsonValue comment, final AsyncConnectionSource source, final AsyncConnection connection) { return new AsyncCommandBatchCursor<>(timeoutMode, cursorDocument, batchSize, 0, decoder, comment, source, connection); } diff --git a/driver-core/src/main/com/mongodb/internal/operation/AsyncOperations.java b/driver-core/src/main/com/mongodb/internal/operation/AsyncOperations.java index 70c847668ab..a70af7c64fd 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/AsyncOperations.java +++ b/driver-core/src/main/com/mongodb/internal/operation/AsyncOperations.java @@ -44,6 +44,9 @@ import com.mongodb.client.model.SearchIndexModel; import com.mongodb.client.model.UpdateOptions; import com.mongodb.client.model.WriteModel; +import com.mongodb.client.model.bulk.ClientBulkWriteOptions; +import com.mongodb.client.model.bulk.ClientBulkWriteResult; +import com.mongodb.client.model.bulk.ClientNamespacedWriteModel; import com.mongodb.client.model.changestream.FullDocument; import com.mongodb.client.model.changestream.FullDocumentBeforeChange; import com.mongodb.internal.TimeoutSettings; @@ -294,6 +297,12 @@ public AsyncWriteOperation bulkWrite(final List clientBulkWriteOperation( + final List clientWriteModels, + @Nullable final 
ClientBulkWriteOptions options) { + return operations.clientBulkWriteOperation(clientWriteModels, options); + } + public AsyncReadOperation commandRead(final Bson command, final Class resultClass) { return operations.commandRead(command, resultClass); } diff --git a/driver-core/src/main/com/mongodb/internal/operation/BatchCursor.java b/driver-core/src/main/com/mongodb/internal/operation/BatchCursor.java index 5f86eb1f8fb..1463798ef64 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/BatchCursor.java +++ b/driver-core/src/main/com/mongodb/internal/operation/BatchCursor.java @@ -25,6 +25,12 @@ import java.util.Iterator; import java.util.List; +import static java.util.Spliterator.IMMUTABLE; +import static java.util.Spliterator.ORDERED; +import static java.util.Spliterators.spliteratorUnknownSize; +import static java.util.stream.Collectors.toList; +import static java.util.stream.StreamSupport.stream; + /** * MongoDB returns query results as batches, and this interface provideds an iterator over those batches. The first call to * the {@code next} method will return the first batch, and subsequent calls will trigger a request to get the next batch @@ -98,4 +104,9 @@ public interface BatchCursor extends Iterator>, Closeable { ServerCursor getServerCursor(); ServerAddress getServerAddress(); + + default List> exhaust() { + return stream(spliteratorUnknownSize(this, ORDERED | IMMUTABLE), false) + .collect(toList()); + } } diff --git a/driver-core/src/main/com/mongodb/internal/operation/BulkWriteBatch.java b/driver-core/src/main/com/mongodb/internal/operation/BulkWriteBatch.java index 1bca4734eff..1064bee14d3 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/BulkWriteBatch.java +++ b/driver-core/src/main/com/mongodb/internal/operation/BulkWriteBatch.java @@ -64,7 +64,7 @@ import static com.mongodb.internal.bulk.WriteRequest.Type.REPLACE; import static com.mongodb.internal.bulk.WriteRequest.Type.UPDATE; import static com.mongodb.internal.operation.DocumentHelper.putIfNotNull; -import static com.mongodb.internal.operation.MixedBulkWriteOperation.commandWriteConcern; +import static com.mongodb.internal.operation.CommandOperationHelper.commandWriteConcern; import static com.mongodb.internal.operation.OperationHelper.LOGGER; import static com.mongodb.internal.operation.OperationHelper.isRetryableWrite; import static com.mongodb.internal.operation.WriteConcernHelper.createWriteConcernError; @@ -111,7 +111,7 @@ static BulkWriteBatch createBulkWriteBatch(final MongoNamespace namespace, } if (canRetryWrites && !writeRequestsAreRetryable) { canRetryWrites = false; - LOGGER.debug("retryWrites set but one or more writeRequests do not support retryable writes"); + logWriteModelDoesNotSupportRetries(); } return new BulkWriteBatch(namespace, connectionDescription, ordered, writeConcern, bypassDocumentValidation, canRetryWrites, new BulkWriteBatchCombiner(connectionDescription.getServerAddress(), ordered, writeConcern), @@ -154,7 +154,7 @@ private BulkWriteBatch(final MongoNamespace namespace, final ConnectionDescripti this.indexMap = indexMap; this.unprocessed = unprocessedItems; - this.payload = new SplittablePayload(getPayloadType(batchType), payloadItems, ordered); + this.payload = new SplittablePayload(getPayloadType(batchType), payloadItems, ordered, getFieldNameValidator()); this.operationContext = operationContext; this.comment = comment; this.variables = variables; @@ -270,7 +270,7 @@ BulkWriteBatch getNextBatch() { } } - FieldNameValidator getFieldNameValidator() { + private 
FieldNameValidator getFieldNameValidator() { if (batchType == UPDATE || batchType == REPLACE) { Map rootMap; if (batchType == REPLACE) { @@ -385,4 +385,8 @@ private static boolean isRetryable(final WriteRequest writeRequest) { } return true; } + + static void logWriteModelDoesNotSupportRetries() { + LOGGER.debug("retryWrites set but one or more writeRequests do not support retryable writes"); + } } diff --git a/driver-core/src/main/com/mongodb/internal/operation/ClientBulkWriteOperation.java b/driver-core/src/main/com/mongodb/internal/operation/ClientBulkWriteOperation.java new file mode 100644 index 00000000000..ccd7f272e95 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/ClientBulkWriteOperation.java @@ -0,0 +1,1338 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.operation; + +import com.mongodb.ClientBulkWriteException; +import com.mongodb.MongoClientException; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoCommandException; +import com.mongodb.MongoException; +import com.mongodb.MongoNamespace; +import com.mongodb.MongoServerException; +import com.mongodb.MongoSocketException; +import com.mongodb.MongoWriteConcernException; +import com.mongodb.ServerAddress; +import com.mongodb.WriteConcern; +import com.mongodb.WriteError; +import com.mongodb.assertions.Assertions; +import com.mongodb.bulk.WriteConcernError; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.client.model.bulk.ClientBulkWriteOptions; +import com.mongodb.client.model.bulk.ClientBulkWriteResult; +import com.mongodb.client.model.bulk.ClientDeleteResult; +import com.mongodb.client.model.bulk.ClientInsertOneResult; +import com.mongodb.client.model.bulk.ClientNamespacedReplaceOneModel; +import com.mongodb.client.model.bulk.ClientNamespacedUpdateOneModel; +import com.mongodb.client.model.bulk.ClientNamespacedWriteModel; +import com.mongodb.client.model.bulk.ClientUpdateResult; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.VisibleForTesting; +import com.mongodb.internal.async.AsyncBatchCursor; +import com.mongodb.internal.async.AsyncSupplier; +import com.mongodb.internal.async.MutableValue; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.async.function.AsyncCallbackSupplier; +import com.mongodb.internal.async.function.RetryState; +import com.mongodb.internal.binding.AsyncConnectionSource; +import com.mongodb.internal.binding.AsyncWriteBinding; +import com.mongodb.internal.binding.ConnectionSource; +import com.mongodb.internal.binding.WriteBinding; +import com.mongodb.internal.client.model.bulk.AbstractClientDeleteModel; +import com.mongodb.internal.client.model.bulk.AbstractClientDeleteOptions; +import com.mongodb.internal.client.model.bulk.AbstractClientNamespacedWriteModel; +import com.mongodb.internal.client.model.bulk.AbstractClientUpdateModel; +import 
com.mongodb.internal.client.model.bulk.AbstractClientUpdateOptions; +import com.mongodb.internal.client.model.bulk.AcknowledgedSummaryClientBulkWriteResult; +import com.mongodb.internal.client.model.bulk.AcknowledgedVerboseClientBulkWriteResult; +import com.mongodb.internal.client.model.bulk.ClientWriteModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientBulkWriteOptions; +import com.mongodb.internal.client.model.bulk.ConcreteClientDeleteManyModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientDeleteOneModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientDeleteResult; +import com.mongodb.internal.client.model.bulk.ConcreteClientInsertOneModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientInsertOneResult; +import com.mongodb.internal.client.model.bulk.ConcreteClientNamespacedDeleteManyModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientNamespacedDeleteOneModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientNamespacedInsertOneModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientNamespacedReplaceOneModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientNamespacedUpdateManyModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientNamespacedUpdateOneModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientReplaceOneModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientReplaceOneOptions; +import com.mongodb.internal.client.model.bulk.ConcreteClientUpdateManyModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientUpdateOneModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientUpdateResult; +import com.mongodb.internal.client.model.bulk.UnacknowledgedClientBulkWriteResult; +import com.mongodb.internal.connection.AsyncConnection; +import com.mongodb.internal.connection.Connection; +import com.mongodb.internal.connection.DualMessageSequences; +import com.mongodb.internal.connection.IdHoldingBsonWriter; +import com.mongodb.internal.connection.MongoWriteConcernWithResponseException; +import com.mongodb.internal.connection.OperationContext; +import com.mongodb.internal.operation.retry.AttachmentKeys; +import com.mongodb.internal.session.SessionContext; +import com.mongodb.internal.validator.NoOpFieldNameValidator; +import com.mongodb.internal.validator.ReplacingDocumentFieldNameValidator; +import com.mongodb.internal.validator.UpdateFieldNameValidator; +import com.mongodb.lang.Nullable; +import org.bson.BsonArray; +import org.bson.BsonBinaryWriter; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonElement; +import org.bson.BsonInt32; +import org.bson.BsonInt64; +import org.bson.BsonObjectId; +import org.bson.BsonValue; +import org.bson.BsonWriter; +import org.bson.FieldNameValidator; +import org.bson.codecs.Encoder; +import org.bson.codecs.EncoderContext; +import org.bson.codecs.configuration.CodecRegistry; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.function.Supplier; +import java.util.stream.Stream; + +import static com.mongodb.assertions.Assertions.assertFalse; +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.assertTrue; +import static com.mongodb.assertions.Assertions.fail; +import static 
com.mongodb.internal.VisibleForTesting.AccessModifier.PACKAGE; +import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE; +import static com.mongodb.internal.async.AsyncRunnable.beginAsync; +import static com.mongodb.internal.connection.DualMessageSequences.WritersProviderAndLimitsChecker.WriteResult.FAIL_LIMIT_EXCEEDED; +import static com.mongodb.internal.connection.DualMessageSequences.WritersProviderAndLimitsChecker.WriteResult.OK_LIMIT_NOT_REACHED; +import static com.mongodb.internal.operation.AsyncOperationHelper.cursorDocumentToAsyncBatchCursor; +import static com.mongodb.internal.operation.AsyncOperationHelper.decorateWriteWithRetriesAsync; +import static com.mongodb.internal.operation.AsyncOperationHelper.withAsyncSourceAndConnection; +import static com.mongodb.internal.operation.BulkWriteBatch.logWriteModelDoesNotSupportRetries; +import static com.mongodb.internal.operation.CommandOperationHelper.commandWriteConcern; +import static com.mongodb.internal.operation.CommandOperationHelper.initialRetryState; +import static com.mongodb.internal.operation.CommandOperationHelper.shouldAttemptToRetryWriteAndAddRetryableLabel; +import static com.mongodb.internal.operation.CommandOperationHelper.transformWriteException; +import static com.mongodb.internal.operation.OperationHelper.isRetryableWrite; +import static com.mongodb.internal.operation.SyncOperationHelper.cursorDocumentToBatchCursor; +import static com.mongodb.internal.operation.SyncOperationHelper.decorateWriteWithRetries; +import static com.mongodb.internal.operation.SyncOperationHelper.withSourceAndConnection; +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonList; +import static java.util.Optional.ofNullable; +import static java.util.stream.Collectors.toList; +import static java.util.stream.Collectors.toSet; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public final class ClientBulkWriteOperation implements WriteOperation, AsyncWriteOperation { + private static final ConcreteClientBulkWriteOptions EMPTY_OPTIONS = new ConcreteClientBulkWriteOptions(); + private static final String BULK_WRITE_COMMAND_NAME = "bulkWrite"; + private static final EncoderContext DEFAULT_ENCODER_CONTEXT = EncoderContext.builder().build(); + private static final EncoderContext COLLECTIBLE_DOCUMENT_ENCODER_CONTEXT = EncoderContext.builder() + .isEncodingCollectibleDocument(true).build(); + private static final int INITIAL_BATCH_MODEL_START_INDEX = 0; + private static final int SERVER_DEFAULT_CURSOR_BATCH_SIZE = 0; + + private final List models; + private final ConcreteClientBulkWriteOptions options; + private final WriteConcern writeConcernSetting; + private final boolean retryWritesSetting; + private final CodecRegistry codecRegistry; + + /** + * @param retryWritesSetting See {@link MongoClientSettings#getRetryWrites()}. + */ + public ClientBulkWriteOperation( + final List models, + @Nullable final ClientBulkWriteOptions options, + final WriteConcern writeConcernSetting, + final boolean retryWritesSetting, + final CodecRegistry codecRegistry) { + this.models = models; + this.options = options == null ? 
EMPTY_OPTIONS : (ConcreteClientBulkWriteOptions) options; + this.writeConcernSetting = writeConcernSetting; + this.retryWritesSetting = retryWritesSetting; + this.codecRegistry = codecRegistry; + } + + @Override + public ClientBulkWriteResult execute(final WriteBinding binding) throws ClientBulkWriteException { + WriteConcern effectiveWriteConcern = validateAndGetEffectiveWriteConcern(binding.getOperationContext().getSessionContext()); + ResultAccumulator resultAccumulator = new ResultAccumulator(); + MongoException transformedTopLevelError = null; + + try { + executeAllBatches(effectiveWriteConcern, binding, resultAccumulator); + } catch (MongoException topLevelError) { + transformedTopLevelError = transformWriteException(topLevelError); + } + return resultAccumulator.build(transformedTopLevelError, effectiveWriteConcern); + } + + + @Override + public void executeAsync(final AsyncWriteBinding binding, + final SingleResultCallback finalCallback) { + WriteConcern effectiveWriteConcern = validateAndGetEffectiveWriteConcern(binding.getOperationContext().getSessionContext()); + ResultAccumulator resultAccumulator = new ResultAccumulator(); + MutableValue transformedTopLevelError = new MutableValue<>(); + + beginAsync().thenSupply(c -> { + executeAllBatchesAsync(effectiveWriteConcern, binding, resultAccumulator, c); + }).onErrorIf(topLevelError -> topLevelError instanceof MongoException, (topLevelError, c) -> { + transformedTopLevelError.set(transformWriteException((MongoException) topLevelError)); + c.complete(c); + }).thenApply((ignored, c) -> { + c.complete(resultAccumulator.build(transformedTopLevelError.getNullable(), effectiveWriteConcern)); + }).finish(finalCallback); + } + + /** + * To execute a batch means: + *
<ul>
+ *     <li>execute a `bulkWrite` command, which creates a cursor;</li>
+ *     <li>consume the cursor, which may involve executing `getMore` commands.</li>
+ * </ul>
+ * + * @throws MongoException When a {@linkplain ClientBulkWriteException#getCause() top-level error} happens. + */ + private void executeAllBatches( + final WriteConcern effectiveWriteConcern, + final WriteBinding binding, + final ResultAccumulator resultAccumulator) throws MongoException { + Integer nextBatchStartModelIndex = INITIAL_BATCH_MODEL_START_INDEX; + + do { + nextBatchStartModelIndex = executeBatch(nextBatchStartModelIndex, effectiveWriteConcern, binding, resultAccumulator); + } while (nextBatchStartModelIndex != null); + } + + /** + * @see #executeAllBatches(WriteConcern, WriteBinding, ResultAccumulator) + */ + private void executeAllBatchesAsync( + final WriteConcern effectiveWriteConcern, + final AsyncWriteBinding binding, + final ResultAccumulator resultAccumulator, + final SingleResultCallback finalCallback) { + MutableValue nextBatchStartModelIndex = new MutableValue<>(INITIAL_BATCH_MODEL_START_INDEX); + + beginAsync().thenRunDoWhileLoop(iterationCallback -> { + beginAsync().thenSupply(c -> { + executeBatchAsync(nextBatchStartModelIndex.get(), effectiveWriteConcern, binding, resultAccumulator, c); + }).thenApply((nextBatchStartModelIdx, c) -> { + nextBatchStartModelIndex.set(nextBatchStartModelIdx); + c.complete(c); + }).finish(iterationCallback); + }, () -> nextBatchStartModelIndex.getNullable() != null + ).finish(finalCallback); + } + + /** + * @return The start model index of the next batch, provided that the operation + * {@linkplain ExhaustiveClientBulkWriteCommandOkResponse#operationMayContinue(ConcreteClientBulkWriteOptions) may continue} + * and there are unexecuted {@linkplain ClientNamespacedWriteModel models} left. + */ + @Nullable + private Integer executeBatch( + final int batchStartModelIndex, + final WriteConcern effectiveWriteConcern, + final WriteBinding binding, + final ResultAccumulator resultAccumulator) { + List unexecutedModels = models.subList(batchStartModelIndex, models.size()); + assertFalse(unexecutedModels.isEmpty()); + OperationContext operationContext = binding.getOperationContext(); + SessionContext sessionContext = operationContext.getSessionContext(); + TimeoutContext timeoutContext = operationContext.getTimeoutContext(); + RetryState retryState = initialRetryState(retryWritesSetting, timeoutContext); + BatchEncoder batchEncoder = new BatchEncoder(); + + Supplier retryingBatchExecutor = decorateWriteWithRetries( + retryState, operationContext, + // Each batch re-selects a server and re-checks out a connection because this is simpler, + // and it is allowed by https://jira.mongodb.org/browse/DRIVERS-2502. + // If connection pinning is required, `binding` handles that, + // and `ClientSession`, `TransactionContext` are aware of that. 
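+ // The attempt body below checks out a connection, re-checks that retrying is still allowed for that connection,
+ // encodes and sends the `bulkWrite` command for the current batch (tracked by `batchEncoder`),
+ // and exhausts the reply cursor so the accumulated result covers the whole batch.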
+ () -> withSourceAndConnection(binding::getWriteConnectionSource, true, + (connectionSource, connection) -> { + ConnectionDescription connectionDescription = connection.getDescription(); + boolean effectiveRetryWrites = isRetryableWrite( + retryWritesSetting, effectiveWriteConcern, connectionDescription, sessionContext); + retryState.breakAndThrowIfRetryAnd(() -> !effectiveRetryWrites); + resultAccumulator.onNewServerAddress(connectionDescription.getServerAddress()); + retryState.attach(AttachmentKeys.maxWireVersion(), connectionDescription.getMaxWireVersion(), true) + .attach(AttachmentKeys.commandDescriptionSupplier(), () -> BULK_WRITE_COMMAND_NAME, false); + ClientBulkWriteCommand bulkWriteCommand = createBulkWriteCommand( + retryState, effectiveRetryWrites, effectiveWriteConcern, sessionContext, unexecutedModels, batchEncoder, + () -> retryState.attach(AttachmentKeys.retryableCommandFlag(), true, true)); + return executeBulkWriteCommandAndExhaustOkResponse( + retryState, connectionSource, connection, bulkWriteCommand, effectiveWriteConcern, operationContext); + }) + ); + + try { + ExhaustiveClientBulkWriteCommandOkResponse bulkWriteCommandOkResponse = retryingBatchExecutor.get(); + return resultAccumulator.onBulkWriteCommandOkResponseOrNoResponse( + batchStartModelIndex, bulkWriteCommandOkResponse, batchEncoder.intoEncodedBatchInfo()); + } catch (MongoWriteConcernWithResponseException mongoWriteConcernWithOkResponseException) { + return resultAccumulator.onBulkWriteCommandOkResponseWithWriteConcernError( + batchStartModelIndex, mongoWriteConcernWithOkResponseException, batchEncoder.intoEncodedBatchInfo()); + } catch (MongoCommandException bulkWriteCommandException) { + resultAccumulator.onBulkWriteCommandErrorResponse(bulkWriteCommandException); + throw bulkWriteCommandException; + } catch (MongoException mongoException) { + // The server does not have a chance to add "RetryableWriteError" label to `e`, + // and if it is the last attempt failure, `RetryingSyncSupplier` also may not have a chance + // to add the label. So we do that explicitly. + shouldAttemptToRetryWriteAndAddRetryableLabel(retryState, mongoException); + resultAccumulator.onBulkWriteCommandErrorWithoutResponse(mongoException); + throw mongoException; + } + } + + /** + * @see #executeBatch(int, WriteConcern, WriteBinding, ResultAccumulator) + */ + private void executeBatchAsync( + final int batchStartModelIndex, + final WriteConcern effectiveWriteConcern, + final AsyncWriteBinding binding, + final ResultAccumulator resultAccumulator, + final SingleResultCallback finalCallback) { + List unexecutedModels = models.subList(batchStartModelIndex, models.size()); + assertFalse(unexecutedModels.isEmpty()); + OperationContext operationContext = binding.getOperationContext(); + SessionContext sessionContext = operationContext.getSessionContext(); + TimeoutContext timeoutContext = operationContext.getTimeoutContext(); + RetryState retryState = initialRetryState(retryWritesSetting, timeoutContext); + BatchEncoder batchEncoder = new BatchEncoder(); + + AsyncCallbackSupplier retryingBatchExecutor = decorateWriteWithRetriesAsync( + retryState, operationContext, + // Each batch re-selects a server and re-checks out a connection because this is simpler, + // and it is allowed by https://jira.mongodb.org/browse/DRIVERS-2502. + // If connection pinning is required, `binding` handles that, + // and `ClientSession`, `TransactionContext` are aware of that. 
+ funcCallback -> withAsyncSourceAndConnection(binding::getWriteConnectionSource, true, funcCallback, + (connectionSource, connection, resultCallback) -> { + ConnectionDescription connectionDescription = connection.getDescription(); + boolean effectiveRetryWrites = isRetryableWrite( + retryWritesSetting, effectiveWriteConcern, connectionDescription, sessionContext); + retryState.breakAndThrowIfRetryAnd(() -> !effectiveRetryWrites); + resultAccumulator.onNewServerAddress(connectionDescription.getServerAddress()); + retryState.attach(AttachmentKeys.maxWireVersion(), connectionDescription.getMaxWireVersion(), true) + .attach(AttachmentKeys.commandDescriptionSupplier(), () -> BULK_WRITE_COMMAND_NAME, false); + ClientBulkWriteCommand bulkWriteCommand = createBulkWriteCommand( + retryState, effectiveRetryWrites, effectiveWriteConcern, sessionContext, unexecutedModels, batchEncoder, + () -> retryState.attach(AttachmentKeys.retryableCommandFlag(), true, true)); + executeBulkWriteCommandAndExhaustOkResponseAsync( + retryState, connectionSource, connection, bulkWriteCommand, effectiveWriteConcern, operationContext, resultCallback); + }) + ); + + beginAsync().thenSupply(callback -> { + retryingBatchExecutor.get(callback); + }).thenApply((bulkWriteCommandOkResponse, callback) -> { + callback.complete(resultAccumulator.onBulkWriteCommandOkResponseOrNoResponse( + batchStartModelIndex, bulkWriteCommandOkResponse, batchEncoder.intoEncodedBatchInfo())); + }).onErrorIf(throwable -> true, (t, callback) -> { + if (t instanceof MongoWriteConcernWithResponseException) { + MongoWriteConcernWithResponseException mongoWriteConcernWithOkResponseException = (MongoWriteConcernWithResponseException) t; + callback.complete(resultAccumulator.onBulkWriteCommandOkResponseWithWriteConcernError( + batchStartModelIndex, mongoWriteConcernWithOkResponseException, batchEncoder.intoEncodedBatchInfo())); + } else if (t instanceof MongoCommandException) { + MongoCommandException bulkWriteCommandException = (MongoCommandException) t; + resultAccumulator.onBulkWriteCommandErrorResponse(bulkWriteCommandException); + callback.completeExceptionally(t); + } else if (t instanceof MongoException) { + MongoException mongoException = (MongoException) t; + // The server does not have a chance to add "RetryableWriteError" label to `e`, + // and if it is the last attempt failure, `RetryingSyncSupplier` also may not have a chance + // to add the label. So we do that explicitly. + shouldAttemptToRetryWriteAndAddRetryableLabel(retryState, mongoException); + resultAccumulator.onBulkWriteCommandErrorWithoutResponse(mongoException); + callback.completeExceptionally(mongoException); + } else { + callback.completeExceptionally(t); + } + }).finish(finalCallback); + } + + /** + * @throws MongoWriteConcernWithResponseException This internal exception must be handled to avoid it being observed by an application. + * It {@linkplain MongoWriteConcernWithResponseException#getResponse() bears} the OK response to the {@code bulkWriteCommand}, + * which must be + * {@linkplain ResultAccumulator#onBulkWriteCommandOkResponseWithWriteConcernError(int, MongoWriteConcernWithResponseException, BatchEncoder.EncodedBatchInfo) accumulated} + * iff this exception is the failed result of retries. 
+ */ + @Nullable + private ExhaustiveClientBulkWriteCommandOkResponse executeBulkWriteCommandAndExhaustOkResponse( + final RetryState retryState, + final ConnectionSource connectionSource, + final Connection connection, + final ClientBulkWriteCommand bulkWriteCommand, + final WriteConcern effectiveWriteConcern, + final OperationContext operationContext) throws MongoWriteConcernWithResponseException { + BsonDocument bulkWriteCommandOkResponse = connection.command( + "admin", + bulkWriteCommand.getCommandDocument(), + NoOpFieldNameValidator.INSTANCE, + null, + CommandResultDocumentCodec.create(codecRegistry.get(BsonDocument.class), CommandBatchCursorHelper.FIRST_BATCH), + operationContext, + effectiveWriteConcern.isAcknowledged(), + bulkWriteCommand.getOpsAndNsInfo()); + if (bulkWriteCommandOkResponse == null) { + return null; + } + List> cursorExhaustBatches = doWithRetriesDisabledForCommand(retryState, "getMore", () -> + exhaustBulkWriteCommandOkResponseCursor(connectionSource, connection, bulkWriteCommandOkResponse)); + return createExhaustiveClientBulkWriteCommandOkResponse( + bulkWriteCommandOkResponse, + cursorExhaustBatches, + connection.getDescription()); + } + + /** + * @see #executeBulkWriteCommandAndExhaustOkResponse(RetryState, ConnectionSource, Connection, ClientBulkWriteCommand, WriteConcern, OperationContext) + */ + private void executeBulkWriteCommandAndExhaustOkResponseAsync( + final RetryState retryState, + final AsyncConnectionSource connectionSource, + final AsyncConnection connection, + final ClientBulkWriteCommand bulkWriteCommand, + final WriteConcern effectiveWriteConcern, + final OperationContext operationContext, + final SingleResultCallback finalCallback) { + beginAsync().thenSupply(callback -> { + connection.commandAsync( + "admin", + bulkWriteCommand.getCommandDocument(), + NoOpFieldNameValidator.INSTANCE, + null, + CommandResultDocumentCodec.create(codecRegistry.get(BsonDocument.class), CommandBatchCursorHelper.FIRST_BATCH), + operationContext, + effectiveWriteConcern.isAcknowledged(), + bulkWriteCommand.getOpsAndNsInfo(), callback); + }).thenApply((bulkWriteCommandOkResponse, callback) -> { + if (bulkWriteCommandOkResponse == null) { + callback.complete((ExhaustiveClientBulkWriteCommandOkResponse) null); + return; + } + beginAsync().>>thenSupply(c -> { + doWithRetriesDisabledForCommandAsync(retryState, "getMore", (c1) -> { + exhaustBulkWriteCommandOkResponseCursorAsync(connectionSource, connection, bulkWriteCommandOkResponse, c1); + }, c); + }).thenApply((cursorExhaustBatches, c) -> { + c.complete(createExhaustiveClientBulkWriteCommandOkResponse( + bulkWriteCommandOkResponse, + cursorExhaustBatches, + connection.getDescription())); + }).finish(callback); + }).finish(finalCallback); + } + + private static ExhaustiveClientBulkWriteCommandOkResponse createExhaustiveClientBulkWriteCommandOkResponse( + final BsonDocument bulkWriteCommandOkResponse, final List> cursorExhaustBatches, + final ConnectionDescription connectionDescription) { + ExhaustiveClientBulkWriteCommandOkResponse exhaustiveBulkWriteCommandOkResponse = + new ExhaustiveClientBulkWriteCommandOkResponse( + bulkWriteCommandOkResponse, cursorExhaustBatches); + + // `Connection.command` does not throw `MongoWriteConcernException`, so we have to construct it ourselves + MongoWriteConcernException writeConcernException = Exceptions.createWriteConcernException( + bulkWriteCommandOkResponse, connectionDescription.getServerAddress()); + if (writeConcernException != null) { + throw new 
MongoWriteConcernWithResponseException(writeConcernException, exhaustiveBulkWriteCommandOkResponse); + } + return exhaustiveBulkWriteCommandOkResponse; + } + + private R doWithRetriesDisabledForCommand( + final RetryState retryState, + final String commandDescription, + final Supplier actionWithCommand) { + Optional originalRetryableCommandFlag = retryState.attachment(AttachmentKeys.retryableCommandFlag()); + Supplier originalCommandDescriptionSupplier = retryState.attachment(AttachmentKeys.commandDescriptionSupplier()) + .orElseThrow(Assertions::fail); + + try { + retryState.attach(AttachmentKeys.retryableCommandFlag(), false, true) + .attach(AttachmentKeys.commandDescriptionSupplier(), () -> commandDescription, false); + return actionWithCommand.get(); + } finally { + originalRetryableCommandFlag.ifPresent(value -> retryState.attach(AttachmentKeys.retryableCommandFlag(), value, true)); + retryState.attach(AttachmentKeys.commandDescriptionSupplier(), originalCommandDescriptionSupplier, false); + } + } + + private void doWithRetriesDisabledForCommandAsync( + final RetryState retryState, + final String commandDescription, + final AsyncSupplier actionWithCommand, + final SingleResultCallback finalCallback) { + Optional originalRetryableCommandFlag = retryState.attachment(AttachmentKeys.retryableCommandFlag()); + Supplier originalCommandDescriptionSupplier = retryState.attachment(AttachmentKeys.commandDescriptionSupplier()) + .orElseThrow(Assertions::fail); + + beginAsync().thenSupply(c -> { + retryState.attach(AttachmentKeys.retryableCommandFlag(), false, true) + .attach(AttachmentKeys.commandDescriptionSupplier(), () -> commandDescription, false); + actionWithCommand.finish(c); + }).thenAlwaysRunAndFinish(() -> { + originalRetryableCommandFlag.ifPresent(value -> retryState.attach(AttachmentKeys.retryableCommandFlag(), value, true)); + retryState.attach(AttachmentKeys.commandDescriptionSupplier(), originalCommandDescriptionSupplier, false); + }, finalCallback); + } + + private List> exhaustBulkWriteCommandOkResponseCursor( + final ConnectionSource connectionSource, + final Connection connection, + final BsonDocument response) { + try (CommandBatchCursor cursor = cursorDocumentToBatchCursor( + TimeoutMode.CURSOR_LIFETIME, + response, + SERVER_DEFAULT_CURSOR_BATCH_SIZE, + codecRegistry.get(BsonDocument.class), + options.getComment().orElse(null), + connectionSource, + connection)) { + + return cursor.exhaust(); + } + } + + private void exhaustBulkWriteCommandOkResponseCursorAsync(final AsyncConnectionSource connectionSource, + final AsyncConnection connection, + final BsonDocument bulkWriteCommandOkResponse, + final SingleResultCallback>> finalCallback) { + AsyncBatchCursor cursor = cursorDocumentToAsyncBatchCursor( + TimeoutMode.CURSOR_LIFETIME, + bulkWriteCommandOkResponse, + SERVER_DEFAULT_CURSOR_BATCH_SIZE, + codecRegistry.get(BsonDocument.class), + options.getComment().orElse(null), + connectionSource, + connection); + + beginAsync().>>thenSupply(callback -> { + cursor.exhaust(callback); + }).thenAlwaysRunAndFinish(() -> { + cursor.close(); + }, finalCallback); + } + + + private ClientBulkWriteCommand createBulkWriteCommand( + final RetryState retryState, + final boolean effectiveRetryWrites, + final WriteConcern effectiveWriteConcern, + final SessionContext sessionContext, + final List unexecutedModels, + final BatchEncoder batchEncoder, + final Runnable retriesEnabler) { + BsonDocument commandDocument = new BsonDocument(BULK_WRITE_COMMAND_NAME, new BsonInt32(1)) + .append("errorsOnly", 
BsonBoolean.valueOf(!options.isVerboseResults())) + .append("ordered", BsonBoolean.valueOf(options.isOrdered())); + options.isBypassDocumentValidation().ifPresent(value -> + commandDocument.append("bypassDocumentValidation", BsonBoolean.valueOf(value))); + options.getComment().ifPresent(value -> + commandDocument.append("comment", value)); + options.getLet().ifPresent(let -> + commandDocument.append("let", let.toBsonDocument(BsonDocument.class, codecRegistry))); + commandWriteConcern(effectiveWriteConcern, sessionContext).ifPresent(value-> + commandDocument.append("writeConcern", value.asDocument())); + return new ClientBulkWriteCommand( + commandDocument, + new ClientBulkWriteCommand.OpsAndNsInfo( + effectiveRetryWrites, unexecutedModels, + batchEncoder, + options, + () -> { + retriesEnabler.run(); + return retryState.isFirstAttempt() + ? sessionContext.advanceTransactionNumber() + : sessionContext.getTransactionNumber(); + })); + } + + private WriteConcern validateAndGetEffectiveWriteConcern(final SessionContext sessionContext) { + WriteConcern effectiveWriteConcern = CommandOperationHelper.validateAndGetEffectiveWriteConcern(writeConcernSetting, sessionContext); + if (!effectiveWriteConcern.isAcknowledged()) { + if (options.isVerboseResults()) { + throw new MongoClientException("Cannot request unacknowledged write concern and verbose results"); + } + if (options.isOrdered()) { + throw new MongoClientException("Cannot request unacknowledged write concern and ordered writes"); + } + } + return effectiveWriteConcern; + } + + private void encodeUsingRegistry(final BsonWriter writer, final T value) { + encodeUsingRegistry(writer, value, DEFAULT_ENCODER_CONTEXT); + } + + private void encodeUsingRegistry(final BsonWriter writer, final T value, final EncoderContext encoderContext) { + @SuppressWarnings("unchecked") + Encoder encoder = (Encoder) codecRegistry.get(value.getClass()); + encoder.encode(writer, value, encoderContext); + } + + private static AbstractClientNamespacedWriteModel getNamespacedModel( + final List models, final int index) { + return (AbstractClientNamespacedWriteModel) models.get(index); + } + + public static final class Exceptions { + public static Optional serverAddressFromException(@Nullable final MongoException exception) { + ServerAddress serverAddress = null; + if (exception instanceof MongoServerException) { + serverAddress = ((MongoServerException) exception).getServerAddress(); + } else if (exception instanceof MongoSocketException) { + serverAddress = ((MongoSocketException) exception).getServerAddress(); + } + return ofNullable(serverAddress); + } + + @Nullable + private static MongoWriteConcernException createWriteConcernException( + final BsonDocument response, + final ServerAddress serverAddress) { + final String writeConcernErrorFieldName = "writeConcernError"; + if (!response.containsKey(writeConcernErrorFieldName)) { + return null; + } + BsonDocument writeConcernErrorDocument = response.getDocument(writeConcernErrorFieldName); + WriteConcernError writeConcernError = WriteConcernHelper.createWriteConcernError(writeConcernErrorDocument); + Set errorLabels = response.getArray("errorLabels", new BsonArray()).stream() + .map(i -> i.asString().getValue()) + .collect(toSet()); + return new MongoWriteConcernException(writeConcernError, null, serverAddress, errorLabels); + } + } + + private static final class ExhaustiveClientBulkWriteCommandOkResponse { + /** + * The number of unsuccessful individual write operations. 
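For orientation, the fields parsed just below (nErrors, nInserted, nUpserted, nMatched, nModified, nDeleted) and the per-operation documents exhausted from the cursor all come out of the server's bulkWrite reply. A minimal sketch of such a reply for a single successful insert, with illustrative values and the standard command-cursor shape (not taken from this PR; requires org.bson.BsonDocument):

    BsonDocument sampleOkResponse = BsonDocument.parse(
            "{ok: 1, cursor: {id: 0, ns: 'admin.$cmd.bulkWrite', firstBatch: [{ok: 1, idx: 0, n: 1}]},"
                    + " nErrors: 0, nInserted: 1, nUpserted: 0, nMatched: 0, nModified: 0, nDeleted: 0}");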
+ */ + private final int nErrors; + private final int nInserted; + private final int nUpserted; + private final int nMatched; + private final int nModified; + private final int nDeleted; + private final List cursorExhaust; + + ExhaustiveClientBulkWriteCommandOkResponse( + final BsonDocument bulkWriteCommandOkResponse, + final List> cursorExhaustBatches) { + this.nErrors = bulkWriteCommandOkResponse.getInt32("nErrors").getValue(); + this.nInserted = bulkWriteCommandOkResponse.getInt32("nInserted").getValue(); + this.nUpserted = bulkWriteCommandOkResponse.getInt32("nUpserted").getValue(); + this.nMatched = bulkWriteCommandOkResponse.getInt32("nMatched").getValue(); + this.nModified = bulkWriteCommandOkResponse.getInt32("nModified").getValue(); + this.nDeleted = bulkWriteCommandOkResponse.getInt32("nDeleted").getValue(); + if (cursorExhaustBatches.isEmpty()) { + cursorExhaust = emptyList(); + } else if (cursorExhaustBatches.size() == 1) { + cursorExhaust = cursorExhaustBatches.get(0); + } else { + cursorExhaust = cursorExhaustBatches.stream().flatMap(Collection::stream).collect(toList()); + } + } + + boolean operationMayContinue(final ConcreteClientBulkWriteOptions options) { + return nErrors == 0 || !options.isOrdered(); + } + + int getNErrors() { + return nErrors; + } + + int getNInserted() { + return nInserted; + } + + int getNUpserted() { + return nUpserted; + } + + int getNMatched() { + return nMatched; + } + + int getNModified() { + return nModified; + } + + int getNDeleted() { + return nDeleted; + } + + List getCursorExhaust() { + return cursorExhaust; + } + } + + /** + * Accumulates results of the operation as it is being executed + * for {@linkplain #build(MongoException, WriteConcern) building} them when the operation completes. + */ + private final class ResultAccumulator { + @Nullable + private ServerAddress serverAddress; + private final ArrayList batchResults; + + ResultAccumulator() { + serverAddress = null; + batchResults = new ArrayList<>(); + } + + /** + *
+ * <ul>
+ *     <li>Either builds and returns {@link ClientBulkWriteResult};</li>
+ *     <li>or builds and throws {@link ClientBulkWriteException};</li>
+ *     <li>or throws {@code topLevelError}.</li>
+ * </ul>
+ */ + ClientBulkWriteResult build(@Nullable final MongoException topLevelError, final WriteConcern effectiveWriteConcern) throws MongoException { + boolean verboseResultsSetting = options.isVerboseResults(); + boolean batchResultsHaveResponses = false; + boolean batchResultsHaveInfoAboutSuccessfulIndividualOperations = false; + long insertedCount = 0; + long upsertedCount = 0; + long matchedCount = 0; + long modifiedCount = 0; + long deletedCount = 0; + Map insertResults = verboseResultsSetting ? new HashMap<>() : emptyMap(); + Map updateResults = verboseResultsSetting ? new HashMap<>() : emptyMap(); + Map deleteResults = verboseResultsSetting ? new HashMap<>() : emptyMap(); + ArrayList writeConcernErrors = new ArrayList<>(); + Map writeErrors = new HashMap<>(); + for (BatchResult batchResult : batchResults) { + if (batchResult.hasResponse()) { + batchResultsHaveResponses = true; + MongoWriteConcernException writeConcernException = batchResult.getWriteConcernException(); + if (writeConcernException != null) { + writeConcernErrors.add(writeConcernException.getWriteConcernError()); + } + int batchStartModelIndex = batchResult.getBatchStartModelIndex(); + ExhaustiveClientBulkWriteCommandOkResponse response = batchResult.getResponse(); + boolean orderedSetting = options.isOrdered(); + int nErrors = response.getNErrors(); + batchResultsHaveInfoAboutSuccessfulIndividualOperations = batchResultsHaveInfoAboutSuccessfulIndividualOperations + || (orderedSetting && nErrors == 0) + || (!orderedSetting && nErrors < batchResult.getBatchModelsCount()); + insertedCount += response.getNInserted(); + upsertedCount += response.getNUpserted(); + matchedCount += response.getNMatched(); + modifiedCount += response.getNModified(); + deletedCount += response.getNDeleted(); + Map insertModelDocumentIds = batchResult.getInsertModelDocumentIds(); + for (BsonDocument individualOperationResponse : response.getCursorExhaust()) { + int individualOperationIndexInBatch = individualOperationResponse.getInt32("idx").getValue(); + int writeModelIndex = batchStartModelIndex + individualOperationIndexInBatch; + if (individualOperationResponse.getNumber("ok").intValue() == 1) { + assertTrue(verboseResultsSetting); + AbstractClientNamespacedWriteModel writeModel = getNamespacedModel(models, writeModelIndex); + if (writeModel instanceof ConcreteClientNamespacedInsertOneModel) { + insertResults.put( + writeModelIndex, + new ConcreteClientInsertOneResult(insertModelDocumentIds.get(individualOperationIndexInBatch))); + } else if (writeModel instanceof ConcreteClientNamespacedUpdateOneModel + || writeModel instanceof ConcreteClientNamespacedUpdateManyModel + || writeModel instanceof ConcreteClientNamespacedReplaceOneModel) { + BsonDocument upsertedIdDocument = individualOperationResponse.getDocument("upserted", null); + updateResults.put( + writeModelIndex, + new ConcreteClientUpdateResult( + individualOperationResponse.getInt32("n").getValue(), + individualOperationResponse.getInt32("nModified").getValue(), + upsertedIdDocument == null ? 
null : upsertedIdDocument.get("_id"))); + } else if (writeModel instanceof ConcreteClientNamespacedDeleteOneModel + || writeModel instanceof ConcreteClientNamespacedDeleteManyModel) { + deleteResults.put( + writeModelIndex, + new ConcreteClientDeleteResult(individualOperationResponse.getInt32("n").getValue())); + } else { + fail(writeModel.getClass().toString()); + } + } else { + batchResultsHaveInfoAboutSuccessfulIndividualOperations = batchResultsHaveInfoAboutSuccessfulIndividualOperations + || (orderedSetting && individualOperationIndexInBatch > 0); + WriteError individualOperationWriteError = new WriteError( + individualOperationResponse.getInt32("code").getValue(), + individualOperationResponse.getString("errmsg").getValue(), + individualOperationResponse.getDocument("errInfo", new BsonDocument())); + writeErrors.put(writeModelIndex, individualOperationWriteError); + } + } + } + } + if (topLevelError == null && writeConcernErrors.isEmpty() && writeErrors.isEmpty()) { + if (effectiveWriteConcern.isAcknowledged()) { + AcknowledgedSummaryClientBulkWriteResult summaryResult = new AcknowledgedSummaryClientBulkWriteResult( + insertedCount, upsertedCount, matchedCount, modifiedCount, deletedCount); + return verboseResultsSetting + ? new AcknowledgedVerboseClientBulkWriteResult(summaryResult, insertResults, updateResults, deleteResults) + : summaryResult; + } else { + return UnacknowledgedClientBulkWriteResult.INSTANCE; + } + } else if (batchResultsHaveResponses) { + AcknowledgedSummaryClientBulkWriteResult partialSummaryResult = batchResultsHaveInfoAboutSuccessfulIndividualOperations + ? new AcknowledgedSummaryClientBulkWriteResult(insertedCount, upsertedCount, matchedCount, modifiedCount, deletedCount) + : null; + throw new ClientBulkWriteException( + topLevelError, + writeConcernErrors, + writeErrors, + verboseResultsSetting && partialSummaryResult != null + ? new AcknowledgedVerboseClientBulkWriteResult(partialSummaryResult, insertResults, updateResults, deleteResults) + : partialSummaryResult, + assertNotNull(serverAddress)); + } else { + throw assertNotNull(topLevelError); + } + } + + void onNewServerAddress(final ServerAddress serverAddress) { + this.serverAddress = serverAddress; + } + + @Nullable + Integer onBulkWriteCommandOkResponseOrNoResponse( + final int batchStartModelIndex, + @Nullable + final ExhaustiveClientBulkWriteCommandOkResponse response, + final BatchEncoder.EncodedBatchInfo encodedBatchInfo) { + return onBulkWriteCommandOkResponseOrNoResponse(batchStartModelIndex, response, null, encodedBatchInfo); + } + + /** + * @return See {@link #executeBatch(int, WriteConcern, WriteBinding, ResultAccumulator)}. + */ + @Nullable + Integer onBulkWriteCommandOkResponseWithWriteConcernError( + final int batchStartModelIndex, + final MongoWriteConcernWithResponseException exception, + final BatchEncoder.EncodedBatchInfo encodedBatchInfo) { + MongoWriteConcernException writeConcernException = (MongoWriteConcernException) exception.getCause(); + onNewServerAddress(writeConcernException.getServerAddress()); + ExhaustiveClientBulkWriteCommandOkResponse response = (ExhaustiveClientBulkWriteCommandOkResponse) exception.getResponse(); + return onBulkWriteCommandOkResponseOrNoResponse(batchStartModelIndex, response, writeConcernException, encodedBatchInfo); + } + + /** + * @return See {@link #executeBatch(int, WriteConcern, WriteBinding, ResultAccumulator)}. 
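The aggregation above is what ultimately surfaces to an application as either a ClientBulkWriteResult or a ClientBulkWriteException. A hedged usage sketch, assuming this PR exposes a bulkWrite method on the client/cluster type, the ClientNamespacedWriteModel factory methods, and ClientBulkWriteException accessors named getPartialResult() and getWriteErrors() (names inferred, not confirmed by this excerpt):

    // Illustrative only; see the assumptions in the preceding paragraph.
    static void bulkWriteWithPartialResultHandling(final MongoCluster cluster) {
        MongoNamespace ns = new MongoNamespace("db", "coll");
        try {
            cluster.bulkWrite(Arrays.asList(
                    ClientNamespacedWriteModel.insertOne(ns, new Document("x", 1)),
                    ClientNamespacedWriteModel.deleteOne(ns, new Document("x", 2))));
        } catch (ClientBulkWriteException e) {
            // Partial results plus per-model write errors keyed by the model's index in the original list.
            e.getPartialResult().ifPresent(partial -> System.out.println("inserted: " + partial.getInsertedCount()));
            e.getWriteErrors().forEach((modelIndex, error) -> System.out.println(modelIndex + " -> " + error.getMessage()));
        }
    }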
+ */ + @Nullable + private Integer onBulkWriteCommandOkResponseOrNoResponse( + final int batchStartModelIndex, + @Nullable + final ExhaustiveClientBulkWriteCommandOkResponse response, + @Nullable + final MongoWriteConcernException writeConcernException, + final BatchEncoder.EncodedBatchInfo encodedBatchInfo) { + BatchResult batchResult = response == null + ? BatchResult.noResponse(batchStartModelIndex, encodedBatchInfo) + : BatchResult.okResponse(batchStartModelIndex, encodedBatchInfo, response, writeConcernException); + batchResults.add(batchResult); + int potentialNextBatchStartModelIndex = batchStartModelIndex + batchResult.getBatchModelsCount(); + return (response == null || response.operationMayContinue(options)) + ? potentialNextBatchStartModelIndex == models.size() ? null : potentialNextBatchStartModelIndex + : null; + } + + void onBulkWriteCommandErrorResponse(final MongoCommandException exception) { + onNewServerAddress(exception.getServerAddress()); + } + + void onBulkWriteCommandErrorWithoutResponse(final MongoException exception) { + Exceptions.serverAddressFromException(exception).ifPresent(this::onNewServerAddress); + } + } + + public static final class ClientBulkWriteCommand { + private final BsonDocument commandDocument; + private final OpsAndNsInfo opsAndNsInfo; + + ClientBulkWriteCommand( + final BsonDocument commandDocument, + final OpsAndNsInfo opsAndNsInfo) { + this.commandDocument = commandDocument; + this.opsAndNsInfo = opsAndNsInfo; + } + + BsonDocument getCommandDocument() { + return commandDocument; + } + + OpsAndNsInfo getOpsAndNsInfo() { + return opsAndNsInfo; + } + + public static final class OpsAndNsInfo extends DualMessageSequences { + private final boolean effectiveRetryWrites; + private final List models; + private final BatchEncoder batchEncoder; + private final ConcreteClientBulkWriteOptions options; + private final Supplier doIfCommandIsRetryableAndAdvanceGetTxnNumber; + + @VisibleForTesting(otherwise = PACKAGE) + public OpsAndNsInfo( + final boolean effectiveRetryWrites, + final List models, + final BatchEncoder batchEncoder, + final ConcreteClientBulkWriteOptions options, + final Supplier doIfCommandIsRetryableAndAdvanceGetTxnNumber) { + super("ops", new OpsFieldNameValidator(models), "nsInfo", NoOpFieldNameValidator.INSTANCE); + this.effectiveRetryWrites = effectiveRetryWrites; + this.models = models; + this.batchEncoder = batchEncoder; + this.options = options; + this.doIfCommandIsRetryableAndAdvanceGetTxnNumber = doIfCommandIsRetryableAndAdvanceGetTxnNumber; + } + + @Override + public EncodeDocumentsResult encodeDocuments(final WritersProviderAndLimitsChecker writersProviderAndLimitsChecker) { + // We must call `batchEncoder.reset` lazily, that is here, and not eagerly before a command retry attempt, + // because a retry attempt may fail before encoding, + // in which case we need the information gathered by `batchEncoder` at a previous attempt. 
+ batchEncoder.reset(); + LinkedHashMap indexedNamespaces = new LinkedHashMap<>(); + WritersProviderAndLimitsChecker.WriteResult writeResult = OK_LIMIT_NOT_REACHED; + boolean commandIsRetryable = effectiveRetryWrites; + int maxModelIndexInBatch = -1; + for (int modelIndexInBatch = 0; modelIndexInBatch < models.size() && writeResult == OK_LIMIT_NOT_REACHED; modelIndexInBatch++) { + AbstractClientNamespacedWriteModel namespacedModel = getNamespacedModel(models, modelIndexInBatch); + MongoNamespace namespace = namespacedModel.getNamespace(); + int indexedNamespacesSizeBeforeCompute = indexedNamespaces.size(); + int namespaceIndexInBatch = indexedNamespaces.computeIfAbsent(namespace, k -> indexedNamespacesSizeBeforeCompute); + boolean writeNewNamespace = indexedNamespaces.size() != indexedNamespacesSizeBeforeCompute; + int finalModelIndexInBatch = modelIndexInBatch; + writeResult = writersProviderAndLimitsChecker.tryWrite((opsWriter, nsInfoWriter) -> { + batchEncoder.encodeWriteModel(opsWriter, namespacedModel.getModel(), finalModelIndexInBatch, namespaceIndexInBatch); + if (writeNewNamespace) { + nsInfoWriter.writeStartDocument(); + nsInfoWriter.writeString("ns", namespace.getFullName()); + nsInfoWriter.writeEndDocument(); + } + return finalModelIndexInBatch + 1; + }); + if (writeResult == FAIL_LIMIT_EXCEEDED) { + batchEncoder.reset(finalModelIndexInBatch); + } else { + maxModelIndexInBatch = finalModelIndexInBatch; + if (commandIsRetryable && doesNotSupportRetries(namespacedModel)) { + commandIsRetryable = false; + logWriteModelDoesNotSupportRetries(); + } + } + } + return new EncodeDocumentsResult( + // we will execute more batches, so we must request a response to maintain the order of individual write operations + options.isOrdered() && maxModelIndexInBatch < models.size() - 1, + commandIsRetryable + ? singletonList(new BsonElement("txnNumber", new BsonInt64(doIfCommandIsRetryableAndAdvanceGetTxnNumber.get()))) + : emptyList()); + } + + private static boolean doesNotSupportRetries(final AbstractClientNamespacedWriteModel model) { + return model instanceof ConcreteClientNamespacedUpdateManyModel || model instanceof ConcreteClientNamespacedDeleteManyModel; + } + + /** + * The server supports only the {@code update} individual write operation in the {@code ops} array field, while the driver supports + * {@link ClientNamespacedUpdateOneModel}, {@link ClientNamespacedUpdateOneModel}, {@link ClientNamespacedReplaceOneModel}. + * The difference between updating and replacing is only in the document specified via the {@code updateMods} field: + *
+ * <ul>
+ *     <li>if the name of the first field starts with {@code '$'}, then the document is interpreted as specifying update operators;</li>
+ *     <li>if the name of the first field does not start with {@code '$'}, then the document is interpreted as a replacement.</li>
+ * </ul>
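To make the two cases above concrete, a hedged illustration (example documents, not taken from the PR; requires org.bson.BsonDocument):

    BsonDocument operatorUpdate = BsonDocument.parse("{$set: {x: 1}}"); // first field starts with '$': update operators
    BsonDocument replacement = BsonDocument.parse("{x: 1}");            // first field does not: replacement document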
+ * + * @see + * Update vs. replace document validation + */ + private static final class OpsFieldNameValidator implements FieldNameValidator { + private static final Set OPERATION_DISCRIMINATOR_FIELD_NAMES = Stream.of("insert", "update", "delete").collect(toSet()); + + private final List models; + private final ReplacingUpdateModsFieldValidator replacingValidator; + private final UpdatingUpdateModsFieldValidator updatingValidator; + private int currentIndividualOperationIndex; + + OpsFieldNameValidator(final List models) { + this.models = models; + replacingValidator = new ReplacingUpdateModsFieldValidator(); + updatingValidator = new UpdatingUpdateModsFieldValidator(); + currentIndividualOperationIndex = -1; + } + + @Override + public boolean validate(final String fieldName) { + if (OPERATION_DISCRIMINATOR_FIELD_NAMES.contains(fieldName)) { + currentIndividualOperationIndex++; + } + return true; + } + + @Override + public FieldNameValidator getValidatorForField(final String fieldName) { + if (fieldName.equals("updateMods")) { + return currentIndividualOperationIsReplace() ? replacingValidator.reset() : updatingValidator.reset(); + } + return NoOpFieldNameValidator.INSTANCE; + } + + private boolean currentIndividualOperationIsReplace() { + return getNamespacedModel(models, currentIndividualOperationIndex) instanceof ConcreteClientNamespacedReplaceOneModel; + } + + private static final class ReplacingUpdateModsFieldValidator implements FieldNameValidator { + private boolean firstFieldSinceLastReset; + + ReplacingUpdateModsFieldValidator() { + firstFieldSinceLastReset = true; + } + + @Override + public boolean validate(final String fieldName) { + if (firstFieldSinceLastReset) { + // we must validate only the first field, and leave the rest up to the server + firstFieldSinceLastReset = false; + return ReplacingDocumentFieldNameValidator.INSTANCE.validate(fieldName); + } + return true; + } + + @Override + public String getValidationErrorMessage(final String fieldName) { + return ReplacingDocumentFieldNameValidator.INSTANCE.getValidationErrorMessage(fieldName); + } + + @Override + public FieldNameValidator getValidatorForField(final String fieldName) { + return NoOpFieldNameValidator.INSTANCE; + } + + ReplacingUpdateModsFieldValidator reset() { + firstFieldSinceLastReset = true; + return this; + } + } + + private static final class UpdatingUpdateModsFieldValidator implements FieldNameValidator { + private final UpdateFieldNameValidator delegate; + private boolean firstFieldSinceLastReset; + + UpdatingUpdateModsFieldValidator() { + delegate = new UpdateFieldNameValidator(); + firstFieldSinceLastReset = true; + } + + @Override + public boolean validate(final String fieldName) { + if (firstFieldSinceLastReset) { + // we must validate only the first field, and leave the rest up to the server + firstFieldSinceLastReset = false; + return delegate.validate(fieldName); + } + return true; + } + + @Override + public String getValidationErrorMessage(final String fieldName) { + return delegate.getValidationErrorMessage(fieldName); + } + + @Override + public FieldNameValidator getValidatorForField(final String fieldName) { + return NoOpFieldNameValidator.INSTANCE; + } + + @Override + public void start() { + delegate.start(); + } + + @Override + public void end() { + delegate.end(); + } + + UpdatingUpdateModsFieldValidator reset() { + delegate.reset(); + firstFieldSinceLastReset = true; + return this; + } + } + } + } + } + + static final class BatchResult { + private final int batchStartModelIndex; + 
private final BatchEncoder.EncodedBatchInfo encodedBatchInfo; + @Nullable + private final ExhaustiveClientBulkWriteCommandOkResponse response; + @Nullable + private final MongoWriteConcernException writeConcernException; + + static BatchResult okResponse( + final int batchStartModelIndex, + final BatchEncoder.EncodedBatchInfo encodedBatchInfo, + final ExhaustiveClientBulkWriteCommandOkResponse response, + @Nullable final MongoWriteConcernException writeConcernException) { + return new BatchResult(batchStartModelIndex, encodedBatchInfo, assertNotNull(response), writeConcernException); + } + + static BatchResult noResponse(final int batchStartModelIndex, final BatchEncoder.EncodedBatchInfo encodedBatchInfo) { + return new BatchResult(batchStartModelIndex, encodedBatchInfo, null, null); + } + + private BatchResult( + final int batchStartModelIndex, + final BatchEncoder.EncodedBatchInfo encodedBatchInfo, + @Nullable final ExhaustiveClientBulkWriteCommandOkResponse response, + @Nullable final MongoWriteConcernException writeConcernException) { + this.batchStartModelIndex = batchStartModelIndex; + this.encodedBatchInfo = encodedBatchInfo; + this.response = response; + this.writeConcernException = writeConcernException; + } + + int getBatchStartModelIndex() { + return batchStartModelIndex; + } + + /** + * @see BatchEncoder.EncodedBatchInfo#getModelsCount() + */ + int getBatchModelsCount() { + return encodedBatchInfo.getModelsCount(); + } + + boolean hasResponse() { + return response != null; + } + + ExhaustiveClientBulkWriteCommandOkResponse getResponse() { + return assertNotNull(response); + } + + @Nullable + MongoWriteConcernException getWriteConcernException() { + assertTrue(hasResponse()); + return writeConcernException; + } + + /** + * @see BatchEncoder.EncodedBatchInfo#getInsertModelDocumentIds() + */ + Map getInsertModelDocumentIds() { + assertTrue(hasResponse()); + return encodedBatchInfo.getInsertModelDocumentIds(); + } + } + + /** + * Exactly one instance must be used per {@linkplain #executeBatch(int, WriteConcern, WriteBinding, ResultAccumulator) batch}. + */ + @VisibleForTesting(otherwise = PRIVATE) + public final class BatchEncoder { + private EncodedBatchInfo encodedBatchInfo; + + @VisibleForTesting(otherwise = PACKAGE) + public BatchEncoder() { + encodedBatchInfo = new EncodedBatchInfo(); + } + + /** + * Must be called at most once. + * Must not be called before calling + * {@link #encodeWriteModel(BsonBinaryWriter, ClientWriteModel, int, int)} at least once. + * Renders {@code this} unusable. 
+ */ + EncodedBatchInfo intoEncodedBatchInfo() { + EncodedBatchInfo result = assertNotNull(encodedBatchInfo); + encodedBatchInfo = null; + assertTrue(result.getModelsCount() > 0); + return result; + } + + void reset() { + // we must not reset anything but `modelsCount` + assertNotNull(encodedBatchInfo).modelsCount = 0; + } + + void reset(final int modelIndexInBatch) { + assertNotNull(encodedBatchInfo).modelsCount -= 1; + encodedBatchInfo.insertModelDocumentIds.remove(modelIndexInBatch); + } + + void encodeWriteModel( + final BsonBinaryWriter writer, + final ClientWriteModel model, + final int modelIndexInBatch, + final int namespaceIndexInBatch) { + assertNotNull(encodedBatchInfo).modelsCount++; + writer.writeStartDocument(); + if (model instanceof ConcreteClientInsertOneModel) { + writer.writeInt32("insert", namespaceIndexInBatch); + encodeWriteModelInternals(writer, (ConcreteClientInsertOneModel) model, modelIndexInBatch); + } else if (model instanceof ConcreteClientUpdateOneModel) { + writer.writeInt32("update", namespaceIndexInBatch); + writer.writeBoolean("multi", false); + encodeWriteModelInternals(writer, (ConcreteClientUpdateOneModel) model); + } else if (model instanceof ConcreteClientUpdateManyModel) { + writer.writeInt32("update", namespaceIndexInBatch); + writer.writeBoolean("multi", true); + encodeWriteModelInternals(writer, (ConcreteClientUpdateManyModel) model); + } else if (model instanceof ConcreteClientReplaceOneModel) { + writer.writeInt32("update", namespaceIndexInBatch); + encodeWriteModelInternals(writer, (ConcreteClientReplaceOneModel) model); + } else if (model instanceof ConcreteClientDeleteOneModel) { + writer.writeInt32("delete", namespaceIndexInBatch); + writer.writeBoolean("multi", false); + encodeWriteModelInternals(writer, (ConcreteClientDeleteOneModel) model); + } else if (model instanceof ConcreteClientDeleteManyModel) { + writer.writeInt32("delete", namespaceIndexInBatch); + writer.writeBoolean("multi", true); + encodeWriteModelInternals(writer, (ConcreteClientDeleteManyModel) model); + } else { + throw fail(model.getClass().toString()); + } + writer.writeEndDocument(); + } + + private void encodeWriteModelInternals( + final BsonBinaryWriter writer, + final ConcreteClientInsertOneModel model, + final int modelIndexInBatch) { + writer.writeName("document"); + Object document = model.getDocument(); + assertNotNull(encodedBatchInfo).insertModelDocumentIds.compute(modelIndexInBatch, (k, knownModelDocumentId) -> { + IdHoldingBsonWriter documentIdHoldingBsonWriter = new IdHoldingBsonWriter( + writer, + // Reuse `knownModelDocumentId` if it may have been generated by `IdHoldingBsonWriter` in a previous attempt. + // If its type is not `BsonObjectId`, which happens only if `_id` was specified by the application, + // we know it could not have been generated. + knownModelDocumentId instanceof BsonObjectId ? 
knownModelDocumentId.asObjectId() : null); + encodeUsingRegistry(documentIdHoldingBsonWriter, document, COLLECTIBLE_DOCUMENT_ENCODER_CONTEXT); + return documentIdHoldingBsonWriter.getId(); + }); + } + + private void encodeWriteModelInternals(final BsonWriter writer, final AbstractClientUpdateModel model) { + writer.writeName("filter"); + encodeUsingRegistry(writer, model.getFilter()); + model.getUpdate().ifPresent(value -> { + writer.writeName("updateMods"); + encodeUsingRegistry(writer, value); + }); + model.getUpdatePipeline().ifPresent(value -> { + writer.writeStartArray("updateMods"); + value.forEach(pipelineStage -> encodeUsingRegistry(writer, pipelineStage)); + writer.writeEndArray(); + }); + AbstractClientUpdateOptions options = model.getOptions(); + options.getArrayFilters().ifPresent(value -> { + writer.writeStartArray("arrayFilters"); + value.forEach(filter -> encodeUsingRegistry(writer, filter)); + writer.writeEndArray(); + }); + options.getCollation().ifPresent(value -> { + writer.writeName("collation"); + encodeUsingRegistry(writer, value.asDocument()); + }); + options.getHint().ifPresent(hint -> { + writer.writeName("hint"); + encodeUsingRegistry(writer, hint); + }); + options.getHintString().ifPresent(value -> writer.writeString("hint", value)); + options.isUpsert().ifPresent(value -> writer.writeBoolean("upsert", value)); + } + + private void encodeWriteModelInternals(final BsonBinaryWriter writer, final ConcreteClientReplaceOneModel model) { + writer.writeBoolean("multi", false); + writer.writeName("filter"); + encodeUsingRegistry(writer, model.getFilter()); + writer.writeName("updateMods"); + encodeUsingRegistry(writer, model.getReplacement(), COLLECTIBLE_DOCUMENT_ENCODER_CONTEXT); + ConcreteClientReplaceOneOptions options = model.getOptions(); + options.getCollation().ifPresent(value -> { + writer.writeName("collation"); + encodeUsingRegistry(writer, value.asDocument()); + }); + options.getHint().ifPresent(value -> { + writer.writeName("hint"); + encodeUsingRegistry(writer, value); + }); + options.getHintString().ifPresent(value -> writer.writeString("hint", value)); + options.isUpsert().ifPresent(value -> writer.writeBoolean("upsert", value)); + } + + private void encodeWriteModelInternals(final BsonWriter writer, final AbstractClientDeleteModel model) { + writer.writeName("filter"); + encodeUsingRegistry(writer, model.getFilter()); + AbstractClientDeleteOptions options = model.getOptions(); + options.getCollation().ifPresent(value -> { + writer.writeName("collation"); + encodeUsingRegistry(writer, value.asDocument()); + }); + options.getHint().ifPresent(value -> { + writer.writeName("hint"); + encodeUsingRegistry(writer, value); + }); + options.getHintString().ifPresent(value -> writer.writeString("hint", value)); + } + + final class EncodedBatchInfo { + private final HashMap insertModelDocumentIds; + private int modelsCount; + + private EncodedBatchInfo() { + insertModelDocumentIds = new HashMap<>(); + modelsCount = 0; + } + + /** + * The key of each entry is the index of a model in the + * {@linkplain #executeBatch(int, WriteConcern, WriteBinding, ResultAccumulator) batch}, + * the value is either the "_id" field value from {@linkplain ConcreteClientInsertOneModel#getDocument()}, + * or the value we generated for this field if the field is absent. 
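A hedged illustration of the map described above, keyed by the model's index within the batch (values are examples only; imports from java.util and org.bson assumed):

    Map<Integer, BsonValue> insertModelDocumentIds = new HashMap<>();
    insertModelDocumentIds.put(0, new BsonObjectId()); // "_id" was absent, so the generated ObjectId is recorded
    insertModelDocumentIds.put(1, new BsonInt32(5));   // "_id" was supplied by the application as 5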
+ */ + Map getInsertModelDocumentIds() { + return insertModelDocumentIds; + } + + int getModelsCount() { + return modelsCount; + } + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/CommandOperationHelper.java b/driver-core/src/main/com/mongodb/internal/operation/CommandOperationHelper.java index 4c428131853..2861bcf9ad5 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/CommandOperationHelper.java +++ b/driver-core/src/main/com/mongodb/internal/operation/CommandOperationHelper.java @@ -25,6 +25,7 @@ import com.mongodb.MongoSecurityException; import com.mongodb.MongoServerException; import com.mongodb.MongoSocketException; +import com.mongodb.WriteConcern; import com.mongodb.assertions.Assertions; import com.mongodb.connection.ConnectionDescription; import com.mongodb.connection.ServerDescription; @@ -33,20 +34,40 @@ import com.mongodb.internal.connection.OperationContext; import com.mongodb.internal.operation.OperationHelper.ResourceSupplierInternalException; import com.mongodb.internal.operation.retry.AttachmentKeys; +import com.mongodb.internal.session.SessionContext; import com.mongodb.lang.Nullable; import org.bson.BsonDocument; import java.util.List; +import java.util.Optional; import java.util.function.BinaryOperator; import java.util.function.Supplier; import static com.mongodb.assertions.Assertions.assertFalse; +import static com.mongodb.assertions.Assertions.assertNotNull; import static com.mongodb.internal.operation.OperationHelper.LOGGER; import static java.lang.String.format; import static java.util.Arrays.asList; @SuppressWarnings("overloads") final class CommandOperationHelper { + static WriteConcern validateAndGetEffectiveWriteConcern(final WriteConcern writeConcernSetting, final SessionContext sessionContext) + throws MongoClientException { + boolean activeTransaction = sessionContext.hasActiveTransaction(); + WriteConcern effectiveWriteConcern = activeTransaction + ? WriteConcern.ACKNOWLEDGED + : writeConcernSetting; + if (sessionContext.hasSession() && !sessionContext.isImplicitSession() && !activeTransaction && !effectiveWriteConcern.isAcknowledged()) { + throw new MongoClientException("Unacknowledged writes are not supported when using an explicit session"); + } + return effectiveWriteConcern; + } + + static Optional commandWriteConcern(final WriteConcern effectiveWriteConcern, final SessionContext sessionContext) { + return effectiveWriteConcern.isServerDefault() || sessionContext.hasActiveTransaction() + ? 
Optional.empty() + : Optional.of(effectiveWriteConcern); + } interface CommandCreator { BsonDocument create( @@ -153,7 +174,26 @@ static boolean shouldAttemptToRetryRead(final RetryState retryState, final Throw return decision; } - static boolean shouldAttemptToRetryWrite(final RetryState retryState, final Throwable attemptFailure) { + static boolean loggingShouldAttemptToRetryWriteAndAddRetryableLabel(final RetryState retryState, final Throwable attemptFailure) { + Throwable attemptFailureNotToBeRetried = getAttemptFailureNotToRetryOrAddRetryableLabel(retryState, attemptFailure); + boolean decision = attemptFailureNotToBeRetried == null; + if (!decision && retryState.attachment(AttachmentKeys.retryableCommandFlag()).orElse(false)) { + logUnableToRetry( + retryState.attachment(AttachmentKeys.commandDescriptionSupplier()).orElse(null), + assertNotNull(attemptFailureNotToBeRetried)); + } + return decision; + } + + static boolean shouldAttemptToRetryWriteAndAddRetryableLabel(final RetryState retryState, final Throwable attemptFailure) { + return getAttemptFailureNotToRetryOrAddRetryableLabel(retryState, attemptFailure) != null; + } + + /** + * @return {@code null} if the decision is {@code true}. Otherwise, returns the {@link Throwable} that must not be retried. + */ + @Nullable + private static Throwable getAttemptFailureNotToRetryOrAddRetryableLabel(final RetryState retryState, final Throwable attemptFailure) { Throwable failure = attemptFailure instanceof ResourceSupplierInternalException ? attemptFailure.getCause() : attemptFailure; boolean decision = false; MongoException exceptionRetryableRegardlessOfCommand = null; @@ -170,11 +210,9 @@ static boolean shouldAttemptToRetryWrite(final RetryState retryState, final Thro } else if (decideRetryableAndAddRetryableWriteErrorLabel(failure, retryState.attachment(AttachmentKeys.maxWireVersion()) .orElse(null))) { decision = true; - } else { - logUnableToRetry(retryState.attachment(AttachmentKeys.commandDescriptionSupplier()).orElse(null), failure); } } - return decision; + return decision ? 
null : assertNotNull(failure); } static boolean isRetryWritesEnabled(@Nullable final BsonDocument command) { diff --git a/driver-core/src/main/com/mongodb/internal/operation/MixedBulkWriteOperation.java b/driver-core/src/main/com/mongodb/internal/operation/MixedBulkWriteOperation.java index a32ce6d5153..06d392bceb2 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/MixedBulkWriteOperation.java +++ b/driver-core/src/main/com/mongodb/internal/operation/MixedBulkWriteOperation.java @@ -16,7 +16,6 @@ package com.mongodb.internal.operation; -import com.mongodb.MongoClientException; import com.mongodb.MongoException; import com.mongodb.MongoNamespace; import com.mongodb.WriteConcern; @@ -63,8 +62,10 @@ import static com.mongodb.internal.operation.AsyncOperationHelper.withAsyncSourceAndConnection; import static com.mongodb.internal.operation.CommandOperationHelper.addRetryableWriteErrorLabel; import static com.mongodb.internal.operation.CommandOperationHelper.logRetryExecute; +import static com.mongodb.internal.operation.CommandOperationHelper.loggingShouldAttemptToRetryWriteAndAddRetryableLabel; import static com.mongodb.internal.operation.CommandOperationHelper.onRetryableWriteAttemptFailure; import static com.mongodb.internal.operation.CommandOperationHelper.transformWriteException; +import static com.mongodb.internal.operation.CommandOperationHelper.validateAndGetEffectiveWriteConcern; import static com.mongodb.internal.operation.OperationHelper.LOGGER; import static com.mongodb.internal.operation.OperationHelper.isRetryableWrite; import static com.mongodb.internal.operation.OperationHelper.validateWriteRequests; @@ -164,7 +165,7 @@ private boolean shouldAttemptToRetryWrite(final RetryState retryState, final Thr if (bulkWriteTracker.lastAttempt()) { return false; } - boolean decision = CommandOperationHelper.shouldAttemptToRetryWrite(retryState, attemptFailure); + boolean decision = loggingShouldAttemptToRetryWriteAndAddRetryableLabel(retryState, attemptFailure); if (decision) { /* The attempt counter maintained by `RetryState` is updated after (in the happens-before order) testing a retry predicate, * and only if the predicate completes normally. 
Here we maintain attempt counters manually, and we emulate the @@ -274,7 +275,7 @@ private BulkWriteResult executeBulkWriteBatch( if (currentBulkWriteTracker.lastAttempt()) { addRetryableWriteErrorLabel(writeConcernBasedError, maxWireVersion); addErrorLabelsToWriteConcern(result.getDocument("writeConcernError"), writeConcernBasedError.getErrorLabels()); - } else if (CommandOperationHelper.shouldAttemptToRetryWrite(retryState, writeConcernBasedError)) { + } else if (loggingShouldAttemptToRetryWriteAndAddRetryableLabel(retryState, writeConcernBasedError)) { throw new MongoWriteConcernWithResponseException(writeConcernBasedError, result); } } @@ -328,7 +329,7 @@ private void executeBulkWriteBatchAsync( addRetryableWriteErrorLabel(writeConcernBasedError, maxWireVersion); addErrorLabelsToWriteConcern(result.getDocument("writeConcernError"), writeConcernBasedError.getErrorLabels()); - } else if (CommandOperationHelper.shouldAttemptToRetryWrite(retryState, writeConcernBasedError)) { + } else if (loggingShouldAttemptToRetryWriteAndAddRetryableLabel(retryState, writeConcernBasedError)) { iterationCallback.onResult(null, new MongoWriteConcernWithResponseException(writeConcernBasedError, result)); return; @@ -420,8 +421,7 @@ private BsonDocument executeCommand( final Connection connection, final BulkWriteBatch batch) { return connection.command(namespace.getDatabaseName(), batch.getCommand(), NoOpFieldNameValidator.INSTANCE, null, batch.getDecoder(), - operationContext, shouldExpectResponse(batch, effectiveWriteConcern), - batch.getPayload(), batch.getFieldNameValidator()); + operationContext, shouldExpectResponse(batch, effectiveWriteConcern), batch.getPayload()); } private void executeCommandAsync( @@ -431,26 +431,7 @@ private void executeCommandAsync( final BulkWriteBatch batch, final SingleResultCallback callback) { connection.commandAsync(namespace.getDatabaseName(), batch.getCommand(), NoOpFieldNameValidator.INSTANCE, null, batch.getDecoder(), - operationContext, shouldExpectResponse(batch, effectiveWriteConcern), - batch.getPayload(), batch.getFieldNameValidator(), callback); - } - - private static WriteConcern validateAndGetEffectiveWriteConcern(final WriteConcern writeConcernSetting, final SessionContext sessionContext) - throws MongoClientException { - boolean activeTransaction = sessionContext.hasActiveTransaction(); - WriteConcern effectiveWriteConcern = activeTransaction - ? WriteConcern.ACKNOWLEDGED - : writeConcernSetting; - if (sessionContext.hasSession() && !sessionContext.isImplicitSession() && !activeTransaction && !effectiveWriteConcern.isAcknowledged()) { - throw new MongoClientException("Unacknowledged writes are not supported when using an explicit session"); - } - return effectiveWriteConcern; - } - - static Optional commandWriteConcern(final WriteConcern effectiveWriteConcern, final SessionContext sessionContext) { - return effectiveWriteConcern.isServerDefault() || sessionContext.hasActiveTransaction() - ? 
Optional.empty() - : Optional.of(effectiveWriteConcern); + operationContext, shouldExpectResponse(batch, effectiveWriteConcern), batch.getPayload(), callback); } private boolean shouldExpectResponse(final BulkWriteBatch batch, final WriteConcern effectiveWriteConcern) { diff --git a/driver-core/src/main/com/mongodb/internal/operation/OperationHelper.java b/driver-core/src/main/com/mongodb/internal/operation/OperationHelper.java index 04318635a06..b7a4997e639 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/OperationHelper.java +++ b/driver-core/src/main/com/mongodb/internal/operation/OperationHelper.java @@ -16,7 +16,9 @@ package com.mongodb.internal.operation; +import com.mongodb.ClientBulkWriteException; import com.mongodb.MongoClientException; +import com.mongodb.MongoException; import com.mongodb.WriteConcern; import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.Collation; @@ -47,7 +49,10 @@ import static com.mongodb.internal.operation.ServerVersionHelper.serverIsLessThanVersionFourDotTwo; import static java.lang.String.format; -final class OperationHelper { +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public final class OperationHelper { public static final Logger LOGGER = Loggers.getLogger("operation"); static void validateCollationAndWriteConcern(@Nullable final Collation collation, final WriteConcern writeConcern) { @@ -202,6 +207,21 @@ static void setNonTailableCursorMaxTimeSupplier(final TimeoutMode timeoutMode, f } } + /** + * Returns the {@link MongoException} that carries or should carry + * the {@linkplain MongoException#getCode() error code} and {@linkplain MongoException#getErrorLabels() error labels}. + * This method is needed because exceptions like {@link ClientBulkWriteException} do not carry that data themselves. + */ + public static MongoException unwrap(final MongoException exception) { + MongoException result = exception; + if (exception instanceof ClientBulkWriteException) { + MongoException topLevelError = ((ClientBulkWriteException) exception).getCause(); + result = topLevelError == null ? exception : topLevelError; + } + return result; + } + + /** * This internal exception is used to *
    diff --git a/driver-core/src/main/com/mongodb/internal/operation/Operations.java b/driver-core/src/main/com/mongodb/internal/operation/Operations.java index ecdd215ba91..88af67a1204 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/Operations.java +++ b/driver-core/src/main/com/mongodb/internal/operation/Operations.java @@ -54,6 +54,8 @@ import com.mongodb.client.model.UpdateOptions; import com.mongodb.client.model.ValidationOptions; import com.mongodb.client.model.WriteModel; +import com.mongodb.client.model.bulk.ClientBulkWriteOptions; +import com.mongodb.client.model.bulk.ClientNamespacedWriteModel; import com.mongodb.client.model.changestream.FullDocument; import com.mongodb.client.model.changestream.FullDocumentBeforeChange; import com.mongodb.internal.bulk.DeleteRequest; @@ -727,6 +729,12 @@ ChangeStreamOperation changeStream(final FullDocument fullDoc .retryReads(retryReads); } + ClientBulkWriteOperation clientBulkWriteOperation( + final List clientWriteModels, + @Nullable final ClientBulkWriteOptions options) { + return new ClientBulkWriteOperation(clientWriteModels, options, writeConcern, retryWrites, codecRegistry); + } + private Codec getCodec() { return codecRegistry.get(documentClass); } diff --git a/driver-core/src/main/com/mongodb/internal/operation/SyncOperationHelper.java b/driver-core/src/main/com/mongodb/internal/operation/SyncOperationHelper.java index 62da7cde2c8..6d013df59ba 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/SyncOperationHelper.java +++ b/driver-core/src/main/com/mongodb/internal/operation/SyncOperationHelper.java @@ -303,7 +303,7 @@ static T createReadCommandAndExecute( static Supplier decorateWriteWithRetries(final RetryState retryState, final OperationContext operationContext, final Supplier writeFunction) { return new RetryingSyncSupplier<>(retryState, onRetryableWriteAttemptFailure(operationContext), - CommandOperationHelper::shouldAttemptToRetryWrite, () -> { + CommandOperationHelper::loggingShouldAttemptToRetryWriteAndAddRetryableLabel, () -> { logRetryExecute(retryState, operationContext); return writeFunction.get(); }); @@ -334,8 +334,8 @@ static CommandReadTransformer> singleBatchCurso connection.getDescription().getServerAddress()); } - static BatchCursor cursorDocumentToBatchCursor(final TimeoutMode timeoutMode, final BsonDocument cursorDocument, - final int batchSize, final Decoder decoder, final BsonValue comment, final ConnectionSource source, + static CommandBatchCursor cursorDocumentToBatchCursor(final TimeoutMode timeoutMode, final BsonDocument cursorDocument, + final int batchSize, final Decoder decoder, @Nullable final BsonValue comment, final ConnectionSource source, final Connection connection) { return new CommandBatchCursor<>(timeoutMode, cursorDocument, batchSize, 0, decoder, comment, source, connection); } diff --git a/driver-core/src/main/com/mongodb/internal/operation/SyncOperations.java b/driver-core/src/main/com/mongodb/internal/operation/SyncOperations.java index 952a35fe7fe..72f738ec971 100644 --- a/driver-core/src/main/com/mongodb/internal/operation/SyncOperations.java +++ b/driver-core/src/main/com/mongodb/internal/operation/SyncOperations.java @@ -44,8 +44,11 @@ import com.mongodb.client.model.SearchIndexModel; import com.mongodb.client.model.UpdateOptions; import com.mongodb.client.model.WriteModel; +import com.mongodb.client.model.bulk.ClientBulkWriteOptions; +import com.mongodb.client.model.bulk.ClientNamespacedWriteModel; import com.mongodb.client.model.changestream.FullDocument; 
import com.mongodb.client.model.changestream.FullDocumentBeforeChange; +import com.mongodb.client.model.bulk.ClientBulkWriteResult; import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.client.model.AggregationLevel; import com.mongodb.internal.client.model.FindOptions; @@ -359,4 +362,10 @@ public ReadOperation> changeStream(final FullDocu return operations.changeStream(fullDocument, fullDocumentBeforeChange, pipeline, decoder, changeStreamLevel, batchSize, collation, comment, resumeToken, startAtOperationTime, startAfter, showExpandedEvents); } + + public WriteOperation clientBulkWriteOperation( + final List clientWriteModels, + @Nullable final ClientBulkWriteOptions options) { + return operations.clientBulkWriteOperation(clientWriteModels, options); + } } diff --git a/driver-core/src/main/com/mongodb/internal/session/SessionContext.java b/driver-core/src/main/com/mongodb/internal/session/SessionContext.java index 6c55c526d45..4a8902799ec 100644 --- a/driver-core/src/main/com/mongodb/internal/session/SessionContext.java +++ b/driver-core/src/main/com/mongodb/internal/session/SessionContext.java @@ -48,7 +48,7 @@ public interface SessionContext { /** * Advance the transaction number. * - * @return the next transaction number for the session + * @return the next non-negative transaction number for the session */ long advanceTransactionNumber(); diff --git a/driver-core/src/main/com/mongodb/internal/time/TimePoint.java b/driver-core/src/main/com/mongodb/internal/time/TimePoint.java index d0b95970511..811065d13a6 100644 --- a/driver-core/src/main/com/mongodb/internal/time/TimePoint.java +++ b/driver-core/src/main/com/mongodb/internal/time/TimePoint.java @@ -28,6 +28,7 @@ import static com.mongodb.assertions.Assertions.assertNotNull; import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE; +import static java.util.concurrent.TimeUnit.MILLISECONDS; import static java.util.concurrent.TimeUnit.NANOSECONDS; /** @@ -234,7 +235,7 @@ public int hashCode() { public String toString() { String remainingMs = isInfinite() ? "infinite" - : "" + TimeUnit.MILLISECONDS.convert(currentNanos() - assertNotNull(nanos), NANOSECONDS); + : "" + remaining(MILLISECONDS); return "TimePoint{" + "nanos=" + nanos + ", remainingMs=" + remainingMs diff --git a/driver-core/src/main/com/mongodb/internal/validator/UpdateFieldNameValidator.java b/driver-core/src/main/com/mongodb/internal/validator/UpdateFieldNameValidator.java index 40762bfb5fb..fc59b0cc312 100644 --- a/driver-core/src/main/com/mongodb/internal/validator/UpdateFieldNameValidator.java +++ b/driver-core/src/main/com/mongodb/internal/validator/UpdateFieldNameValidator.java @@ -48,7 +48,7 @@ public FieldNameValidator getValidatorForField(final String fieldName) { @Override public void start() { - encounteredField = false; + reset(); } @Override @@ -57,4 +57,9 @@ public void end() { throw new IllegalArgumentException("Invalid BSON document for an update. 
         }
     }
+
+    public UpdateFieldNameValidator reset() {
+        encounteredField = false;
+        return this;
+    }
 }
diff --git a/driver-core/src/test/functional/com/mongodb/ClusterFixture.java b/driver-core/src/test/functional/com/mongodb/ClusterFixture.java
index a889856f394..dde9682de8d 100644
--- a/driver-core/src/test/functional/com/mongodb/ClusterFixture.java
+++ b/driver-core/src/test/functional/com/mongodb/ClusterFixture.java
@@ -125,7 +125,7 @@ public final class ClusterFixture {
     private static final String MONGODB_OCSP_SHOULD_SUCCEED = "org.mongodb.test.ocsp.tls.should.succeed";
     private static final String DEFAULT_DATABASE_NAME = "JavaDriverTest";
     private static final int COMMAND_NOT_FOUND_ERROR_CODE = 59;
-    public static final long TIMEOUT = 60L;
+    public static final long TIMEOUT = 120L;
     public static final Duration TIMEOUT_DURATION = Duration.ofSeconds(TIMEOUT);

     public static final TimeoutSettings TIMEOUT_SETTINGS = new TimeoutSettings(30_000, 10_000, 0, null, SECONDS.toMillis(5));
diff --git a/driver-core/src/test/functional/com/mongodb/client/syncadapter/SyncConnection.java b/driver-core/src/test/functional/com/mongodb/client/syncadapter/SyncConnection.java
index 1cc3904749d..0a96d5ab0cf 100644
--- a/driver-core/src/test/functional/com/mongodb/client/syncadapter/SyncConnection.java
+++ b/driver-core/src/test/functional/com/mongodb/client/syncadapter/SyncConnection.java
@@ -19,8 +19,8 @@ import com.mongodb.connection.ConnectionDescription;
 import com.mongodb.internal.connection.AsyncConnection;
 import com.mongodb.internal.connection.Connection;
+import com.mongodb.internal.connection.MessageSequences;
 import com.mongodb.internal.connection.OperationContext;
-import com.mongodb.internal.connection.SplittablePayload;
 import org.bson.BsonDocument;
 import org.bson.FieldNameValidator;
 import org.bson.codecs.Decoder;
@@ -65,11 +65,10 @@ public T command(final String database, final BsonDocument command, final Fi
     @Override
     public <T> T command(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator,
                          final ReadPreference readPreference, final Decoder<T> commandResultDecoder,
-                         final OperationContext operationContext, final boolean responseExpected, final SplittablePayload payload,
-                         final FieldNameValidator payloadFieldNameValidator) {
+                         final OperationContext operationContext, final boolean responseExpected, final MessageSequences sequences) {
         SupplyingCallback<T> callback = new SupplyingCallback<>();
         wrapped.commandAsync(database, command, commandFieldNameValidator, readPreference, commandResultDecoder, operationContext,
-                responseExpected, payload, payloadFieldNameValidator, callback);
+                responseExpected, sequences, callback);
         return callback.get();
     }
diff --git a/driver-core/src/test/functional/com/mongodb/client/test/CollectionHelper.java b/driver-core/src/test/functional/com/mongodb/client/test/CollectionHelper.java
index adce165ee51..3e58712ca9c 100644
--- a/driver-core/src/test/functional/com/mongodb/client/test/CollectionHelper.java
+++ b/driver-core/src/test/functional/com/mongodb/client/test/CollectionHelper.java
@@ -357,9 +357,17 @@ public void replaceOne(final Bson filter, final Bson update, final boolean isUps
     }

     public void deleteOne(final Bson filter) {
+        delete(filter, false);
+    }
+
+    public void deleteMany(final Bson filter) {
+        delete(filter, true);
+    }
+
+    private void delete(final Bson filter, final boolean multi) {
         new MixedBulkWriteOperation(namespace,
-                singletonList(new
DeleteRequest(filter.toBsonDocument(Document.class, registry))), - true, WriteConcern.ACKNOWLEDGED, false) + singletonList(new DeleteRequest(filter.toBsonDocument(Document.class, registry)).multi(multi)), + true, WriteConcern.ACKNOWLEDGED, false) .execute(getBinding()); } diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/AsyncCommandBatchCursorFunctionalTest.java b/driver-core/src/test/functional/com/mongodb/internal/operation/AsyncCommandBatchCursorFunctionalTest.java index a272f8b0f67..88dc199ee29 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/AsyncCommandBatchCursorFunctionalTest.java +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/AsyncCommandBatchCursorFunctionalTest.java @@ -21,8 +21,10 @@ import com.mongodb.MongoQueryException; import com.mongodb.ReadPreference; import com.mongodb.ServerCursor; +import com.mongodb.async.FutureResultCallback; import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.CreateCollectionOptions; +import com.mongodb.client.model.Filters; import com.mongodb.client.model.OperationTest; import com.mongodb.internal.binding.AsyncConnectionSource; import com.mongodb.internal.connection.AsyncConnection; @@ -103,6 +105,69 @@ void cleanup() { }); } + @Test + @DisplayName("should exhaust cursor with multiple batches") + void shouldExhaustCursorAsyncWithMultipleBatches() { + // given + BsonDocument commandResult = executeFindCommand(0, 3); // Fetch in batches of size 3 + cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 3, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + // when + FutureResultCallback>> futureCallback = new FutureResultCallback<>(); + cursor.exhaust(futureCallback); + + // then + List> resultBatches = futureCallback.get(5, TimeUnit.SECONDS); + + assertTrue(cursor.isClosed(), "Expected cursor to be closed."); + assertEquals(4, resultBatches.size(), "Expected 4 batches for 10 documents with batch size of 3."); + + int totalDocuments = resultBatches.stream().mapToInt(List::size).sum(); + assertEquals(10, totalDocuments, "Expected a total of 10 documents."); + } + + @Test + @DisplayName("should exhaust cursor with closed cursor") + void shouldExhaustCursorAsyncWithClosedCursor() { + // given + BsonDocument commandResult = executeFindCommand(0, 3); + cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 3, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + cursor.close(); + + // when + FutureResultCallback>> futureCallback = new FutureResultCallback<>(); + cursor.exhaust(futureCallback); + + //then + IllegalStateException illegalStateException = assertThrows(IllegalStateException.class, () -> { + futureCallback.get(5, TimeUnit.SECONDS); + }, "Expected an exception when operating on a closed cursor."); + assertEquals("Cursor has been closed", illegalStateException.getMessage()); + } + + @Test + @DisplayName("should exhaust cursor with empty cursor") + void shouldExhaustCursorAsyncWithEmptyCursor() { + // given + getCollectionHelper().deleteMany(Filters.empty()); + + BsonDocument commandResult = executeFindCommand(0, 3); // No documents to fetch + cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 3, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + // when + FutureResultCallback>> futureCallback = new FutureResultCallback<>(); + cursor.exhaust(futureCallback); + + // then + List> resultBatches = futureCallback.get(5, 
TimeUnit.SECONDS); + assertTrue(resultBatches.isEmpty(), "Expected no batches for an empty cursor."); + assertTrue(cursor.isClosed(), "Expected cursor to be closed."); + } + @Test @DisplayName("server cursor should not be null") void theServerCursorShouldNotBeNull() { diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/CommandBatchCursorFunctionalTest.java b/driver-core/src/test/functional/com/mongodb/internal/operation/CommandBatchCursorFunctionalTest.java index 57caf3bdbfc..d9861c71659 100644 --- a/driver-core/src/test/functional/com/mongodb/internal/operation/CommandBatchCursorFunctionalTest.java +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/CommandBatchCursorFunctionalTest.java @@ -22,6 +22,7 @@ import com.mongodb.ServerCursor; import com.mongodb.client.cursor.TimeoutMode; import com.mongodb.client.model.CreateCollectionOptions; +import com.mongodb.client.model.Filters; import com.mongodb.client.model.OperationTest; import com.mongodb.internal.binding.ConnectionSource; import com.mongodb.internal.connection.Connection; @@ -101,6 +102,55 @@ void cleanup() { }); } + @Test + @DisplayName("should exhaust cursor with multiple batches") + void shouldExhaustCursorWithMultipleBatches() { + // given + BsonDocument commandResult = executeFindCommand(0, 3); // Fetch in batches of size 3 + cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 3, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + // when + List> result = cursor.exhaust(); + + // then + assertEquals(4, result.size(), "Expected 4 batches for 10 documents with batch size of 3."); + + int totalDocuments = result.stream().mapToInt(List::size).sum(); + assertEquals(10, totalDocuments, "Expected a total of 10 documents."); + } + + @Test + @DisplayName("should exhaust cursor with closed cursor") + void shouldExhaustCursorWithClosedCursor() { + // given + BsonDocument commandResult = executeFindCommand(0, 3); + cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 3, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + cursor.close(); + + // when & then + IllegalStateException illegalStateException = assertThrows(IllegalStateException.class, cursor::exhaust); + assertEquals("Cursor has been closed", illegalStateException.getMessage()); + } + + @Test + @DisplayName("should exhaust cursor with empty cursor") + void shouldExhaustCursorWithEmptyCursor() { + // given + getCollectionHelper().deleteMany(Filters.empty()); + + BsonDocument commandResult = executeFindCommand(0, 3); // No documents to fetch + cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 3, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + // when + List> result = cursor.exhaust(); + + // then + assertTrue(result.isEmpty(), "Expected no batches for an empty cursor."); + } + @Test @DisplayName("server cursor should not be null") void theServerCursorShouldNotBeNull() { diff --git a/driver-core/src/test/resources/unified-test-format/command-monitoring/unacknowledged-client-bulkWrite.json b/driver-core/src/test/resources/unified-test-format/command-monitoring/unacknowledged-client-bulkWrite.json new file mode 100644 index 00000000000..61bb00726c0 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/command-monitoring/unacknowledged-client-bulkWrite.json @@ -0,0 +1,220 @@ +{ + "description": "unacknowledged-client-bulkWrite", + "schemaVersion": "1.7", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + 
"serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client", + "useMultipleMongoses": false, + "observeEvents": [ + "commandStartedEvent", + "commandSucceededEvent", + "commandFailedEvent" + ], + "uriOptions": { + "w": 0 + } + } + }, + { + "database": { + "id": "database", + "client": "client", + "databaseName": "command-monitoring-tests" + } + }, + { + "collection": { + "id": "collection", + "database": "database", + "collectionName": "test" + } + } + ], + "initialData": [ + { + "collectionName": "test", + "databaseName": "command-monitoring-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "command-monitoring-tests.test" + }, + "tests": [ + { + "description": "A successful mixed client bulkWrite", + "operations": [ + { + "object": "client", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "command-monitoring-tests.test", + "document": { + "_id": 4, + "x": 44 + } + } + }, + { + "updateOne": { + "namespace": "command-monitoring-tests.test", + "filter": { + "_id": 3 + }, + "update": { + "$set": { + "x": 333 + } + } + } + } + ], + "ordered": false + }, + "expectResult": { + "insertedCount": { + "$$unsetOrMatches": 0 + }, + "upsertedCount": { + "$$unsetOrMatches": 0 + }, + "matchedCount": { + "$$unsetOrMatches": 0 + }, + "modifiedCount": { + "$$unsetOrMatches": 0 + }, + "deletedCount": { + "$$unsetOrMatches": 0 + }, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + }, + { + "object": "collection", + "name": "find", + "arguments": { + "filter": {} + }, + "expectResult": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 333 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "expectEvents": [ + { + "client": "client", + "ignoreExtraEvents": true, + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": false, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 3 + }, + "updateMods": { + "$set": { + "x": 333 + } + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "command-monitoring-tests.test" + } + ] + } + } + }, + { + "commandSucceededEvent": { + "commandName": "bulkWrite", + "reply": { + "ok": 1, + "nInserted": { + "$$exists": false + }, + "nMatched": { + "$$exists": false + }, + "nModified": { + "$$exists": false + }, + "nUpserted": { + "$$exists": false + }, + "nDeleted": { + "$$exists": false + } + } + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/crud/client-bulkWrite-delete-options.json b/driver-core/src/test/resources/unified-test-format/crud/client-bulkWrite-delete-options.json new file mode 100644 index 00000000000..d9987897dcd --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/crud/client-bulkWrite-delete-options.json @@ -0,0 +1,268 @@ +{ + "description": "client bulkWrite delete options", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": 
"crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0", + "collation": { + "locale": "simple" + }, + "hint": "_id_" + }, + "tests": [ + { + "description": "client bulk write delete with collation", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "collation": { + "locale": "simple" + } + } + }, + { + "deleteMany": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": { + "$gt": 1 + } + }, + "collation": { + "locale": "simple" + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 3, + "insertResults": {}, + "updateResults": {}, + "deleteResults": { + "0": { + "deletedCount": 1 + }, + "1": { + "deletedCount": 2 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "delete": 0, + "filter": { + "_id": 1 + }, + "collation": { + "locale": "simple" + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": { + "$gt": 1 + } + }, + "collation": { + "locale": "simple" + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [] + } + ] + }, + { + "description": "client bulk write delete with hint", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "hint": "_id_" + } + }, + { + "deleteMany": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 3, + "insertResults": {}, + "updateResults": {}, + "deleteResults": { + "0": { + "deletedCount": 1 + }, + "1": { + "deletedCount": 2 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "delete": 0, + "filter": { + "_id": 1 + }, + "hint": "_id_", + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_", + "multi": true + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/crud/client-bulkWrite-errorResponse.json b/driver-core/src/test/resources/unified-test-format/crud/client-bulkWrite-errorResponse.json new file mode 100644 index 00000000000..b828aad3b93 --- /dev/null +++ 
b/driver-core/src/test/resources/unified-test-format/crud/client-bulkWrite-errorResponse.json @@ -0,0 +1,69 @@ +{ + "description": "client bulkWrite errorResponse", + "schemaVersion": "1.12", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite operations support errorResponse assertions", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 8 + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "errorCode": 8, + "errorResponse": { + "code": 8 + } + } + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/crud/client-bulkWrite-errors.json b/driver-core/src/test/resources/unified-test-format/crud/client-bulkWrite-errors.json new file mode 100644 index 00000000000..015bd95c990 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/crud/client-bulkWrite-errors.json @@ -0,0 +1,513 @@ +{ + "description": "client bulkWrite errors", + "schemaVersion": "1.21", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "retryWrites": false + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0", + "writeConcernErrorCode": 91, + "writeConcernErrorMessage": "Replication is being shut down", + "undefinedVarCode": 17276 + }, + "tests": [ + { + "description": "an individual operation fails during an ordered bulkWrite", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ], + "verboseResults": true + }, + "expectError": { + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 1, + "insertResults": {}, + "updateResults": {}, + "deleteResults": { + "0": { + "deletedCount": 1 + } + } + }, + "writeErrors": { + "1": { + "code": 17276 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "an individual operation fails during 
an unordered bulkWrite", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ], + "verboseResults": true, + "ordered": false + }, + "expectError": { + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 2, + "insertResults": {}, + "updateResults": {}, + "deleteResults": { + "0": { + "deletedCount": 1 + }, + "2": { + "deletedCount": 1 + } + } + }, + "writeErrors": { + "1": { + "code": 17276 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "detailed results are omitted from error when verboseResults is false", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ], + "verboseResults": false + }, + "expectError": { + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 1, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + }, + "writeErrors": { + "1": { + "code": 17276 + } + } + } + } + ] + }, + { + "description": "a top-level failure occurs during a bulkWrite", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 8 + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "x": 1 + } + } + } + ], + "verboseResults": true + }, + "expectError": { + "errorCode": 8 + } + } + ] + }, + { + "description": "a bulk write with only errors does not report a partial result", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + } + } + } + ], + "verboseResults": true + }, + "expectError": { + "expectResult": { + "$$unsetOrMatches": {} + }, + "writeErrors": { + "0": { + "code": 17276 + } + } + } + } + ] + }, + { + "description": "a write concern error occurs during a bulkWrite", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + 
"models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 10 + } + } + } + ], + "verboseResults": true + }, + "expectError": { + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 10 + } + }, + "updateResults": {}, + "deleteResults": {} + }, + "writeConcernErrors": [ + { + "code": 91, + "message": "Replication is being shut down" + } + ] + } + } + ] + }, + { + "description": "an empty list of write models is a client-side error", + "operations": [ + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "models": [], + "verboseResults": true + }, + "expectError": { + "isClientError": true + } + } + ] + }, + { + "description": "Requesting unacknowledged write with verboseResults is a client-side error", + "operations": [ + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 10 + } + } + } + ], + "verboseResults": true, + "ordered": false, + "writeConcern": { + "w": 0 + } + }, + "expectError": { + "isClientError": true, + "errorContains": "Cannot request unacknowledged write concern and verbose results" + } + } + ] + }, + { + "description": "Requesting unacknowledged write with ordered is a client-side error", + "operations": [ + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 10 + } + } + } + ], + "writeConcern": { + "w": 0 + } + }, + "expectError": { + "isClientError": true, + "errorContains": "Cannot request unacknowledged write concern and ordered writes" + } + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/crud/client-bulkWrite-mixed-namespaces.json b/driver-core/src/test/resources/unified-test-format/crud/client-bulkWrite-mixed-namespaces.json new file mode 100644 index 00000000000..55f06189233 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/crud/client-bulkWrite-mixed-namespaces.json @@ -0,0 +1,315 @@ +{ + "description": "client bulkWrite with mixed namespaces", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + }, + { + "collection": { + "id": "collection1", + "database": "database0", + "collectionName": "coll1" + } + }, + { + "database": { + "id": "database1", + "client": "client0", + "databaseName": "db1" + } + }, + { + "collection": { + "id": "collection2", + "database": "database1", + "collectionName": "coll2" + } + } + ], + "initialData": [ + { + "databaseName": "db0", + "collectionName": "coll0", + "documents": [] + }, + { + "databaseName": "db0", + "collectionName": "coll1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + }, + { + "databaseName": "db1", + "collectionName": "coll2", + "documents": [ + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "_yamlAnchors": { + "db0Coll0Namespace": "db0.coll0", + "db0Coll1Namespace": "db0.coll1", + "db1Coll2Namespace": "db1.coll2" + }, + 
"tests": [ + { + "description": "client bulkWrite with mixed namespaces", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "db0.coll0", + "document": { + "_id": 1 + } + } + }, + { + "insertOne": { + "namespace": "db0.coll0", + "document": { + "_id": 2 + } + } + }, + { + "updateOne": { + "namespace": "db0.coll1", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteOne": { + "namespace": "db1.coll2", + "filter": { + "_id": 3 + } + } + }, + { + "deleteOne": { + "namespace": "db0.coll1", + "filter": { + "_id": 2 + } + } + }, + { + "replaceOne": { + "namespace": "db1.coll2", + "filter": { + "_id": 4 + }, + "replacement": { + "x": 45 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 2, + "upsertedCount": 0, + "matchedCount": 2, + "modifiedCount": 2, + "deletedCount": 2, + "insertResults": { + "0": { + "insertedId": 1 + }, + "1": { + "insertedId": 2 + } + }, + "updateResults": { + "2": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "5": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": { + "3": { + "deletedCount": 1 + }, + "4": { + "deletedCount": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "bulkWrite": 1, + "ops": [ + { + "insert": 0, + "document": { + "_id": 1 + } + }, + { + "insert": 0, + "document": { + "_id": 2 + } + }, + { + "update": 1, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "delete": 2, + "filter": { + "_id": 3 + }, + "multi": false + }, + { + "delete": 1, + "filter": { + "_id": 2 + }, + "multi": false + }, + { + "update": 2, + "filter": { + "_id": 4 + }, + "updateMods": { + "x": 45 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "db0.coll0" + }, + { + "ns": "db0.coll1" + }, + { + "ns": "db1.coll2" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "db0", + "collectionName": "coll0", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + { + "databaseName": "db0", + "collectionName": "coll1", + "documents": [ + { + "_id": 1, + "x": 12 + } + ] + }, + { + "databaseName": "db1", + "collectionName": "coll2", + "documents": [ + { + "_id": 4, + "x": 45 + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/crud/client-bulkWrite-options.json b/driver-core/src/test/resources/unified-test-format/crud/client-bulkWrite-options.json new file mode 100644 index 00000000000..708fe4e85b0 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/crud/client-bulkWrite-options.json @@ -0,0 +1,716 @@ +{ + "description": "client bulkWrite top-level options", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "client": { + "id": "writeConcernClient", + "uriOptions": { + "w": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + 
"databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0", + "comment": { + "bulk": "write" + }, + "let": { + "id1": 1, + "id2": 2 + }, + "writeConcern": { + "w": "majority" + } + }, + "tests": [ + { + "description": "client bulkWrite comment", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "comment": { + "bulk": "write" + }, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "comment": { + "bulk": "write" + }, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "client bulkWrite bypassDocumentValidation", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "bypassDocumentValidation": true, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "bypassDocumentValidation": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "client bulkWrite let", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id1" + ] + } + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + } + } + } + ], + "let": { + "id1": 1, + "id2": 2 + }, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 1, + "modifiedCount": 1, + "deletedCount": 1, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + 
"deleteResults": { + "1": { + "deletedCount": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "let": { + "id1": 1, + "id2": 2 + }, + "ops": [ + { + "update": 0, + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id1" + ] + } + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "x": 12 + } + ] + } + ] + }, + { + "description": "client bulkWrite bypassDocumentValidation: false is sent", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "bypassDocumentValidation": false, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "bypassDocumentValidation": false, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "client bulkWrite writeConcern", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "writeConcern": { + "w": "majority" + }, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "writeConcern": { + "w": "majority" + }, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ] + }, + { + "description": "client bulkWrite inherits writeConcern from client", + "operations": [ + { + "object": "writeConcernClient", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + 
"insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "writeConcernClient", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "writeConcern": { + "w": 1 + }, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ] + }, + { + "description": "client bulkWrite writeConcern option overrides client writeConcern", + "operations": [ + { + "object": "writeConcernClient", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "writeConcern": { + "w": "majority" + }, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "writeConcernClient", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "writeConcern": { + "w": "majority" + }, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/crud/client-bulkWrite-ordered.json b/driver-core/src/test/resources/unified-test-format/crud/client-bulkWrite-ordered.json new file mode 100644 index 00000000000..6fb10d992f0 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/crud/client-bulkWrite-ordered.json @@ -0,0 +1,291 @@ +{ + "description": "client bulkWrite with ordered option", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite with ordered: false", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "verboseResults": true, + "ordered": false + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 1 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": false, + "ops": [ + { + "insert": 0, + "document": { + "_id": 1, + "x": 11 + } + } 
+ ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "client bulkWrite with ordered: true", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "verboseResults": true, + "ordered": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 1 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 1, + "x": 11 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "client bulkWrite defaults to ordered: true", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 1 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 1, + "x": 11 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/crud/client-bulkWrite-partialResults.json b/driver-core/src/test/resources/unified-test-format/crud/client-bulkWrite-partialResults.json new file mode 100644 index 00000000000..b35e94a2ea2 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/crud/client-bulkWrite-partialResults.json @@ -0,0 +1,540 @@ +{ + "description": "client bulkWrite partial results", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0" + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0", + "newDocument": { + "_id": 2, + "x": 22 + } + }, + "tests": [ + { + "description": "partialResult is 
unset when first operation fails during an ordered bulk write (verbose)", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 2, + "x": 22 + } + } + } + ], + "ordered": true, + "verboseResults": true + }, + "expectError": { + "expectResult": { + "$$unsetOrMatches": { + "insertedCount": { + "$$exists": false + }, + "upsertedCount": { + "$$exists": false + }, + "matchedCount": { + "$$exists": false + }, + "modifiedCount": { + "$$exists": false + }, + "deletedCount": { + "$$exists": false + }, + "insertResults": { + "$$exists": false + }, + "updateResults": { + "$$exists": false + }, + "deleteResults": { + "$$exists": false + } + } + } + } + } + ] + }, + { + "description": "partialResult is unset when first operation fails during an ordered bulk write (summary)", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 2, + "x": 22 + } + } + } + ], + "ordered": true, + "verboseResults": false + }, + "expectError": { + "expectResult": { + "$$unsetOrMatches": { + "insertedCount": { + "$$exists": false + }, + "upsertedCount": { + "$$exists": false + }, + "matchedCount": { + "$$exists": false + }, + "modifiedCount": { + "$$exists": false + }, + "deletedCount": { + "$$exists": false + }, + "insertResults": { + "$$exists": false + }, + "updateResults": { + "$$exists": false + }, + "deleteResults": { + "$$exists": false + } + } + } + } + } + ] + }, + { + "description": "partialResult is set when second operation fails during an ordered bulk write (verbose)", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "ordered": true, + "verboseResults": true + }, + "expectError": { + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 2 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + } + ] + }, + { + "description": "partialResult is set when second operation fails during an ordered bulk write (summary)", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "ordered": true, + "verboseResults": false + }, + "expectError": { + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + } + ] + }, + { + "description": "partialResult is unset when all operations fail during an unordered bulk write", + "operations": [ + { + "object": "client0", + "name": 
"clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "ordered": false + }, + "expectError": { + "expectResult": { + "$$unsetOrMatches": { + "insertedCount": { + "$$exists": false + }, + "upsertedCount": { + "$$exists": false + }, + "matchedCount": { + "$$exists": false + }, + "modifiedCount": { + "$$exists": false + }, + "deletedCount": { + "$$exists": false + }, + "insertResults": { + "$$exists": false + }, + "updateResults": { + "$$exists": false + }, + "deleteResults": { + "$$exists": false + } + } + } + } + } + ] + }, + { + "description": "partialResult is set when first operation fails during an unordered bulk write (verbose)", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 2, + "x": 22 + } + } + } + ], + "ordered": false, + "verboseResults": true + }, + "expectError": { + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "1": { + "insertedId": 2 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + } + ] + }, + { + "description": "partialResult is set when first operation fails during an unordered bulk write (summary)", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 2, + "x": 22 + } + } + } + ], + "ordered": false, + "verboseResults": false + }, + "expectError": { + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + } + ] + }, + { + "description": "partialResult is set when second operation fails during an unordered bulk write (verbose)", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "ordered": false, + "verboseResults": true + }, + "expectError": { + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 2 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + } + ] + }, + { + "description": "partialResult is set when first operation fails during an unordered bulk write (summary)", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 2, + "x": 22 + } + } + }, + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "ordered": false, + "verboseResults": false + }, + 
"expectError": { + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/crud/client-bulkWrite-results.json b/driver-core/src/test/resources/unified-test-format/crud/client-bulkWrite-results.json new file mode 100644 index 00000000000..accf5a9cbf5 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/crud/client-bulkWrite-results.json @@ -0,0 +1,833 @@ +{ + "description": "client bulkWrite results", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + }, + { + "_id": 7, + "x": 77 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite with verboseResults: true returns detailed results", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + }, + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$inc": { + "x": 2 + } + } + } + }, + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 4 + }, + "replacement": { + "x": 44 + }, + "upsert": true + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 5 + } + } + }, + { + "deleteMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 1, + "matchedCount": 3, + "modifiedCount": 3, + "deletedCount": 3, + "insertResults": { + "0": { + "insertedId": 8 + } + }, + "updateResults": { + "1": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "2": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + } + }, + "3": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedId": 4 + } + }, + "deleteResults": { + "4": { + "deletedCount": 1 + }, + "5": { + "deletedCount": 2 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 8, + "x": 88 + } + }, + { + 
"update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$inc": { + "x": 2 + } + }, + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "x": 44 + }, + "upsert": true, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 5 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 24 + }, + { + "_id": 3, + "x": 35 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 8, + "x": 88 + } + ] + } + ] + }, + { + "description": "client bulkWrite with verboseResults: false omits detailed results", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + }, + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$inc": { + "x": 2 + } + } + } + }, + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 4 + }, + "replacement": { + "x": 44 + }, + "upsert": true + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 5 + } + } + }, + { + "deleteMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + } + } + } + ], + "verboseResults": false + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 1, + "matchedCount": 3, + "modifiedCount": 3, + "deletedCount": 3, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 8, + "x": 88 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$inc": { + "x": 2 + } + }, + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "x": 44 + }, + "upsert": true, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 5 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + 
"_id": 2, + "x": 24 + }, + { + "_id": 3, + "x": 35 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 8, + "x": 88 + } + ] + } + ] + }, + { + "description": "client bulkWrite defaults to verboseResults: false", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + }, + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$inc": { + "x": 2 + } + } + } + }, + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 4 + }, + "replacement": { + "x": 44 + }, + "upsert": true + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 5 + } + } + }, + { + "deleteMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + } + } + } + ] + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 1, + "matchedCount": 3, + "modifiedCount": 3, + "deletedCount": 3, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 8, + "x": 88 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$inc": { + "x": 2 + } + }, + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "x": 44 + }, + "upsert": true, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 5 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 24 + }, + { + "_id": 3, + "x": 35 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 8, + "x": 88 + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/crud/client-bulkWrite-update-options.json b/driver-core/src/test/resources/unified-test-format/crud/client-bulkWrite-update-options.json new file mode 100644 index 00000000000..ce6241c6812 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/crud/client-bulkWrite-update-options.json @@ -0,0 +1,949 @@ +{ + "description": "client bulkWrite update options", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": 
"crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 2, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 3, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 4, + "array": [ + 1, + 2, + 3 + ] + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0", + "collation": { + "locale": "simple" + }, + "hint": "_id_" + }, + "tests": [ + { + "description": "client bulkWrite update with arrayFilters", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "array.$[i]": 4 + } + }, + "arrayFilters": [ + { + "i": { + "$gte": 2 + } + } + ] + } + }, + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$set": { + "array.$[i]": 5 + } + }, + "arrayFilters": [ + { + "i": { + "$gte": 2 + } + } + ] + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 3, + "modifiedCount": 3, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "1": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$set": { + "array.$[i]": 4 + } + }, + "arrayFilters": [ + { + "i": { + "$gte": 2 + } + } + ], + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$set": { + "array.$[i]": 5 + } + }, + "arrayFilters": [ + { + "i": { + "$gte": 2 + } + } + ], + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "array": [ + 1, + 4, + 4 + ] + }, + { + "_id": 2, + "array": [ + 1, + 5, + 5 + ] + }, + { + "_id": 3, + "array": [ + 1, + 5, + 5 + ] + }, + { + "_id": 4, + "array": [ + 1, + 2, + 3 + ] + } + ] + } + ] + }, + { + "description": "client bulkWrite update with collation", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "collation": { + "locale": "simple" + } + } + }, + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$set": { + "array": [ + 1, + 2, + 5 + ] + } + }, + "collation": { + "locale": "simple" + } + } + }, + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 4 + }, + "replacement": { + "array": [ + 1, + 2, + 
6 + ] + }, + "collation": { + "locale": "simple" + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "1": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + } + }, + "2": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "collation": { + "locale": "simple" + }, + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$set": { + "array": [ + 1, + 2, + 5 + ] + } + }, + "collation": { + "locale": "simple" + }, + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "array": [ + 1, + 2, + 6 + ] + }, + "collation": { + "locale": "simple" + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "array": [ + 1, + 2, + 4 + ] + }, + { + "_id": 2, + "array": [ + 1, + 2, + 5 + ] + }, + { + "_id": 3, + "array": [ + 1, + 2, + 5 + ] + }, + { + "_id": 4, + "array": [ + 1, + 2, + 6 + ] + } + ] + } + ] + }, + { + "description": "client bulkWrite update with hint", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "hint": "_id_" + } + }, + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$set": { + "array": [ + 1, + 2, + 5 + ] + } + }, + "hint": "_id_" + } + }, + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 4 + }, + "replacement": { + "array": [ + 1, + 2, + 6 + ] + }, + "hint": "_id_" + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "1": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + } + }, + "2": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "hint": "_id_", + "multi": false + }, + { 
+ "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$set": { + "array": [ + 1, + 2, + 5 + ] + } + }, + "hint": "_id_", + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "array": [ + 1, + 2, + 6 + ] + }, + "hint": "_id_", + "multi": false + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "array": [ + 1, + 2, + 4 + ] + }, + { + "_id": 2, + "array": [ + 1, + 2, + 5 + ] + }, + { + "_id": 3, + "array": [ + 1, + 2, + 5 + ] + }, + { + "_id": 4, + "array": [ + 1, + 2, + 6 + ] + } + ] + } + ] + }, + { + "description": "client bulkWrite update with upsert", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 5 + }, + "update": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "upsert": true + } + }, + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 6 + }, + "replacement": { + "array": [ + 1, + 2, + 6 + ] + }, + "upsert": true + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 2, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedId": 5 + }, + "1": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedId": 6 + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 5 + }, + "updateMods": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "upsert": true, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 6 + }, + "updateMods": { + "array": [ + 1, + 2, + 6 + ] + }, + "upsert": true, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 2, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 3, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 4, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 5, + "array": [ + 1, + 2, + 4 + ] + }, + { + "_id": 6, + "array": [ + 1, + 2, + 6 + ] + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/crud/client-bulkWrite-update-pipeline.json b/driver-core/src/test/resources/unified-test-format/crud/client-bulkWrite-update-pipeline.json new file mode 100644 index 00000000000..9dba5ee6c57 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/crud/client-bulkWrite-update-pipeline.json @@ -0,0 +1,258 @@ +{ + "description": "client bulkWrite update pipeline", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": 
"collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1 + }, + { + "_id": 2, + "x": 2 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite updateOne with pipeline", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": [ + { + "$addFields": { + "foo": 1 + } + } + ] + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 1, + "modifiedCount": 1, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": [ + { + "$addFields": { + "foo": 1 + } + } + ], + "multi": false + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "x": 1, + "foo": 1 + }, + { + "_id": 2, + "x": 2 + } + ] + } + ] + }, + { + "description": "client bulkWrite updateMany with pipeline", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": {}, + "update": [ + { + "$addFields": { + "foo": 1 + } + } + ] + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 2, + "modifiedCount": 2, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": {}, + "updateMods": [ + { + "$addFields": { + "foo": 1 + } + } + ], + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "x": 1, + "foo": 1 + }, + { + "_id": 2, + "x": 2, + "foo": 1 + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/crud/client-bulkWrite-update-validation.json b/driver-core/src/test/resources/unified-test-format/crud/client-bulkWrite-update-validation.json new file mode 100644 index 00000000000..617e711338a --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/crud/client-bulkWrite-update-validation.json @@ -0,0 +1,216 @@ +{ + "description": "client-bulkWrite-update-validation", + "schemaVersion": "1.1", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": 
"database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite replaceOne prohibits atomic modifiers", + "operations": [ + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "models": [ + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "replacement": { + "$set": { + "x": 22 + } + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "client bulkWrite updateOne requires atomic modifiers", + "operations": [ + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "x": 22 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "client bulkWrite updateMany requires atomic modifiers", + "operations": [ + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "models": [ + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + "x": 44 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/server-selection/logging/operation-id.json b/driver-core/src/test/resources/unified-test-format/server-selection/logging/operation-id.json index 276e4b8d6d9..72ebff60d80 100644 --- a/driver-core/src/test/resources/unified-test-format/server-selection/logging/operation-id.json +++ b/driver-core/src/test/resources/unified-test-format/server-selection/logging/operation-id.json @@ -47,6 +47,9 @@ } } ], + "_yamlAnchors": { + "namespace": "logging-tests.server-selection" + }, "tests": [ { "description": "Successful bulkWrite operation: log messages have operationIds", @@ -224,6 +227,192 @@ ] } ] + }, + { + "description": "Successful client bulkWrite operation: log messages have operationIds", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "topologyDescriptionChangedEvent": {} + }, + "count": 2 + } + }, + { + "name": "clientBulkWrite", + "object": "client", + 
"arguments": { + "models": [ + { + "insertOne": { + "namespace": "logging-tests.server-selection", + "document": { + "x": 1 + } + } + } + ] + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "bulkWrite" + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection succeeded", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "bulkWrite" + } + } + ] + } + ] + }, + { + "description": "Failed client bulkWrite operation: log messages have operationIds", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "failPointClient", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "alwaysOn", + "data": { + "failCommands": [ + "hello", + "ismaster" + ], + "appName": "loggingClient", + "closeConnection": true + } + } + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "client": "client", + "event": { + "serverDescriptionChangedEvent": { + "newDescription": { + "type": "Unknown" + } + } + }, + "count": 1 + } + }, + { + "name": "clientBulkWrite", + "object": "client", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "logging-tests.server-selection", + "document": { + "x": 1 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectLogMessages": [ + { + "client": "client", + "messages": [ + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection started", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "bulkWrite" + } + }, + { + "level": "info", + "component": "serverSelection", + "data": { + "message": "Waiting for suitable server to become available", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "bulkWrite" + } + }, + { + "level": "debug", + "component": "serverSelection", + "data": { + "message": "Server selection failed", + "operationId": { + "$$type": [ + "int", + "long" + ] + }, + "operation": "bulkWrite" + } + } + ] + } + ] } ] } diff --git a/driver-core/src/test/resources/unified-test-format/transactions/client-bulkWrite.json b/driver-core/src/test/resources/unified-test-format/transactions/client-bulkWrite.json new file mode 100644 index 00000000000..4a8d013f8d5 --- /dev/null +++ b/driver-core/src/test/resources/unified-test-format/transactions/client-bulkWrite.json @@ -0,0 +1,593 @@ +{ + "description": "client bulkWrite transactions", + "schemaVersion": "1.4", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ], + "serverless": "forbid" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "client": { + "id": "client_with_wmajority", + "uriOptions": { + "w": "majority" + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "session": { + 
"id": "session_with_wmajority", + "client": "client_with_wmajority" + } + } + ], + "_yamlAnchors": { + "namespace": "transaction-tests.coll0" + }, + "initialData": [ + { + "databaseName": "transaction-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + }, + { + "_id": 7, + "x": 77 + } + ] + } + ], + "tests": [ + { + "description": "client bulkWrite in a transaction", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "session": "session0", + "models": [ + { + "insertOne": { + "namespace": "transaction-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + }, + { + "updateOne": { + "namespace": "transaction-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateMany": { + "namespace": "transaction-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$inc": { + "x": 2 + } + } + } + }, + { + "replaceOne": { + "namespace": "transaction-tests.coll0", + "filter": { + "_id": 4 + }, + "replacement": { + "x": 44 + }, + "upsert": true + } + }, + { + "deleteOne": { + "namespace": "transaction-tests.coll0", + "filter": { + "_id": 5 + } + } + }, + { + "deleteMany": { + "namespace": "transaction-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 1, + "matchedCount": 3, + "modifiedCount": 3, + "deletedCount": 3, + "insertResults": { + "0": { + "insertedId": 8 + } + }, + "updateResults": { + "1": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "2": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + } + }, + "3": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedId": 4 + } + }, + "deleteResults": { + "4": { + "deletedCount": 1 + }, + "5": { + "deletedCount": 2 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 8, + "x": 88 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$inc": { + "x": 2 + } + }, + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "x": 44 + }, + "upsert": true, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 5 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "transaction-tests.coll0" + } + ] + } + } + }, + { + "commandStartedEvent": { + "commandName": 
"commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 24 + }, + { + "_id": 3, + "x": 35 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 8, + "x": 88 + } + ] + } + ] + }, + { + "description": "client writeConcern ignored for client bulkWrite in transaction", + "operations": [ + { + "object": "session_with_wmajority", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": 1 + } + } + }, + { + "object": "client_with_wmajority", + "name": "clientBulkWrite", + "arguments": { + "session": "session_with_wmajority", + "models": [ + { + "insertOne": { + "namespace": "transaction-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + }, + { + "object": "session_with_wmajority", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client_with_wmajority", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "lsid": { + "$$sessionLsid": "session_with_wmajority" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 8, + "x": 88 + } + } + ], + "nsInfo": [ + { + "ns": "transaction-tests.coll0" + } + ] + } + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session_with_wmajority" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": 1 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + }, + { + "_id": 7, + "x": 77 + }, + { + "_id": 8, + "x": 88 + } + ] + } + ] + }, + { + "description": "client bulkWrite with writeConcern in a transaction causes a transaction error", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "session": "session0", + "writeConcern": { + "w": 1 + }, + "models": [ + { + "insertOne": { + "namespace": "transaction-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + }, + "expectError": { + "isClientError": true, + "errorContains": "Cannot set write concern after starting a transaction" + } + } + ] + } + ] +} diff --git a/driver-core/src/test/resources/unified-test-format/transactions/mongos-pin-auto.json b/driver-core/src/test/resources/unified-test-format/transactions/mongos-pin-auto.json index 93eac8bb773..27db5204011 100644 --- 
a/driver-core/src/test/resources/unified-test-format/transactions/mongos-pin-auto.json +++ b/driver-core/src/test/resources/unified-test-format/transactions/mongos-pin-auto.json @@ -2004,6 +2004,104 @@ } ] }, + { + "description": "remain pinned after non-transient Interrupted error on clientBulkWrite bulkWrite", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "session": "session0", + "models": [ + { + "insertOne": { + "namespace": "database0.collection0", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ] + }, { "description": "unpin after transient connection error on insertOne insert", "operations": [ @@ -5175,6 +5273,202 @@ ] } ] + }, + { + "description": "unpin after transient connection error on clientBulkWrite bulkWrite", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "closeConnection": true + } + } + } + }, + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "session": "session0", + "models": [ + { + "insertOne": { + "namespace": "database0.collection0", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on clientBulkWrite bulkWrite", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + 
"$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "session": "session0", + "models": [ + { + "insertOne": { + "namespace": "database0.collection0", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ] } ] } diff --git a/driver-core/src/test/resources/versioned-api/crud-api-version-1.json b/driver-core/src/test/resources/versioned-api/crud-api-version-1.json index a387d0587e0..23ef59a6d98 100644 --- a/driver-core/src/test/resources/versioned-api/crud-api-version-1.json +++ b/driver-core/src/test/resources/versioned-api/crud-api-version-1.json @@ -50,7 +50,8 @@ }, "apiDeprecationErrors": true } - ] + ], + "namespace": "versioned-api-tests.test" }, "initialData": [ { @@ -426,6 +427,86 @@ } ] }, + { + "description": "client bulkWrite appends declared API version", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "serverless": "forbid" + } + ], + "operations": [ + { + "name": "clientBulkWrite", + "object": "client", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "versioned-api-tests.test", + "document": { + "_id": 6, + "x": 6 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 6 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 6, + "x": 6 + } + } + ], + "nsInfo": [ + { + "ns": "versioned-api-tests.test" + } + ], + "apiVersion": "1", + "apiStrict": { + "$$unsetOrMatches": false + }, + "apiDeprecationErrors": true + } + } + } + ] + } + ] + }, { "description": "countDocuments appends declared API version", "operations": [ diff --git a/driver-core/src/test/unit/com/mongodb/MongoBaseInterfaceAssertions.java b/driver-core/src/test/unit/com/mongodb/MongoBaseInterfaceAssertions.java new file mode 100644 index 00000000000..93f784b0506 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/MongoBaseInterfaceAssertions.java @@ -0,0 +1,69 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import org.reflections.Reflections; + +import java.lang.reflect.Method; +import java.lang.reflect.Modifier; +import java.util.Set; +import java.util.stream.Collectors; + +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; + +public final class MongoBaseInterfaceAssertions { + + private MongoBaseInterfaceAssertions() { + //NOP + } + + public static <T> void assertSubtypeReturn(final Class<T> baseClass) { + Reflections reflections = new Reflections("com.mongodb"); + Set<Class<? extends T>> subtypes = reflections.getSubTypesOf(baseClass).stream() + .filter(aClass -> Modifier.isPublic(aClass.getModifiers())) + .filter(aClass -> !aClass.getPackage().getName().contains(".internal")) + .collect(Collectors.toSet()); + + Method[] baseMethods = baseClass.getDeclaredMethods(); + + for (Class<? extends T> subtype : subtypes) { + for (Method baseMethod : baseMethods) { + Method method = assertDoesNotThrow( + () -> subtype.getDeclaredMethod(baseMethod.getName(), baseMethod.getParameterTypes()), + String.format( + "`%s` does not override `%s`. The methods must be copied into the implementing class/interface.", + subtype, + baseMethod + ) + ); + + assertEquals( + subtype, + method.getReturnType(), + String.format( + "Method `%s` in `%s` does not return `%s`. " + + "The return type must match the defining class/interface.", + method, + subtype, + subtype + ) + ); + } + } + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/bulk/BaseClientDeleteOptionsTest.java b/driver-core/src/test/unit/com/mongodb/client/model/bulk/BaseClientDeleteOptionsTest.java new file mode 100644 index 00000000000..e9832c24b21 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/bulk/BaseClientDeleteOptionsTest.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.mongodb.client.model.bulk; + +import com.mongodb.MongoBaseInterfaceAssertions; +import org.junit.jupiter.api.Test; + +class BaseClientDeleteOptionsTest { + + @Test + void testAllSubInterfacesOverrideMethods() { + MongoBaseInterfaceAssertions.assertSubtypeReturn(BaseClientDeleteOptions.class); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/bulk/BaseClientUpdateOptionsTest.java b/driver-core/src/test/unit/com/mongodb/client/model/bulk/BaseClientUpdateOptionsTest.java new file mode 100644 index 00000000000..43ba8e0967e --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/bulk/BaseClientUpdateOptionsTest.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.bulk; + +import com.mongodb.MongoBaseInterfaceAssertions; +import org.junit.jupiter.api.Test; + +class BaseClientUpdateOptionsTest { + + @Test + void testAllSubInterfacesOverrideMethods() { + MongoBaseInterfaceAssertions.assertSubtypeReturn(BaseClientUpdateOptions.class); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/bulk/BaseClientUpsertableWriteModelOptionsTest.java b/driver-core/src/test/unit/com/mongodb/client/model/bulk/BaseClientUpsertableWriteModelOptionsTest.java new file mode 100644 index 00000000000..5992a508574 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/bulk/BaseClientUpsertableWriteModelOptionsTest.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.bulk; + +import com.mongodb.MongoBaseInterfaceAssertions; +import org.junit.jupiter.api.Test; + +final class BaseClientUpsertableWriteModelOptionsTest { + @Test + void testAllSubInterfacesOverrideMethods() { + MongoBaseInterfaceAssertions.assertSubtypeReturn(BaseClientUpsertableWriteModelOptions.class); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/bulk/BaseClientWriteModelOptionsTest.java b/driver-core/src/test/unit/com/mongodb/client/model/bulk/BaseClientWriteModelOptionsTest.java new file mode 100644 index 00000000000..66fec81632e --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/bulk/BaseClientWriteModelOptionsTest.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.bulk; + +import com.mongodb.MongoBaseInterfaceAssertions; +import org.junit.jupiter.api.Test; + +final class BaseClientWriteModelOptionsTest { + @Test + void testAllSubInterfacesOverrideMethods() { + MongoBaseInterfaceAssertions.assertSubtypeReturn(BaseClientWriteModelOptions.class); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/async/AsyncFunctionsAbstractTest.java b/driver-core/src/test/unit/com/mongodb/internal/async/AsyncFunctionsAbstractTest.java index 65636e2f842..9a9b7552d3e 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/async/AsyncFunctionsAbstractTest.java +++ b/driver-core/src/test/unit/com/mongodb/internal/async/AsyncFunctionsAbstractTest.java @@ -748,6 +748,26 @@ void testRetryLoop() { }); } + @Test + void testDoWhileLoop() { + assertBehavesSameVariations(67, + () -> { + do { + plain(0); + sync(1); + } while (plainTest(2)); + }, + (finalCallback) -> { + beginAsync().thenRunDoWhileLoop( + callback -> { + plain(0); + async(1, callback); + }, + () -> plainTest(2) + ).finish(finalCallback); + }); + } + @Test void testFinallyWithPlainInsideTry() { // (in try: normal flow + exception + exception) * (in finally: normal + exception) = 6 @@ -793,6 +813,51 @@ void testFinallyWithPlainOutsideTry() { }); } + @Test + void testSupplyFinallyWithPlainInsideTry() { + assertBehavesSameVariations(6, + () -> { + try { + plain(1); + return syncReturns(2); + } finally { + plain(3); + } + }, + (callback) -> { + beginAsync().thenSupply(c -> { + plain(1); + asyncReturns(2, c); + }).thenAlwaysRunAndFinish(() -> { + plain(3); + }, callback); + }); + } + + @Test + void testSupplyFinallyWithPlainOutsideTry() { + assertBehavesSameVariations(5, + () -> { + plain(1); + try { + return syncReturns(2); + } finally { + plain(3); + } + }, + (callback) -> { + beginAsync().thenSupply(c -> { + plain(1); + beginAsync().thenSupply(c2 -> { + asyncReturns(2, c2); + }).thenAlwaysRunAndFinish(() -> { + plain(3); + }, c); + }).finish(callback); + }); + } + + @Test void testUsedAsLambda() { assertBehavesSameVariations(4, diff --git a/driver-core/src/test/unit/com/mongodb/internal/async/AsyncFunctionsTestBase.java b/driver-core/src/test/unit/com/mongodb/internal/async/AsyncFunctionsTestBase.java index 1229dbcfcad..10a58152d9f 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/async/AsyncFunctionsTestBase.java +++ b/driver-core/src/test/unit/com/mongodb/internal/async/AsyncFunctionsTestBase.java @@ -256,8 +256,9 @@ private void assertBehavesSame(final Supplier sync, final Runnable betwee await(wasCalledFuture, "Callback should have been called"); // The following code can be used to debug variations: -// System.out.println("===VARIATION START"); +// System.out.println("===VARIATION START: " + invocationTracker.getVariationCount()); // System.out.println("sync: " + expectedEvents); +// System.out.println("sync size: " + expectedEvents.size()); // System.out.println("callback called?: " + 
wasCalledFuture.isDone()); // System.out.println("value -- sync: " + expectedValue + " -- async: " + actualValue.get()); // System.out.println("excep -- sync: " + expectedException + " -- async: " + actualException.get()); diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ByteBufferBsonOutputSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/ByteBufferBsonOutputSpecification.groovy deleted file mode 100644 index 311279038ea..00000000000 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/ByteBufferBsonOutputSpecification.groovy +++ /dev/null @@ -1,420 +0,0 @@ -/* - * Copyright 2008-present MongoDB, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.internal.connection - -import util.spock.annotations.Slow -import org.bson.BsonSerializationException -import org.bson.types.ObjectId -import spock.lang.Specification - -import java.security.SecureRandom - -class ByteBufferBsonOutputSpecification extends Specification { - def 'constructor should throw if buffer provider is null'() { - when: - new ByteBufferBsonOutput(null) - - then: - thrown(IllegalArgumentException) - } - - def 'position and size should be 0 after constructor'() { - when: - def bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider()) - - then: - bsonOutput.position == 0 - bsonOutput.size == 0 - } - - def 'should write a byte'() { - given: - def bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider()) - - when: - bsonOutput.writeByte(11) - - then: - getBytes(bsonOutput) == [11] as byte[] - bsonOutput.position == 1 - bsonOutput.size == 1 - } - - def 'should write bytes'() { - given: - def bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider()) - - when: - bsonOutput.writeBytes([1, 2, 3, 4] as byte[]) - - then: - getBytes(bsonOutput) == [1, 2, 3, 4] as byte[] - bsonOutput.position == 4 - bsonOutput.size == 4 - } - - def 'should write bytes from offset until length'() { - given: - def bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider()) - - when: - bsonOutput.writeBytes([0, 1, 2, 3, 4, 5] as byte[], 1, 4) - - then: - getBytes(bsonOutput) == [1, 2, 3, 4] as byte[] - bsonOutput.position == 4 - bsonOutput.size == 4 - } - - def 'should write a little endian Int32'() { - given: - def bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider()) - - when: - bsonOutput.writeInt32(0x1020304) - - then: - getBytes(bsonOutput) == [4, 3, 2, 1] as byte[] - bsonOutput.position == 4 - bsonOutput.size == 4 - } - - def 'should write a little endian Int64'() { - given: - def bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider()) - - when: - bsonOutput.writeInt64(0x102030405060708L) - - then: - getBytes(bsonOutput) == [8, 7, 6, 5, 4, 3, 2, 1] as byte[] - bsonOutput.position == 8 - bsonOutput.size == 8 - } - - def 'should write a double'() { - given: - def bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider()) - - when: - 
bsonOutput.writeDouble(Double.longBitsToDouble(0x102030405060708L)) - - then: - getBytes(bsonOutput) == [8, 7, 6, 5, 4, 3, 2, 1] as byte[] - bsonOutput.position == 8 - bsonOutput.size == 8 - } - - def 'should write an ObjectId'() { - given: - def objectIdAsByteArray = [12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1] as byte[] - def bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider()) - - when: - bsonOutput.writeObjectId(new ObjectId(objectIdAsByteArray)) - - then: - getBytes(bsonOutput) == objectIdAsByteArray - bsonOutput.position == 12 - bsonOutput.size == 12 - } - - def 'should write an empty string'() { - given: - def bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider()) - - when: - bsonOutput.writeString('') - - then: - getBytes(bsonOutput) == [1, 0, 0 , 0, 0] as byte[] - bsonOutput.position == 5 - bsonOutput.size == 5 - } - - def 'should write an ASCII string'() { - given: - def bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider()) - - when: - bsonOutput.writeString('Java') - - then: - getBytes(bsonOutput) == [5, 0, 0, 0, 0x4a, 0x61, 0x76, 0x61, 0] as byte[] - bsonOutput.position == 9 - bsonOutput.size == 9 - } - - def 'should write a UTF-8 string'() { - given: - def bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider()) - - when: - bsonOutput.writeString('\u0900') - - then: - getBytes(bsonOutput) == [4, 0, 0, 0, 0xe0, 0xa4, 0x80, 0] as byte[] - bsonOutput.position == 8 - bsonOutput.size == 8 - } - - def 'should write an empty CString'() { - given: - def bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider()) - - when: - bsonOutput.writeCString('') - - then: - getBytes(bsonOutput) == [0] as byte[] - bsonOutput.position == 1 - bsonOutput.size == 1 - } - - def 'should write an ASCII CString'() { - given: - def bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider()) - - when: - bsonOutput.writeCString('Java') - - then: - getBytes(bsonOutput) == [0x4a, 0x61, 0x76, 0x61, 0] as byte[] - bsonOutput.position == 5 - bsonOutput.size == 5 - } - - def 'should write a UTF-8 CString'() { - given: - def bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider()) - - when: - bsonOutput.writeCString('\u0900') - - then: - getBytes(bsonOutput) == [0xe0, 0xa4, 0x80, 0] as byte[] - bsonOutput.position == 4 - bsonOutput.size == 4 - } - - def 'should get byte buffers as little endian'() { - given: - def bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider()) - - when: - bsonOutput.writeBytes([1, 0, 0, 0] as byte[]) - - then: - bsonOutput.getByteBuffers()[0].getInt() == 1 - } - - def 'null character in CString should throw SerializationException'() { - given: - def bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider()) - - when: - bsonOutput.writeCString('hell\u0000world') - - then: - thrown(BsonSerializationException) - } - - def 'null character in String should not throw SerializationException'() { - given: - def bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider()) - - when: - bsonOutput.writeString('h\u0000i') - - then: - getBytes(bsonOutput) == [4, 0, 0, 0, (byte) 'h', 0, (byte) 'i', 0] as byte[] - } - - def 'write Int32 at position should throw with invalid position'() { - given: - def bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider()) - bsonOutput.writeBytes([1, 2, 3, 4] as byte[]) - - when: - bsonOutput.writeInt32(-1, 0x1020304) - - then: - thrown(IllegalArgumentException) - - when: - bsonOutput.writeInt32(1, 0x1020304) - - then: - thrown(IllegalArgumentException) - } - - def 'should write 
Int32 at position'() { - given: - def bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider()) - bsonOutput.writeBytes([0, 0, 0, 0, 1, 2, 3, 4] as byte[]) - - when: 'the position is in the first buffer' - bsonOutput.writeInt32(0, 0x1020304) - - then: - getBytes(bsonOutput) == [4, 3, 2, 1, 1, 2, 3, 4] as byte[] - bsonOutput.position == 8 - bsonOutput.size == 8 - - when: 'the position is at the end of the first buffer' - bsonOutput.writeInt32(4, 0x1020304) - - then: - getBytes(bsonOutput) == [4, 3, 2, 1, 4, 3, 2, 1] as byte[] - bsonOutput.position == 8 - bsonOutput.size == 8 - - when: 'the position is not in the first buffer' - bsonOutput.writeBytes(new byte[1024]) - bsonOutput.writeInt32(1023, 0x1020304) - - then: - getBytes(bsonOutput)[1023..1026] as byte[] == [4, 3, 2, 1] as byte[] - bsonOutput.position == 1032 - bsonOutput.size == 1032 - } - - def 'truncate should throw with invalid position'() { - given: - def bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider()) - bsonOutput.writeBytes([1, 2, 3, 4] as byte[]) - - when: - bsonOutput.truncateToPosition(5) - - then: - thrown(IllegalArgumentException) - - when: - bsonOutput.truncateToPosition(-1) - - then: - thrown(IllegalArgumentException) - } - - def 'should truncate to position'() { - given: - def bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider()) - bsonOutput.writeBytes([1, 2, 3, 4] as byte[]) - bsonOutput.writeBytes(new byte[1024]) - - when: - bsonOutput.truncateToPosition(2) - - then: - getBytes(bsonOutput) == [1, 2] as byte[] - bsonOutput.position == 2 - bsonOutput.size == 2 - } - - def 'should grow'() { - given: - def bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider()) - def bytes = new byte[1023] - bsonOutput.writeBytes(bytes) - - when: - bsonOutput.writeInt32(0x1020304) - - then: - getBytes(bsonOutput)[0..1022] as byte[] == bytes - getBytes(bsonOutput)[1023..1026] as byte[] == [4, 3, 2, 1] as byte[] - bsonOutput.position == 1027 - bsonOutput.size == 1027 - } - - @Slow - def 'should grow to maximum allowed size of byte buffer'() { - given: - def bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider()) - def bytes = new byte[0x2000000] - def random = new SecureRandom() - random.nextBytes(bytes) - - when: - bsonOutput.writeBytes(bytes) - - then: - bsonOutput.size == 0x2000000 - bsonOutput.getByteBuffers()*.capacity() == - [1 << 10, 1 << 11, 1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16, 1 << 17, 1 << 18, 1 << 19, - 1 << 20, 1 << 21, 1 << 22, 1 << 23, 1 << 24, 1 << 24] - - when: - def stream = new ByteArrayOutputStream(bsonOutput.size) - bsonOutput.pipe(stream) - - then: - Arrays.equals(bytes, stream.toByteArray()) // faster than using Groovy's == implementation - } - - def 'should pipe'() { - given: - def bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider()) - def bytes = new byte[1027] - bsonOutput.writeBytes(bytes) - - when: - def baos = new ByteArrayOutputStream() - bsonOutput.pipe(baos) - - then: - bytes == baos.toByteArray() - bsonOutput.position == 1027 - bsonOutput.size == 1027 - - when: - baos = new ByteArrayOutputStream() - bsonOutput.pipe(baos) - - then: - bytes == baos.toByteArray() - bsonOutput.position == 1027 - bsonOutput.size == 1027 - } - - - def 'should close'() { - given: - def bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider()) - bsonOutput.writeBytes(new byte[1027]) - - when: - bsonOutput.close() - bsonOutput.writeByte(11) - - then: - thrown(IllegalStateException) - } - - def getBytes(final ByteBufferBsonOutput 
byteBufferBsonOutput) { - ByteArrayOutputStream baos = new ByteArrayOutputStream(byteBufferBsonOutput.size) - - for (def cur : byteBufferBsonOutput.byteBuffers) { - while (cur.hasRemaining()) { - baos.write(cur.get()) - } - } - - baos.toByteArray() - } -} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ByteBufferBsonOutputTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/ByteBufferBsonOutputTest.java new file mode 100644 index 00000000000..3a8a2c83acb --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ByteBufferBsonOutputTest.java @@ -0,0 +1,625 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.assertions.Assertions; +import org.bson.BsonSerializationException; +import org.bson.ByteBuf; +import org.bson.types.ObjectId; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; +import org.junit.jupiter.params.provider.ValueSource; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.charset.CharacterCodingException; +import java.nio.charset.StandardCharsets; +import java.util.concurrent.ThreadLocalRandom; +import java.util.function.BiConsumer; +import java.util.function.Consumer; + +import static com.mongodb.internal.connection.ByteBufferBsonOutput.INITIAL_BUFFER_SIZE; +import static com.mongodb.internal.connection.ByteBufferBsonOutput.MAX_BUFFER_SIZE; +import static java.util.Arrays.asList; +import static java.util.Arrays.copyOfRange; +import static java.util.stream.Collectors.toList; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +final class ByteBufferBsonOutputTest { + @DisplayName("constructor should throw if buffer provider is null") + @Test + @SuppressWarnings("try") + void constructorShouldThrowIfBufferProviderIsNull() { + assertThrows(IllegalArgumentException.class, () -> { + try (ByteBufferBsonOutput ignored = new ByteBufferBsonOutput(null)) { + // nothing to do + } + }); + } + + @DisplayName("position and size should be 0 after constructor") + @ParameterizedTest + @ValueSource(strings = {"none", "empty", "truncated"}) + void positionAndSizeShouldBe0AfterConstructor(final String branchState) { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(new SimpleBufferProvider())) { + switch (branchState) { + case "none": { + break; + } + case "empty": { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + assertEquals(0, branch.getPosition()); + assertEquals(0, branch.size()); + } + break; + } + case "truncated": { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + for (int i = 0; i < MAX_BUFFER_SIZE; i++) { + branch.writeByte(i); + } + 
branch.truncateToPosition(0); + } + break; + } + default: { + throw Assertions.fail(branchState); + } + } + assertEquals(0, out.getPosition()); + assertEquals(0, out.size()); + } + } + + @DisplayName("should write a byte") + @ParameterizedTest + @ValueSource(booleans = {false, true}) + void shouldWriteByte(final boolean useBranch) { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(new SimpleBufferProvider())) { + byte v = 11; + if (useBranch) { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + branch.writeByte(v); + } + } else { + out.writeByte(v); + } + assertArrayEquals(new byte[] {v}, out.toByteArray()); + assertEquals(1, out.getPosition()); + assertEquals(1, out.size()); + } + } + + @DisplayName("should write a bytes") + @ParameterizedTest + @ValueSource(booleans = {false, true}) + void shouldWriteBytes(final boolean useBranch) { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(new SimpleBufferProvider())) { + byte[] v = {1, 2, 3, 4}; + if (useBranch) { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + branch.writeBytes(v); + } + } else { + out.writeBytes(v); + } + assertArrayEquals(v, out.toByteArray()); + assertEquals(v.length, out.getPosition()); + assertEquals(v.length, out.size()); + } + } + + @DisplayName("should write bytes from offset until length") + @ParameterizedTest + @ValueSource(booleans = {false, true}) + void shouldWriteBytesFromOffsetUntilLength(final boolean useBranch) { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(new SimpleBufferProvider())) { + byte[] v = {0, 1, 2, 3, 4, 5}; + if (useBranch) { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + branch.writeBytes(v, 1, 4); + } + } else { + out.writeBytes(v, 1, 4); + } + assertArrayEquals(new byte[] {1, 2, 3, 4}, out.toByteArray()); + assertEquals(4, out.getPosition()); + assertEquals(4, out.size()); + } + } + + @DisplayName("should write a little endian Int32") + @ParameterizedTest + @ValueSource(booleans = {false, true}) + void shouldWriteLittleEndianInt32(final boolean useBranch) { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(new SimpleBufferProvider())) { + int v = 0x1020304; + if (useBranch) { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + branch.writeInt32(v); + } + } else { + out.writeInt32(v); + } + assertArrayEquals(new byte[] {4, 3, 2, 1}, out.toByteArray()); + assertEquals(4, out.getPosition()); + assertEquals(4, out.size()); + } + } + + @DisplayName("should write a little endian Int64") + @ParameterizedTest + @ValueSource(booleans = {false, true}) + void shouldWriteLittleEndianInt64(final boolean useBranch) { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(new SimpleBufferProvider())) { + long v = 0x102030405060708L; + if (useBranch) { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + branch.writeInt64(v); + } + } else { + out.writeInt64(v); + } + assertArrayEquals(new byte[] {8, 7, 6, 5, 4, 3, 2, 1}, out.toByteArray()); + assertEquals(8, out.getPosition()); + assertEquals(8, out.size()); + } + } + + @DisplayName("should write a double") + @ParameterizedTest + @ValueSource(booleans = {false, true}) + void shouldWriteDouble(final boolean useBranch) { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(new SimpleBufferProvider())) { + double v = Double.longBitsToDouble(0x102030405060708L); + if (useBranch) { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + branch.writeDouble(v); + } + } else { + out.writeDouble(v); + } + assertArrayEquals(new byte[] {8, 7, 6, 5, 4, 3, 
2, 1}, out.toByteArray()); + assertEquals(8, out.getPosition()); + assertEquals(8, out.size()); + } + } + + @DisplayName("should write an ObjectId") + @ParameterizedTest + @ValueSource(booleans = {false, true}) + void shouldWriteObjectId(final boolean useBranch) { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(new SimpleBufferProvider())) { + byte[] objectIdAsByteArray = {12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}; + ObjectId v = new ObjectId(objectIdAsByteArray); + if (useBranch) { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + branch.writeObjectId(v); + } + } else { + out.writeObjectId(v); + } + assertArrayEquals(objectIdAsByteArray, out.toByteArray()); + assertEquals(12, out.getPosition()); + assertEquals(12, out.size()); + } + } + + @DisplayName("should write an empty string") + @ParameterizedTest + @ValueSource(booleans = {false, true}) + void shouldWriteEmptyString(final boolean useBranch) { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(new SimpleBufferProvider())) { + String v = ""; + if (useBranch) { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + branch.writeString(v); + } + } else { + out.writeString(v); + } + assertArrayEquals(new byte[] {1, 0, 0, 0, 0}, out.toByteArray()); + assertEquals(5, out.getPosition()); + assertEquals(5, out.size()); + } + } + + @DisplayName("should write an ASCII string") + @ParameterizedTest + @ValueSource(booleans = {false, true}) + void shouldWriteAsciiString(final boolean useBranch) { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(new SimpleBufferProvider())) { + String v = "Java"; + if (useBranch) { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + branch.writeString(v); + } + } else { + out.writeString(v); + } + assertArrayEquals(new byte[] {5, 0, 0, 0, 0x4a, 0x61, 0x76, 0x61, 0}, out.toByteArray()); + assertEquals(9, out.getPosition()); + assertEquals(9, out.size()); + } + } + + @DisplayName("should write a UTF-8 string") + @ParameterizedTest + @ValueSource(booleans = {false, true}) + void shouldWriteUtf8String(final boolean useBranch) { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(new SimpleBufferProvider())) { + String v = "\u0900"; + if (useBranch) { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + branch.writeString(v); + } + } else { + out.writeString(v); + } + assertArrayEquals(new byte[] {4, 0, 0, 0, (byte) 0xe0, (byte) 0xa4, (byte) 0x80, 0}, out.toByteArray()); + assertEquals(8, out.getPosition()); + assertEquals(8, out.size()); + } + } + + @DisplayName("should write an empty CString") + @ParameterizedTest + @ValueSource(booleans = {false, true}) + void shouldWriteEmptyCString(final boolean useBranch) { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(new SimpleBufferProvider())) { + String v = ""; + if (useBranch) { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + branch.writeCString(v); + } + } else { + out.writeCString(v); + } + assertArrayEquals(new byte[] {0}, out.toByteArray()); + assertEquals(1, out.getPosition()); + assertEquals(1, out.size()); + } + } + + @DisplayName("should write an ASCII CString") + @ParameterizedTest + @ValueSource(booleans = {false, true}) + void shouldWriteAsciiCString(final boolean useBranch) { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(new SimpleBufferProvider())) { + String v = "Java"; + if (useBranch) { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + branch.writeCString(v); + } + } else { + out.writeCString(v); + } + assertArrayEquals(new byte[] {0x4a, 
0x61, 0x76, 0x61, 0}, out.toByteArray()); + assertEquals(5, out.getPosition()); + assertEquals(5, out.size()); + } + } + + @DisplayName("should write a UTF-8 CString") + @ParameterizedTest + @ValueSource(booleans = {false, true}) + void shouldWriteUtf8CString(final boolean useBranch) { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(new SimpleBufferProvider())) { + String v = "\u0900"; + if (useBranch) { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + branch.writeCString(v); + } + } else { + out.writeCString(v); + } + assertArrayEquals(new byte[] {(byte) 0xe0, (byte) 0xa4, (byte) 0x80, 0}, out.toByteArray()); + assertEquals(4, out.getPosition()); + assertEquals(4, out.size()); + } + } + + @DisplayName("should get byte buffers as little endian") + @ParameterizedTest + @ValueSource(booleans = {false, true}) + void shouldGetByteBuffersAsLittleEndian(final boolean useBranch) { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(new SimpleBufferProvider())) { + byte[] v = {1, 0, 0, 0}; + if (useBranch) { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + branch.writeBytes(v); + } + } else { + out.writeBytes(v); + } + assertEquals(1, out.getByteBuffers().get(0).getInt()); + } + } + + @DisplayName("null character in CString should throw SerializationException") + @ParameterizedTest + @ValueSource(booleans = {false, true}) + void nullCharacterInCStringShouldThrowSerializationException(final boolean useBranch) { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(new SimpleBufferProvider())) { + String v = "hell\u0000world"; + if (useBranch) { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + assertThrows(BsonSerializationException.class, () -> branch.writeCString(v)); + } + } else { + assertThrows(BsonSerializationException.class, () -> out.writeCString(v)); + } + } + } + + @DisplayName("null character in String should not throw SerializationException") + @ParameterizedTest + @ValueSource(booleans = {false, true}) + void nullCharacterInStringShouldNotThrowSerializationException(final boolean useBranch) { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(new SimpleBufferProvider())) { + String v = "h\u0000i"; + if (useBranch) { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + branch.writeString(v); + } + } else { + out.writeString(v); + } + assertArrayEquals(new byte[] {4, 0, 0, 0, (byte) 'h', 0, (byte) 'i', 0}, out.toByteArray()); + } + } + + @DisplayName("write Int32 at position should throw with invalid position") + @ParameterizedTest + @CsvSource({"false, -1", "false, 1", "true, -1", "true, 1"}) + void writeInt32AtPositionShouldThrowWithInvalidPosition(final boolean useBranch, final int position) { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(new SimpleBufferProvider())) { + byte[] v = {1, 2, 3, 4}; + int v2 = 0x1020304; + if (useBranch) { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + branch.writeBytes(v); + assertThrows(IllegalArgumentException.class, () -> branch.writeInt32(position, v2)); + } + } else { + out.writeBytes(v); + assertThrows(IllegalArgumentException.class, () -> out.writeInt32(position, v2)); + } + } + } + + @DisplayName("should write Int32 at position") + @ParameterizedTest + @ValueSource(booleans = {false, true}) + void shouldWriteInt32AtPosition(final boolean useBranch) { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(new SimpleBufferProvider())) { + Consumer lastAssertions = effectiveOut -> { + assertArrayEquals(new byte[] {4, 3, 2, 1}, 
copyOfRange(effectiveOut.toByteArray(), 1023, 1027), "the position is not in the first buffer"); + assertEquals(1032, effectiveOut.getPosition()); + assertEquals(1032, effectiveOut.size()); + }; + Consumer assertions = effectiveOut -> { + effectiveOut.writeBytes(new byte[] {0, 0, 0, 0, 1, 2, 3, 4}); + effectiveOut.writeInt32(0, 0x1020304); + assertArrayEquals(new byte[] {4, 3, 2, 1, 1, 2, 3, 4}, effectiveOut.toByteArray(), "the position is in the first buffer"); + assertEquals(8, effectiveOut.getPosition()); + assertEquals(8, effectiveOut.size()); + effectiveOut.writeInt32(4, 0x1020304); + assertArrayEquals(new byte[] {4, 3, 2, 1, 4, 3, 2, 1}, effectiveOut.toByteArray(), "the position is at the end of the first buffer"); + assertEquals(8, effectiveOut.getPosition()); + assertEquals(8, effectiveOut.size()); + effectiveOut.writeBytes(new byte[1024]); + effectiveOut.writeInt32(1023, 0x1020304); + lastAssertions.accept(effectiveOut); + }; + if (useBranch) { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + assertions.accept(branch); + } + } else { + assertions.accept(out); + } + lastAssertions.accept(out); + } + } + + @DisplayName("truncate should throw with invalid position") + @ParameterizedTest + @CsvSource({"false, -1", "false, 5", "true, -1", "true, 5"}) + void truncateShouldThrowWithInvalidPosition(final boolean useBranch, final int position) { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(new SimpleBufferProvider())) { + byte[] v = {1, 2, 3, 4}; + if (useBranch) { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + branch.writeBytes(v); + assertThrows(IllegalArgumentException.class, () -> branch.truncateToPosition(position)); + } + } else { + out.writeBytes(v); + assertThrows(IllegalArgumentException.class, () -> out.truncateToPosition(position)); + } + } + } + + @DisplayName("should truncate to position") + @ParameterizedTest + @ValueSource(booleans = {false, true}) + void shouldTruncateToPosition(final boolean useBranch) { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(new SimpleBufferProvider())) { + byte[] v = {1, 2, 3, 4}; + byte[] v2 = new byte[1024]; + if (useBranch) { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + branch.writeBytes(v); + branch.writeBytes(v2); + branch.truncateToPosition(2); + } + } else { + out.writeBytes(v); + out.writeBytes(v2); + out.truncateToPosition(2); + } + assertArrayEquals(new byte[] {1, 2}, out.toByteArray()); + assertEquals(2, out.getPosition()); + assertEquals(2, out.size()); + } + } + + @DisplayName("should grow to maximum allowed size of byte buffer") + @ParameterizedTest + @ValueSource(booleans = {false, true}) + void shouldGrowToMaximumAllowedSizeOfByteBuffer(final boolean useBranch) { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(new SimpleBufferProvider())) { + byte[] v = new byte[0x2000000]; + ThreadLocalRandom.current().nextBytes(v); + Consumer assertByteBuffers = effectiveOut -> assertEquals( + asList(1 << 10, 1 << 11, 1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16, 1 << 17, 1 << 18, 1 << 19, 1 << 20, + 1 << 21, 1 << 22, 1 << 23, 1 << 24, 1 << 24), + effectiveOut.getByteBuffers().stream().map(ByteBuf::capacity).collect(toList())); + Consumer assertions = effectiveOut -> { + effectiveOut.writeBytes(v); + assertEquals(v.length, effectiveOut.size()); + assertByteBuffers.accept(effectiveOut); + ByteArrayOutputStream baos = new ByteArrayOutputStream(effectiveOut.size()); + try { + effectiveOut.pipe(baos); + } catch (IOException e) { + throw new RuntimeException(e); + } + 
assertArrayEquals(v, baos.toByteArray()); + }; + if (useBranch) { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + assertions.accept(branch); + } + } else { + assertions.accept(out); + } + assertByteBuffers.accept(out); + } + } + + @DisplayName("should pipe") + @ParameterizedTest + @ValueSource(booleans = {false, true}) + void shouldPipe(final boolean useBranch) throws IOException { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(new SimpleBufferProvider())) { + byte[] v = new byte[1027]; + BiConsumer assertions = (effectiveOut, baos) -> { + assertArrayEquals(v, baos.toByteArray()); + assertEquals(v.length, effectiveOut.getPosition()); + assertEquals(v.length, effectiveOut.size()); + }; + if (useBranch) { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + branch.writeBytes(v); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + branch.pipe(baos); + assertions.accept(branch, baos); + baos = new ByteArrayOutputStream(); + branch.pipe(baos); + assertions.accept(branch, baos); + } + } else { + out.writeBytes(v); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + out.pipe(baos); + assertions.accept(out, baos); + baos = new ByteArrayOutputStream(); + out.pipe(baos); + assertions.accept(out, baos); + } + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + out.pipe(baos); + assertions.accept(out, baos); + } + } + + @DisplayName("should close") + @ParameterizedTest + @ValueSource(booleans = {false, true}) + @SuppressWarnings("try") + void shouldClose(final boolean useBranch) { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(new SimpleBufferProvider())) { + byte[] v = new byte[1027]; + if (useBranch) { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + branch.writeBytes(v); + branch.close(); + assertThrows(IllegalStateException.class, () -> branch.writeByte(11)); + } + } else { + out.writeBytes(v); + out.close(); + assertThrows(IllegalStateException.class, () -> out.writeByte(11)); + } + } + } + + @DisplayName("should handle mixed branching and truncating") + @ParameterizedTest + @ValueSource(ints = {1, INITIAL_BUFFER_SIZE, INITIAL_BUFFER_SIZE * 3}) + void shouldHandleMixedBranchingAndTruncating(final int reps) throws CharacterCodingException { + BiConsumer write = (out, c) -> { + Assertions.assertTrue((byte) c.charValue() == c); + for (int i = 0; i < reps; i++) { + out.writeByte(c); + } + }; + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(new SimpleBufferProvider())) { + write.accept(out, 'a'); + try (ByteBufferBsonOutput.Branch b3 = out.branch(); + ByteBufferBsonOutput.Branch b1 = out.branch()) { + write.accept(b3, 'g'); + write.accept(out, 'b'); + write.accept(b1, 'e'); + try (ByteBufferBsonOutput.Branch b2 = b1.branch()) { + write.accept(out, 'c'); + write.accept(b2, 'f'); + int b2Position = b2.getPosition(); + write.accept(b2, 'x'); + b2.truncateToPosition(b2Position); + } + write.accept(out, 'd'); + } + write.accept(out, 'h'); + try (ByteBufferBsonOutput.Branch b4 = out.branch()) { + write.accept(b4, 'i'); + int outPosition = out.getPosition(); + try (ByteBufferBsonOutput.Branch b5 = out.branch()) { + write.accept(out, 'x'); + write.accept(b5, 'x'); + } + out.truncateToPosition(outPosition); + } + write.accept(out, 'j'); + StringBuilder expected = new StringBuilder(); + "abcdefghij".chars().forEach(c -> { + String s = String.valueOf((char) c); + for (int i = 0; i < reps; i++) { + expected.append(s); + } + }); + assertEquals(expected.toString(), 
StandardCharsets.UTF_8.newDecoder().decode(ByteBuffer.wrap(out.toByteArray())).toString()); + } + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageSpecification.groovy index e8ed6c152ae..e3351e2eb0f 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageSpecification.groovy @@ -38,7 +38,6 @@ import org.bson.BsonTimestamp import org.bson.ByteBuf import org.bson.ByteBufNIO import org.bson.codecs.BsonDocumentCodec -import org.bson.io.BasicOutputBuffer import spock.lang.Specification import java.nio.ByteBuffer @@ -47,6 +46,9 @@ import static com.mongodb.internal.connection.SplittablePayload.Type.INSERT import static com.mongodb.internal.operation.ServerVersionHelper.FOUR_DOT_ZERO_WIRE_VERSION import static com.mongodb.internal.operation.ServerVersionHelper.LATEST_WIRE_VERSION +/** + * New tests must be added to {@link CommandMessageTest}. + */ class CommandMessageSpecification extends Specification { def namespace = new MongoNamespace('db.test') @@ -61,8 +63,8 @@ class CommandMessageSpecification extends Specification { .serverType(serverType as ServerType) .sessionSupported(true) .build(), - responseExpected, null, null, clusterConnectionMode, null) - def output = new BasicOutputBuffer() + responseExpected, MessageSequences.EmptyMessageSequences.INSTANCE, clusterConnectionMode, null) + def output = new ByteBufferBsonOutput(new SimpleBufferProvider()) when: message.encode(output, operationContext) @@ -93,6 +95,9 @@ class CommandMessageSpecification extends Specification { } getCommandDocument(byteBuf, replyHeader) == expectedCommandDocument + cleanup: + output.close() + where: [readPreference, serverType, clusterConnectionMode, operationContext, responseExpected, isCryptd] << [ [ReadPreference.primary(), ReadPreference.secondary()], @@ -149,7 +154,8 @@ class CommandMessageSpecification extends Specification { def 'should get command document'() { given: def message = new CommandMessage(namespace, originalCommandDocument, fieldNameValidator, ReadPreference.primary(), - MessageSettings.builder().maxWireVersion(maxWireVersion).build(), true, payload, NoOpFieldNameValidator.INSTANCE, + MessageSettings.builder().maxWireVersion(maxWireVersion).build(), true, + payload == null ? 
MessageSequences.EmptyMessageSequences.INSTANCE : payload, ClusterConnectionMode.MULTIPLE, null) def output = new ByteBufferBsonOutput(new SimpleBufferProvider()) message.encode(output, new OperationContext(IgnorableRequestContext.INSTANCE, NoOpSessionContext.INSTANCE, @@ -172,7 +178,8 @@ class CommandMessageSpecification extends Specification { new BsonDocument('insert', new BsonString('coll')), new SplittablePayload(INSERT, [new BsonDocument('_id', new BsonInt32(1)), new BsonDocument('_id', new BsonInt32(2))] - .withIndex().collect { doc, i -> new WriteRequestWithIndex(new InsertRequest(doc), i) }, true), + .withIndex().collect { doc, i -> new WriteRequestWithIndex(new InsertRequest(doc), i) }, + true, NoOpFieldNameValidator.INSTANCE), ], [ LATEST_WIRE_VERSION, @@ -193,10 +200,10 @@ class CommandMessageSpecification extends Specification { new BsonDocument('_id', new BsonInt32(3)).append('c', new BsonBinary(new byte[450])), new BsonDocument('_id', new BsonInt32(4)).append('b', new BsonBinary(new byte[441])), new BsonDocument('_id', new BsonInt32(5)).append('c', new BsonBinary(new byte[451]))] - .withIndex().collect { doc, i -> new WriteRequestWithIndex(new InsertRequest(doc), i) }, true) + .withIndex().collect { doc, i -> new WriteRequestWithIndex(new InsertRequest(doc), i) }, true, fieldNameValidator) def message = new CommandMessage(namespace, insertCommand, fieldNameValidator, ReadPreference.primary(), messageSettings, - false, payload, fieldNameValidator, ClusterConnectionMode.MULTIPLE, null) - def output = new BasicOutputBuffer() + false, payload, ClusterConnectionMode.MULTIPLE, null) + def output = new ByteBufferBsonOutput(new SimpleBufferProvider()) def sessionContext = Stub(SessionContext) { getReadConcern() >> ReadConcern.DEFAULT } @@ -219,7 +226,7 @@ class CommandMessageSpecification extends Specification { when: payload = payload.getNextSplit() message = new CommandMessage(namespace, insertCommand, fieldNameValidator, ReadPreference.primary(), messageSettings, - false, payload, fieldNameValidator, ClusterConnectionMode.MULTIPLE, null) + false, payload, ClusterConnectionMode.MULTIPLE, null) output.truncateToPosition(0) message.encode(output, new OperationContext(IgnorableRequestContext.INSTANCE, sessionContext, Stub(TimeoutContext), null)) byteBuf = new ByteBufNIO(ByteBuffer.wrap(output.toByteArray())) @@ -237,7 +244,7 @@ class CommandMessageSpecification extends Specification { when: payload = payload.getNextSplit() message = new CommandMessage(namespace, insertCommand, fieldNameValidator, ReadPreference.primary(), messageSettings, - false, payload, fieldNameValidator, ClusterConnectionMode.MULTIPLE, null) + false, payload, ClusterConnectionMode.MULTIPLE, null) output.truncateToPosition(0) message.encode(output, new OperationContext(IgnorableRequestContext.INSTANCE, sessionContext, Stub(TimeoutContext), null)) byteBuf = new ByteBufNIO(ByteBuffer.wrap(output.toByteArray())) @@ -255,7 +262,7 @@ class CommandMessageSpecification extends Specification { when: payload = payload.getNextSplit() message = new CommandMessage(namespace, insertCommand, fieldNameValidator, ReadPreference.primary(), messageSettings, - false, payload, fieldNameValidator, ClusterConnectionMode.MULTIPLE, null) + false, payload, ClusterConnectionMode.MULTIPLE, null) output.truncateToPosition(0) message.encode(output, new OperationContext(IgnorableRequestContext.INSTANCE, sessionContext, @@ -272,6 +279,9 @@ class CommandMessageSpecification extends Specification { byteBuf.getInt() == 1 << 1 
payload.getPosition() == 1 !payload.hasAnotherSplit() + + cleanup: + output.close() } def 'should respect the max batch count'() { @@ -280,10 +290,10 @@ class CommandMessageSpecification extends Specification { def payload = new SplittablePayload(INSERT, [new BsonDocument('a', new BsonBinary(new byte[900])), new BsonDocument('b', new BsonBinary(new byte[450])), new BsonDocument('c', new BsonBinary(new byte[450]))] - .withIndex().collect { doc, i -> new WriteRequestWithIndex(new InsertRequest(doc), i) }, true) + .withIndex().collect { doc, i -> new WriteRequestWithIndex(new InsertRequest(doc), i) }, true, fieldNameValidator) def message = new CommandMessage(namespace, command, fieldNameValidator, ReadPreference.primary(), messageSettings, - false, payload, fieldNameValidator, ClusterConnectionMode.MULTIPLE, null) - def output = new BasicOutputBuffer() + false, payload, ClusterConnectionMode.MULTIPLE, null) + def output = new ByteBufferBsonOutput(new SimpleBufferProvider()) def sessionContext = Stub(SessionContext) { getReadConcern() >> ReadConcern.DEFAULT } @@ -307,7 +317,7 @@ class CommandMessageSpecification extends Specification { when: payload = payload.getNextSplit() message = new CommandMessage(namespace, command, fieldNameValidator, ReadPreference.primary(), messageSettings, - false, payload, fieldNameValidator, ClusterConnectionMode.MULTIPLE, null) + false, payload, ClusterConnectionMode.MULTIPLE, null) output.truncateToPosition(0) message.encode(output, new OperationContext(IgnorableRequestContext.INSTANCE, sessionContext, Stub(TimeoutContext), null)) @@ -321,6 +331,9 @@ class CommandMessageSpecification extends Specification { byteBuf.getInt() == 1 << 1 payload.getPosition() == 1 !payload.hasAnotherSplit() + + cleanup: + output.close() } def 'should throw if payload document bigger than max document size'() { @@ -328,10 +341,10 @@ class CommandMessageSpecification extends Specification { def messageSettings = MessageSettings.builder().maxDocumentSize(900) .maxWireVersion(LATEST_WIRE_VERSION).build() def payload = new SplittablePayload(INSERT, [new BsonDocument('a', new BsonBinary(new byte[900]))] - .withIndex().collect { doc, i -> new WriteRequestWithIndex(new InsertRequest(doc), i) }, true) + .withIndex().collect { doc, i -> new WriteRequestWithIndex(new InsertRequest(doc), i) }, true, fieldNameValidator) def message = new CommandMessage(namespace, command, fieldNameValidator, ReadPreference.primary(), messageSettings, - false, payload, fieldNameValidator, ClusterConnectionMode.MULTIPLE, null) - def output = new BasicOutputBuffer() + false, payload, ClusterConnectionMode.MULTIPLE, null) + def output = new ByteBufferBsonOutput(new SimpleBufferProvider()) def sessionContext = Stub(SessionContext) { getReadConcern() >> ReadConcern.DEFAULT } @@ -342,16 +355,19 @@ class CommandMessageSpecification extends Specification { then: thrown(BsonMaximumSizeExceededException) + + cleanup: + output.close() } def 'should throw if wire version and sharded cluster does not support transactions'() { given: def messageSettings = MessageSettings.builder().serverType(ServerType.SHARD_ROUTER) .maxWireVersion(FOUR_DOT_ZERO_WIRE_VERSION).build() - def payload = new SplittablePayload(INSERT, [new BsonDocument('a', new BsonInt32(1))], true) + def payload = new SplittablePayload(INSERT, [new BsonDocument('a', new BsonInt32(1))], true, fieldNameValidator) def message = new CommandMessage(namespace, command, fieldNameValidator, ReadPreference.primary(), messageSettings, - false, payload, fieldNameValidator, 
ClusterConnectionMode.MULTIPLE, null) - def output = new BasicOutputBuffer() + false, payload, ClusterConnectionMode.MULTIPLE, null) + def output = new ByteBufferBsonOutput(new SimpleBufferProvider()) def sessionContext = Stub(SessionContext) { getReadConcern() >> ReadConcern.DEFAULT hasActiveTransaction() >> true @@ -363,6 +379,9 @@ class CommandMessageSpecification extends Specification { then: thrown(MongoClientException) + + cleanup: + output.close() } private static BsonDocument getCommandDocument(ByteBufNIO byteBuf, ReplyHeader replyHeader) { diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageTest.java index 4735811f025..1388ffcef22 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageTest.java +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageTest.java @@ -20,19 +20,39 @@ import com.mongodb.MongoOperationTimeoutException; import com.mongodb.ReadConcern; import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.client.model.bulk.ClientNamespacedWriteModel; import com.mongodb.connection.ClusterConnectionMode; import com.mongodb.connection.ServerType; +import com.mongodb.internal.IgnorableRequestContext; import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.client.model.bulk.ConcreteClientBulkWriteOptions; +import com.mongodb.internal.connection.MessageSequences.EmptyMessageSequences; +import com.mongodb.internal.operation.ClientBulkWriteOperation; +import com.mongodb.internal.operation.ClientBulkWriteOperation.ClientBulkWriteCommand.OpsAndNsInfo; import com.mongodb.internal.session.SessionContext; import com.mongodb.internal.validator.NoOpFieldNameValidator; +import org.bson.BsonArray; +import org.bson.BsonBoolean; import org.bson.BsonDocument; +import org.bson.BsonInt32; import org.bson.BsonString; import org.bson.BsonTimestamp; -import org.bson.io.BasicOutputBuffer; import org.junit.jupiter.api.Test; +import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static com.mongodb.MongoClientSettings.getDefaultCodecRegistry; +import static com.mongodb.client.model.bulk.ClientBulkWriteOptions.clientBulkWriteOptions; import static com.mongodb.internal.mockito.MongoMockito.mock; import static com.mongodb.internal.operation.ServerVersionHelper.FOUR_DOT_ZERO_WIRE_VERSION; +import static com.mongodb.internal.operation.ServerVersionHelper.LATEST_WIRE_VERSION; +import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; +import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doThrow; @@ -53,21 +73,22 @@ void encodeShouldThrowTimeoutExceptionWhenTimeoutContextIsCalled() { .serverType(ServerType.REPLICA_SET_SECONDARY) .sessionSupported(true) .build(), - true, null, null, ClusterConnectionMode.MULTIPLE, null); + true, EmptyMessageSequences.INSTANCE, ClusterConnectionMode.MULTIPLE, null); - BasicOutputBuffer bsonOutput = new BasicOutputBuffer(); - SessionContext sessionContext = mock(SessionContext.class); - TimeoutContext timeoutContext = mock(TimeoutContext.class, mock -> { - doThrow(new MongoOperationTimeoutException("test")).when(mock).runMaxTimeMS(any()); - }); - OperationContext operationContext = 
mock(OperationContext.class, mock -> { - when(mock.getSessionContext()).thenReturn(sessionContext); - when(mock.getTimeoutContext()).thenReturn(timeoutContext); - }); + try (ByteBufferBsonOutput bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider())) { + SessionContext sessionContext = mock(SessionContext.class); + TimeoutContext timeoutContext = mock(TimeoutContext.class, mock -> { + doThrow(new MongoOperationTimeoutException("test")).when(mock).runMaxTimeMS(any()); + }); + OperationContext operationContext = mock(OperationContext.class, mock -> { + when(mock.getSessionContext()).thenReturn(sessionContext); + when(mock.getTimeoutContext()).thenReturn(timeoutContext); + }); - //when & then - assertThrows(MongoOperationTimeoutException.class, () -> - commandMessage.encode(bsonOutput, operationContext)); + //when & then + assertThrows(MongoOperationTimeoutException.class, () -> + commandMessage.encode(bsonOutput, operationContext)); + } } @Test @@ -80,27 +101,72 @@ void encodeShouldNotAddExtraElementsFromTimeoutContextWhenConnectedToMongoCrypt( .sessionSupported(true) .cryptd(true) .build(), - true, null, null, ClusterConnectionMode.MULTIPLE, null); + true, EmptyMessageSequences.INSTANCE, ClusterConnectionMode.MULTIPLE, null); + + try (ByteBufferBsonOutput bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider())) { + SessionContext sessionContext = mock(SessionContext.class, mock -> { + when(mock.getClusterTime()).thenReturn(new BsonDocument("clusterTime", new BsonTimestamp(42, 1))); + when(mock.hasSession()).thenReturn(false); + when(mock.getReadConcern()).thenReturn(ReadConcern.DEFAULT); + when(mock.notifyMessageSent()).thenReturn(true); + when(mock.hasActiveTransaction()).thenReturn(false); + when(mock.isSnapshot()).thenReturn(false); + }); + TimeoutContext timeoutContext = mock(TimeoutContext.class); + OperationContext operationContext = mock(OperationContext.class, mock -> { + when(mock.getSessionContext()).thenReturn(sessionContext); + when(mock.getTimeoutContext()).thenReturn(timeoutContext); + }); - BasicOutputBuffer bsonOutput = new BasicOutputBuffer(); - SessionContext sessionContext = mock(SessionContext.class, mock -> { - when(mock.getClusterTime()).thenReturn(new BsonDocument("clusterTime", new BsonTimestamp(42, 1))); - when(mock.hasSession()).thenReturn(false); - when(mock.getReadConcern()).thenReturn(ReadConcern.DEFAULT); - when(mock.notifyMessageSent()).thenReturn(true); - when(mock.hasActiveTransaction()).thenReturn(false); - when(mock.isSnapshot()).thenReturn(false); - }); - TimeoutContext timeoutContext = mock(TimeoutContext.class); - OperationContext operationContext = mock(OperationContext.class, mock -> { - when(mock.getSessionContext()).thenReturn(sessionContext); - when(mock.getTimeoutContext()).thenReturn(timeoutContext); - }); + //when + commandMessage.encode(bsonOutput, operationContext); - //when - commandMessage.encode(bsonOutput, operationContext); + //then + verifyNoInteractions(timeoutContext); + } + } - //then - verifyNoInteractions(timeoutContext); + @Test + void getCommandDocumentFromClientBulkWrite() { + MongoNamespace ns = new MongoNamespace("db", "test"); + boolean retryWrites = false; + BsonDocument command = new BsonDocument("bulkWrite", new BsonInt32(1)) + .append("errorsOnly", BsonBoolean.valueOf(false)) + .append("ordered", BsonBoolean.valueOf(true)); + List documents = IntStream.range(0, 2).mapToObj(i -> new BsonDocument("_id", new BsonInt32(i))) + .collect(Collectors.toList()); + List writeModels = asList( + 
ClientNamespacedWriteModel.insertOne(ns, documents.get(0)), + ClientNamespacedWriteModel.insertOne(ns, documents.get(1))); + OpsAndNsInfo opsAndNsInfo = new OpsAndNsInfo( + retryWrites, + writeModels, + new ClientBulkWriteOperation( + writeModels, + clientBulkWriteOptions(), + WriteConcern.MAJORITY, + retryWrites, + getDefaultCodecRegistry() + ).new BatchEncoder(), + (ConcreteClientBulkWriteOptions) clientBulkWriteOptions(), + () -> 1L); + BsonDocument expectedCommandDocument = command.clone() + .append("$db", new BsonString(ns.getDatabaseName())) + .append("ops", new BsonArray(asList( + new BsonDocument("insert", new BsonInt32(0)).append("document", documents.get(0)), + new BsonDocument("insert", new BsonInt32(0)).append("document", documents.get(1))))) + .append("nsInfo", new BsonArray(singletonList(new BsonDocument("ns", new BsonString(ns.toString()))))); + CommandMessage commandMessage = new CommandMessage( + ns, command, NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), + MessageSettings.builder().maxWireVersion(LATEST_WIRE_VERSION).build(), true, opsAndNsInfo, ClusterConnectionMode.MULTIPLE, null); + try (ByteBufferBsonOutput output = new ByteBufferBsonOutput(new SimpleBufferProvider())) { + commandMessage.encode( + output, + new OperationContext( + IgnorableRequestContext.INSTANCE, NoOpSessionContext.INSTANCE, + new TimeoutContext(TimeoutSettings.DEFAULT), null)); + BsonDocument actualCommandDocument = commandMessage.getCommandDocument(output); + assertEquals(expectedCommandDocument, actualCommandDocument); + } } } diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerConnectionSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerConnectionSpecification.groovy index 282c4dbb868..be6fbe06b83 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerConnectionSpecification.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerConnectionSpecification.groovy @@ -49,8 +49,8 @@ class DefaultServerConnectionSpecification extends Specification { then: 1 * executor.executeAsync({ - compare(new CommandProtocolImpl('test', command, validator, ReadPreference.primary(), codec, true, null, null, - ClusterConnectionMode.MULTIPLE, OPERATION_CONTEXT), it) + compare(new CommandProtocolImpl('test', command, validator, ReadPreference.primary(), codec, true, + MessageSequences.EmptyMessageSequences.INSTANCE, ClusterConnectionMode.MULTIPLE, OPERATION_CONTEXT), it) }, internalConnection, OPERATION_CONTEXT.getSessionContext(), callback) } } diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ElementExtendingBsonWriterSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/ElementExtendingBsonWriterSpecification.groovy deleted file mode 100644 index f96e11acece..00000000000 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/ElementExtendingBsonWriterSpecification.groovy +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright 2008-present MongoDB, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.internal.connection - -import org.bson.BsonBinaryReader -import org.bson.BsonBinaryWriter -import org.bson.BsonDocument -import org.bson.BsonDocumentReader -import org.bson.BsonElement -import org.bson.BsonString -import org.bson.codecs.BsonDocumentCodec -import org.bson.codecs.DecoderContext -import org.bson.codecs.EncoderContext -import org.bson.io.BasicOutputBuffer -import org.bson.io.BsonOutput -import spock.lang.Specification - -import static org.bson.BsonHelper.documentWithValuesOfEveryType - -class ElementExtendingBsonWriterSpecification extends Specification { - - def 'should write all types'() { - given: - def binaryWriter = new BsonBinaryWriter(new BasicOutputBuffer()) - - when: - new BsonDocumentCodec().encode(new ElementExtendingBsonWriter(binaryWriter, []), documentWithValuesOfEveryType(), - EncoderContext.builder().build()) - - then: - getEncodedDocument(binaryWriter.getBsonOutput()) == documentWithValuesOfEveryType() - } - - def 'should extend with extra elements'() { - given: - def extraElements = [ - new BsonElement('$db', new BsonString('test')), - new BsonElement('$readPreference', new BsonDocument('mode', new BsonString('primary'))) - ] - def expectedDocument = documentWithValuesOfEveryType() - for (def cur : extraElements) { - expectedDocument.put(cur.name, cur.value) - } - def binaryWriter = new BsonBinaryWriter(new BasicOutputBuffer()) - def writer = new ElementExtendingBsonWriter(binaryWriter, extraElements) - - when: - new BsonDocumentCodec().encode(writer, documentWithValuesOfEveryType(), EncoderContext.builder().build()) - - then: - getEncodedDocument(binaryWriter.getBsonOutput()) == expectedDocument - } - - def 'should extend with extra elements when piping a reader at the top level'() { - given: - def extraElements = [ - new BsonElement('$db', new BsonString('test')), - new BsonElement('$readPreference', new BsonDocument('mode', new BsonString('primary'))) - ] - def expectedDocument = documentWithValuesOfEveryType() - for (def cur : extraElements) { - expectedDocument.put(cur.name, cur.value) - } - def binaryWriter = new BsonBinaryWriter(new BasicOutputBuffer()) - def writer = new ElementExtendingBsonWriter(binaryWriter, extraElements) - - when: - writer.pipe(new BsonDocumentReader(documentWithValuesOfEveryType())) - - then: - getEncodedDocument(binaryWriter.getBsonOutput()) == expectedDocument - } - - def 'should not extend with extra elements when piping a reader at nested level'() { - given: - def extraElements = [ - new BsonElement('$db', new BsonString('test')), - new BsonElement('$readPreference', new BsonDocument('mode', new BsonString('primary'))) - ] - def expectedDocument = new BsonDocument('pipedDocument', new BsonDocument()) - for (def cur : extraElements) { - expectedDocument.put(cur.name, cur.value) - } - - def binaryWriter = new BsonBinaryWriter(new BasicOutputBuffer()) - - def writer = new ElementExtendingBsonWriter(binaryWriter, extraElements) - - when: - writer.writeStartDocument() - writer.writeName('pipedDocument') - writer.pipe(new BsonDocumentReader(new BsonDocument())) - writer.writeEndDocument() - - then: - getEncodedDocument(binaryWriter.getBsonOutput()) == expectedDocument - } - - private static BsonDocument getEncodedDocument(BsonOutput buffer) { - new BsonDocumentCodec().decode(new BsonBinaryReader(buffer.getByteBuffers().get(0).asNIO()), - DecoderContext.builder().build()) - } -} diff --git 
a/driver-core/src/test/unit/com/mongodb/internal/connection/StreamHelper.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/StreamHelper.groovy index 251ce1a79fb..0a21e056176 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/StreamHelper.groovy +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/StreamHelper.groovy @@ -31,7 +31,6 @@ import org.bson.BsonWriter import org.bson.ByteBuf import org.bson.ByteBufNIO import org.bson.io.BasicOutputBuffer -import org.bson.io.OutputBuffer import org.bson.json.JsonReader import java.nio.ByteBuffer @@ -170,13 +169,17 @@ class StreamHelper { CommandMessage command = new CommandMessage(new MongoNamespace('admin', COMMAND_COLLECTION_NAME), new BsonDocument(LEGACY_HELLO, new BsonInt32(1)), NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), MessageSettings.builder().build(), SINGLE, null) - OutputBuffer outputBuffer = new BasicOutputBuffer() - command.encode(outputBuffer, new OperationContext( - IgnorableRequestContext.INSTANCE, - NoOpSessionContext.INSTANCE, - new TimeoutContext(ClusterFixture.TIMEOUT_SETTINGS), null)) - nextMessageId++ - [outputBuffer.byteBuffers, nextMessageId] + ByteBufferBsonOutput outputBuffer = new ByteBufferBsonOutput(new SimpleBufferProvider()) + try { + command.encode(outputBuffer, new OperationContext( + IgnorableRequestContext.INSTANCE, + NoOpSessionContext.INSTANCE, + new TimeoutContext(ClusterFixture.TIMEOUT_SETTINGS), null)) + nextMessageId++ + [outputBuffer.byteBuffers, nextMessageId] + } finally { + outputBuffer.close() + } } static helloAsync() { diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnection.java b/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnection.java index 7811cdec815..5fbc6dafde0 100644 --- a/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnection.java +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnection.java @@ -19,7 +19,6 @@ import com.mongodb.ReadPreference; import com.mongodb.connection.ConnectionDescription; import com.mongodb.internal.async.SingleResultCallback; -import com.mongodb.lang.Nullable; import org.bson.BsonDocument; import org.bson.FieldNameValidator; import org.bson.codecs.Decoder; @@ -65,8 +64,7 @@ public T command(final String database, final BsonDocument command, final Fi @Override public T command(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator, final ReadPreference readPreference, final Decoder commandResultDecoder, final OperationContext operationContext, - final boolean responseExpected, @Nullable final SplittablePayload payload, - @Nullable final FieldNameValidator payloadFieldNameValidator) { + final boolean responseExpected, final MessageSequences sequences) { return executeEnqueuedCommandBasedProtocol(operationContext); } @@ -80,8 +78,7 @@ public void commandAsync(final String database, final BsonDocument command, @Override public void commandAsync(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator, final ReadPreference readPreference, final Decoder commandResultDecoder, final OperationContext operationContext, - final boolean responseExpected, @Nullable final SplittablePayload payload, - @Nullable final FieldNameValidator payloadFieldNameValidator, final SingleResultCallback callback) { + final boolean responseExpected, final MessageSequences sequences, final SingleResultCallback callback) { 
executeEnqueuedCommandBasedProtocolAsync(operationContext, callback); } diff --git a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoCluster.kt b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoCluster.kt index 42313ed2b13..01d0e27ff58 100644 --- a/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoCluster.kt +++ b/driver-kotlin-coroutine/src/integration/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoCluster.kt @@ -25,6 +25,9 @@ import com.mongodb.client.ListDatabasesIterable import com.mongodb.client.MongoCluster as JMongoCluster import com.mongodb.client.MongoDatabase import com.mongodb.client.MongoIterable +import com.mongodb.client.model.bulk.ClientBulkWriteOptions +import com.mongodb.client.model.bulk.ClientBulkWriteResult +import com.mongodb.client.model.bulk.ClientNamespacedWriteModel import com.mongodb.kotlin.client.coroutine.MongoCluster import java.util.concurrent.TimeUnit import kotlinx.coroutines.runBlocking @@ -111,5 +114,25 @@ internal open class SyncMongoCluster(open val wrapped: MongoCluster) : JMongoClu ): ChangeStreamIterable = SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), pipeline, resultClass)) + override fun bulkWrite(models: MutableList): ClientBulkWriteResult = runBlocking { + wrapped.bulkWrite(models) + } + + override fun bulkWrite( + models: MutableList, + options: ClientBulkWriteOptions + ): ClientBulkWriteResult = runBlocking { wrapped.bulkWrite(models, options) } + + override fun bulkWrite( + clientSession: ClientSession, + models: MutableList + ): ClientBulkWriteResult = runBlocking { wrapped.bulkWrite(clientSession.unwrapped(), models) } + + override fun bulkWrite( + clientSession: ClientSession, + models: MutableList, + options: ClientBulkWriteOptions + ): ClientBulkWriteResult = runBlocking { wrapped.bulkWrite(clientSession.unwrapped(), models, options) } + private fun ClientSession.unwrapped() = (this as SyncClientSession).wrapped } diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoCluster.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoCluster.kt index 88df39dd23d..65ec0aa7f45 100644 --- a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoCluster.kt +++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoCluster.kt @@ -15,12 +15,20 @@ */ package com.mongodb.kotlin.client.coroutine +import com.mongodb.ClientBulkWriteException import com.mongodb.ClientSessionOptions +import com.mongodb.MongoClientSettings +import com.mongodb.MongoException import com.mongodb.ReadConcern import com.mongodb.ReadPreference import com.mongodb.WriteConcern import com.mongodb.annotations.Alpha import com.mongodb.annotations.Reason +import com.mongodb.client.model.bulk.ClientBulkWriteOptions +import com.mongodb.client.model.bulk.ClientBulkWriteResult +import com.mongodb.client.model.bulk.ClientNamespacedDeleteManyModel +import com.mongodb.client.model.bulk.ClientNamespacedUpdateManyModel +import com.mongodb.client.model.bulk.ClientNamespacedWriteModel import com.mongodb.reactivestreams.client.MongoCluster as JMongoCluster import java.util.concurrent.TimeUnit import kotlinx.coroutines.flow.Flow @@ -307,4 +315,111 @@ public open class MongoCluster protected constructor(private val wrapped: JMongo clientSession: ClientSession, pipeline: List = 
emptyList() ): ChangeStreamFlow = watch(clientSession, pipeline, T::class.java) + + /** + * Executes a client-level bulk write operation. This method is functionally equivalent to + * [bulkWrite(models, options)][bulkWrite] with the + * [default options][ClientBulkWriteOptions.clientBulkWriteOptions]. + * + * This operation supports [retryable writes][MongoClientSettings.getRetryWrites]. Depending on the number of + * `models`, encoded size of `models`, and the size limits in effect, executing this operation may require multiple + * `bulkWrite` commands. The eligibility for retries is determined per each `bulkWrite` command: + * [ClientNamespacedUpdateManyModel], [ClientNamespacedDeleteManyModel] in a command render it non-retryable. + * + * This operation is not supported by MongoDB Atlas Serverless instances. + * + * @param models The [individual write operations][ClientNamespacedWriteModel]. + * @return The [ClientBulkWriteResult] if the operation is successful. + * @throws ClientBulkWriteException If and only if the operation is unsuccessful or partially unsuccessful, and + * there is at least one of the following pieces of information to report: + * [ClientBulkWriteException.getWriteConcernErrors], [ClientBulkWriteException.getWriteErrors], + * [ClientBulkWriteException.getPartialResult]. + * @throws MongoException Only if the operation is unsuccessful. + * @see [BulkWrite command](https://www.mongodb.com/docs/manual/reference/command/bulkWrite/) + * @since 5.3 + */ + public suspend fun bulkWrite(models: List): ClientBulkWriteResult = + wrapped.bulkWrite(models).awaitSingle() + + /** + * Executes a client-level bulk write operation. + * + * This operation supports [retryable writes][MongoClientSettings.getRetryWrites]. Depending on the number of + * `models`, encoded size of `models`, and the size limits in effect, executing this operation may require multiple + * `bulkWrite` commands. The eligibility for retries is determined per each `bulkWrite` command: + * [ClientNamespacedUpdateManyModel], [ClientNamespacedDeleteManyModel] in a command render it non-retryable. + * + * This operation is not supported by MongoDB Atlas Serverless instances. + * + * @param models The [individual write operations][ClientNamespacedWriteModel]. + * @param options The [options][ClientBulkWriteOptions]. + * @return The [ClientBulkWriteResult] if the operation is successful. + * @throws ClientBulkWriteException If and only if the operation is unsuccessful or partially unsuccessful, and + * there is at least one of the following pieces of information to report: + * [ClientBulkWriteException.getWriteConcernErrors], [ClientBulkWriteException.getWriteErrors], + * [ClientBulkWriteException.getPartialResult]. + * @throws MongoException Only if the operation is unsuccessful. + * @see [BulkWrite command](https://www.mongodb.com/docs/manual/reference/command/bulkWrite/) + * @since 5.3 + */ + public suspend fun bulkWrite( + models: List, + options: ClientBulkWriteOptions + ): ClientBulkWriteResult = wrapped.bulkWrite(models, options).awaitSingle() + + /** + * Executes a client-level bulk write operation. This method is functionally equivalent to + * [bulkWrite(clientSession, models, options)][bulkWrite] with the + * [default options][ClientBulkWriteOptions.clientBulkWriteOptions]. + * + * This operation supports [retryable writes][MongoClientSettings.getRetryWrites]. 
Depending on the number of + * `models`, encoded size of `models`, and the size limits in effect, executing this operation may require multiple + * `bulkWrite` commands. The eligibility for retries is determined per each `bulkWrite` command: + * [ClientNamespacedUpdateManyModel], [ClientNamespacedDeleteManyModel] in a command render it non-retryable. + * + * This operation is not supported by MongoDB Atlas Serverless instances. + * + * @param clientSession The [client session][ClientSession] with which to associate this operation. + * @param models The [individual write operations][ClientNamespacedWriteModel]. + * @return The [ClientBulkWriteResult] if the operation is successful. + * @throws ClientBulkWriteException If and only if the operation is unsuccessful or partially unsuccessful, and + * there is at least one of the following pieces of information to report: + * [ClientBulkWriteException.getWriteConcernErrors], [ClientBulkWriteException.getWriteErrors], + * [ClientBulkWriteException.getPartialResult]. + * @throws MongoException Only if the operation is unsuccessful. + * @see [BulkWrite command](https://www.mongodb.com/docs/manual/reference/command/bulkWrite/) + * @since 5.3 + */ + public suspend fun bulkWrite( + clientSession: ClientSession, + models: List + ): ClientBulkWriteResult = wrapped.bulkWrite(clientSession.wrapped, models).awaitSingle() + + /** + * Executes a client-level bulk write operation. + * + * This operation supports [retryable writes][MongoClientSettings.getRetryWrites]. Depending on the number of + * `models`, encoded size of `models`, and the size limits in effect, executing this operation may require multiple + * `bulkWrite` commands. The eligibility for retries is determined per each `bulkWrite` command: + * [ClientNamespacedUpdateManyModel], [ClientNamespacedDeleteManyModel] in a command render it non-retryable. + * + * This operation is not supported by MongoDB Atlas Serverless instances. + * + * @param clientSession The [client session][ClientSession] with which to associate this operation. + * @param models The [individual write operations][ClientNamespacedWriteModel]. + * @param options The [options][ClientBulkWriteOptions]. + * @return The [ClientBulkWriteResult] if the operation is successful. + * @throws ClientBulkWriteException If and only if the operation is unsuccessful or partially unsuccessful, and + * there is at least one of the following pieces of information to report: + * [ClientBulkWriteException.getWriteConcernErrors], [ClientBulkWriteException.getWriteErrors], + * [ClientBulkWriteException.getPartialResult]. + * @throws MongoException Only if the operation is unsuccessful. 
+ * @see [BulkWrite command](https://www.mongodb.com/docs/manual/reference/command/bulkWrite/) + * @since 5.3 + */ + public suspend fun bulkWrite( + clientSession: ClientSession, + models: List, + options: ClientBulkWriteOptions + ): ClientBulkWriteResult = wrapped.bulkWrite(clientSession.wrapped, models, options).awaitSingle() } diff --git a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MongoClientTest.kt b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MongoClientTest.kt index 9ac4805f6fa..fd66e4de31b 100644 --- a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MongoClientTest.kt +++ b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MongoClientTest.kt @@ -16,6 +16,9 @@ package com.mongodb.kotlin.client.coroutine import com.mongodb.ClientSessionOptions +import com.mongodb.MongoNamespace +import com.mongodb.client.model.bulk.ClientBulkWriteOptions +import com.mongodb.client.model.bulk.ClientNamespacedWriteModel import com.mongodb.reactivestreams.client.MongoClient as JMongoClient import kotlin.reflect.full.declaredFunctions import kotlin.test.assertEquals @@ -166,4 +169,29 @@ class MongoClientTest { verify(wrapped, times(2)).watch(clientSession.wrapped, pipeline, BsonDocument::class.java) verifyNoMoreInteractions(wrapped) } + + @Test + fun shouldCallTheUnderlyingBulkWrite() { + val mongoClient = MongoClient(wrapped) + val requests = listOf(ClientNamespacedWriteModel.insertOne(MongoNamespace("test.test"), Document())) + val options = ClientBulkWriteOptions.clientBulkWriteOptions().bypassDocumentValidation(true) + + whenever(wrapped.bulkWrite(requests)).doReturn(Mono.fromCallable { mock() }) + whenever(wrapped.bulkWrite(requests, options)).doReturn(Mono.fromCallable { mock() }) + whenever(wrapped.bulkWrite(clientSession.wrapped, requests)).doReturn(Mono.fromCallable { mock() }) + whenever(wrapped.bulkWrite(clientSession.wrapped, requests, options)).doReturn(Mono.fromCallable { mock() }) + + runBlocking { + mongoClient.bulkWrite(requests) + mongoClient.bulkWrite(requests, options) + mongoClient.bulkWrite(clientSession, requests) + mongoClient.bulkWrite(clientSession, requests, options) + } + + verify(wrapped).bulkWrite(requests) + verify(wrapped).bulkWrite(requests, options) + verify(wrapped).bulkWrite(clientSession.wrapped, requests) + verify(wrapped).bulkWrite(clientSession.wrapped, requests, options) + verifyNoMoreInteractions(wrapped) + } } diff --git a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoCluster.kt b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoCluster.kt index 7b948fa6d1d..b86f2447a17 100644 --- a/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoCluster.kt +++ b/driver-kotlin-sync/src/integration/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoCluster.kt @@ -25,6 +25,9 @@ import com.mongodb.client.ListDatabasesIterable import com.mongodb.client.MongoCluster as JMongoCluster import com.mongodb.client.MongoDatabase import com.mongodb.client.MongoIterable +import com.mongodb.client.model.bulk.ClientBulkWriteOptions +import com.mongodb.client.model.bulk.ClientBulkWriteResult +import com.mongodb.client.model.bulk.ClientNamespacedWriteModel import com.mongodb.kotlin.client.MongoCluster import java.util.concurrent.TimeUnit import org.bson.Document @@ -110,5 +113,24 @@ internal open class SyncMongoCluster(open val wrapped: MongoCluster) : 
JMongoClu ): ChangeStreamIterable = SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), pipeline, resultClass)) + override fun bulkWrite(models: MutableList): ClientBulkWriteResult = + wrapped.bulkWrite(models) + + override fun bulkWrite( + models: MutableList, + options: ClientBulkWriteOptions + ): ClientBulkWriteResult = wrapped.bulkWrite(models, options) + + override fun bulkWrite( + clientSession: ClientSession, + models: MutableList + ): ClientBulkWriteResult = wrapped.bulkWrite(clientSession.unwrapped(), models) + + override fun bulkWrite( + clientSession: ClientSession, + models: MutableList, + options: ClientBulkWriteOptions + ): ClientBulkWriteResult = wrapped.bulkWrite(clientSession.unwrapped(), models, options) + private fun ClientSession.unwrapped() = (this as SyncClientSession).wrapped } diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoCluster.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoCluster.kt index f541aaf1a9f..90fb45d1dbd 100644 --- a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoCluster.kt +++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoCluster.kt @@ -15,13 +15,21 @@ */ package com.mongodb.kotlin.client +import com.mongodb.ClientBulkWriteException import com.mongodb.ClientSessionOptions +import com.mongodb.MongoClientSettings +import com.mongodb.MongoException import com.mongodb.ReadConcern import com.mongodb.ReadPreference import com.mongodb.WriteConcern import com.mongodb.annotations.Alpha import com.mongodb.annotations.Reason import com.mongodb.client.MongoCluster as JMongoCluster +import com.mongodb.client.model.bulk.ClientBulkWriteOptions +import com.mongodb.client.model.bulk.ClientBulkWriteResult +import com.mongodb.client.model.bulk.ClientNamespacedDeleteManyModel +import com.mongodb.client.model.bulk.ClientNamespacedUpdateManyModel +import com.mongodb.client.model.bulk.ClientNamespacedWriteModel import java.util.concurrent.TimeUnit import org.bson.Document import org.bson.codecs.configuration.CodecRegistry @@ -303,4 +311,110 @@ public open class MongoCluster protected constructor(private val wrapped: JMongo clientSession: ClientSession, pipeline: List = emptyList() ): ChangeStreamIterable = watch(clientSession, pipeline, T::class.java) + + /** + * Executes a client-level bulk write operation. This method is functionally equivalent to + * [bulkWrite(models, options)][bulkWrite] with the + * [default options][ClientBulkWriteOptions.clientBulkWriteOptions]. + * + * This operation supports [retryable writes][MongoClientSettings.getRetryWrites]. Depending on the number of + * `models`, encoded size of `models`, and the size limits in effect, executing this operation may require multiple + * `bulkWrite` commands. The eligibility for retries is determined per each `bulkWrite` command: + * [ClientNamespacedUpdateManyModel], [ClientNamespacedDeleteManyModel] in a command render it non-retryable. + * + * This operation is not supported by MongoDB Atlas Serverless instances. + * + * @param models The [individual write operations][ClientNamespacedWriteModel]. + * @return The [ClientBulkWriteResult] if the operation is successful. + * @throws ClientBulkWriteException If and only if the operation is unsuccessful or partially unsuccessful, and + * there is at least one of the following pieces of information to report: + * [ClientBulkWriteException.getWriteConcernErrors], [ClientBulkWriteException.getWriteErrors], + * [ClientBulkWriteException.getPartialResult]. 
+ * @throws MongoException Only if the operation is unsuccessful. + * @see [BulkWrite command](https://www.mongodb.com/docs/manual/reference/command/bulkWrite/) + * @since 5.3 + */ + public fun bulkWrite(models: List): ClientBulkWriteResult = wrapped.bulkWrite(models) + + /** + * Executes a client-level bulk write operation. + * + * This operation supports [retryable writes][MongoClientSettings.getRetryWrites]. Depending on the number of + * `models`, encoded size of `models`, and the size limits in effect, executing this operation may require multiple + * `bulkWrite` commands. The eligibility for retries is determined per each `bulkWrite` command: + * [ClientNamespacedUpdateManyModel], [ClientNamespacedDeleteManyModel] in a command render it non-retryable. + * + * This operation is not supported by MongoDB Atlas Serverless instances. + * + * @param models The [individual write operations][ClientNamespacedWriteModel]. + * @param options The [options][ClientBulkWriteOptions]. + * @return The [ClientBulkWriteResult] if the operation is successful. + * @throws ClientBulkWriteException If and only if the operation is unsuccessful or partially unsuccessful, and + * there is at least one of the following pieces of information to report: + * [ClientBulkWriteException.getWriteConcernErrors], [ClientBulkWriteException.getWriteErrors], + * [ClientBulkWriteException.getPartialResult]. + * @throws MongoException Only if the operation is unsuccessful. + * @see [BulkWrite command](https://www.mongodb.com/docs/manual/reference/command/bulkWrite/) + * @since 5.3 + */ + public fun bulkWrite( + models: List, + options: ClientBulkWriteOptions + ): ClientBulkWriteResult = wrapped.bulkWrite(models, options) + + /** + * Executes a client-level bulk write operation. This method is functionally equivalent to + * [bulkWrite(clientSession, models, options)][bulkWrite] with the + * [default options][ClientBulkWriteOptions.clientBulkWriteOptions]. + * + * This operation supports [retryable writes][MongoClientSettings.getRetryWrites]. Depending on the number of + * `models`, encoded size of `models`, and the size limits in effect, executing this operation may require multiple + * `bulkWrite` commands. The eligibility for retries is determined per each `bulkWrite` command: + * [ClientNamespacedUpdateManyModel], [ClientNamespacedDeleteManyModel] in a command render it non-retryable. + * + * This operation is not supported by MongoDB Atlas Serverless instances. + * + * @param clientSession The [client session][ClientSession] with which to associate this operation. + * @param models The [individual write operations][ClientNamespacedWriteModel]. + * @return The [ClientBulkWriteResult] if the operation is successful. + * @throws ClientBulkWriteException If and only if the operation is unsuccessful or partially unsuccessful, and + * there is at least one of the following pieces of information to report: + * [ClientBulkWriteException.getWriteConcernErrors], [ClientBulkWriteException.getWriteErrors], + * [ClientBulkWriteException.getPartialResult]. + * @throws MongoException Only if the operation is unsuccessful. + * @see [BulkWrite command](https://www.mongodb.com/docs/manual/reference/command/bulkWrite/) + * @since 5.3 + */ + public fun bulkWrite( + clientSession: ClientSession, + models: List + ): ClientBulkWriteResult = wrapped.bulkWrite(clientSession.wrapped, models) + + /** + * Executes a client-level bulk write operation. + * + * This operation supports [retryable writes][com.mongodb.MongoClientSettings.getRetryWrites]. 
Depending on the + * number of `models`, encoded size of `models`, and the size limits in effect, executing this operation may require + * multiple `bulkWrite` commands. The eligibility for retries is determined per each `bulkWrite` command: + * [ClientNamespacedUpdateManyModel], [ClientNamespacedDeleteManyModel] in a command render it non-retryable. + * + * This operation is not supported by MongoDB Atlas Serverless instances. + * + * @param clientSession The [client session][ClientSession] with which to associate this operation. + * @param models The [individual write operations][ClientNamespacedWriteModel]. + * @param options The [options][ClientBulkWriteOptions]. + * @return The [ClientBulkWriteResult] if the operation is successful. + * @throws ClientBulkWriteException If and only if the operation is unsuccessful or partially unsuccessful, and + * there is at least one of the following pieces of information to report: + * [ClientBulkWriteException.getWriteConcernErrors], [ClientBulkWriteException.getWriteErrors], + * [ClientBulkWriteException.getPartialResult]. + * @throws MongoException Only if the operation is unsuccessful. + * @see [BulkWrite command](https://www.mongodb.com/docs/manual/reference/command/bulkWrite/) + * @since 5.3 + */ + public fun bulkWrite( + clientSession: ClientSession, + models: List, + options: ClientBulkWriteOptions + ): ClientBulkWriteResult = wrapped.bulkWrite(clientSession.wrapped, models, options) } diff --git a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoClientTest.kt b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoClientTest.kt index 0999e77080e..0aa0c582ff4 100644 --- a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoClientTest.kt +++ b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoClientTest.kt @@ -16,7 +16,10 @@ package com.mongodb.kotlin.client import com.mongodb.ClientSessionOptions +import com.mongodb.MongoNamespace import com.mongodb.client.MongoClient as JMongoClient +import com.mongodb.client.model.bulk.ClientBulkWriteOptions +import com.mongodb.client.model.bulk.ClientNamespacedWriteModel import kotlin.reflect.full.declaredFunctions import kotlin.reflect.full.declaredMemberProperties import kotlin.test.assertEquals @@ -168,4 +171,27 @@ class MongoClientTest { verify(wrapped, times(2)).watch(clientSession.wrapped, pipeline, BsonDocument::class.java) verifyNoMoreInteractions(wrapped) } + + @Test + fun shouldCallTheUnderlyingBulkWrite() { + val mongoClient = MongoClient(wrapped) + val requests = listOf(ClientNamespacedWriteModel.insertOne(MongoNamespace("test.test"), Document())) + val options = ClientBulkWriteOptions.clientBulkWriteOptions().bypassDocumentValidation(true) + + whenever(wrapped.bulkWrite(requests)).doReturn(mock()) + whenever(wrapped.bulkWrite(requests, options)).doReturn(mock()) + whenever(wrapped.bulkWrite(clientSession.wrapped, requests)).doReturn(mock()) + whenever(wrapped.bulkWrite(clientSession.wrapped, requests, options)).doReturn(mock()) + + mongoClient.bulkWrite(requests) + mongoClient.bulkWrite(requests, options) + mongoClient.bulkWrite(clientSession, requests) + mongoClient.bulkWrite(clientSession, requests, options) + + verify(wrapped).bulkWrite(requests) + verify(wrapped).bulkWrite(requests, options) + verify(wrapped).bulkWrite(clientSession.wrapped, requests) + verify(wrapped).bulkWrite(clientSession.wrapped, requests, options) + verifyNoMoreInteractions(wrapped) + } } diff --git 
a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoCluster.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoCluster.java index ef7c0ddb79d..edcc8f29408 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoCluster.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoCluster.java @@ -16,7 +16,10 @@ package com.mongodb.reactivestreams.client; +import com.mongodb.ClientBulkWriteException; import com.mongodb.ClientSessionOptions; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoException; import com.mongodb.MongoNamespace; import com.mongodb.ReadConcern; import com.mongodb.ReadPreference; @@ -24,6 +27,11 @@ import com.mongodb.annotations.Alpha; import com.mongodb.annotations.Immutable; import com.mongodb.annotations.Reason; +import com.mongodb.client.model.bulk.ClientBulkWriteOptions; +import com.mongodb.client.model.bulk.ClientBulkWriteResult; +import com.mongodb.client.model.bulk.ClientNamespacedDeleteManyModel; +import com.mongodb.client.model.bulk.ClientNamespacedUpdateManyModel; +import com.mongodb.client.model.bulk.ClientNamespacedWriteModel; import com.mongodb.lang.Nullable; import org.bson.Document; import org.bson.codecs.configuration.CodecRegistry; @@ -353,4 +361,135 @@ public interface MongoCluster { * @mongodb.driver.dochub core/changestreams Change Streams */ ChangeStreamPublisher watch(ClientSession clientSession, List pipeline, Class resultClass); + + /** + * Executes a client-level bulk write operation. + * This method is functionally equivalent to {@link #bulkWrite(List, ClientBulkWriteOptions)} + * with the {@linkplain ClientBulkWriteOptions#clientBulkWriteOptions() default options}. + *

+     * This operation supports {@linkplain MongoClientSettings#getRetryWrites() retryable writes}.
+     * Depending on the number of {@code models}, encoded size of {@code models}, and the size limits in effect,
+     * executing this operation may require multiple {@code bulkWrite} commands.
+     * The eligibility for retries is determined per each {@code bulkWrite} command:
+     * {@link ClientNamespacedUpdateManyModel}, {@link ClientNamespacedDeleteManyModel} in a command render it non-retryable.</p>
+     * <p>
+     * This operation is not supported by MongoDB Atlas Serverless instances.</p>
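+     * <p>
+     * A minimal usage sketch, assuming an existing reactive {@code MongoCluster} named {@code client} and
+     * Project Reactor on the classpath (the namespace and document values are placeholders):</p>
+     * <pre>{@code
+     * List<ClientNamespacedWriteModel> models = Arrays.asList(
+     *         ClientNamespacedWriteModel.insertOne(
+     *                 new MongoNamespace("db", "coll"),
+     *                 new Document("x", 1)));
+     * // the returned Publisher is deferred: nothing is executed until it is subscribed to
+     * Mono.from(client.bulkWrite(models)).subscribe();
+     * }</pre>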

    + * + * @param models The {@linkplain ClientNamespacedWriteModel individual write operations}. + * @return The {@link Publisher} signalling at most one element {@link ClientBulkWriteResult} if the operation is successful, + * or the following errors: + *
+     * <ul>
+     *     <li>
+     *         {@link ClientBulkWriteException} - If and only if the operation is unsuccessful or partially unsuccessful,
+     *         and there is at least one of the following pieces of information to report:
+     *         {@link ClientBulkWriteException#getWriteConcernErrors()}, {@link ClientBulkWriteException#getWriteErrors()},
+     *         {@link ClientBulkWriteException#getPartialResult()}.</li>
+     *     <li>
+     *         {@link MongoException} - Only if the operation is unsuccessful.</li>
+     * </ul>
+     *
    + * @since 5.3 + * @mongodb.server.release 8.0 + * @mongodb.driver.manual reference/command/bulkWrite/ bulkWrite + */ + Publisher bulkWrite(List models); + + /** + * Executes a client-level bulk write operation. + *

+     * This operation supports {@linkplain MongoClientSettings#getRetryWrites() retryable writes}.
+     * Depending on the number of {@code models}, encoded size of {@code models}, and the size limits in effect,
+     * executing this operation may require multiple {@code bulkWrite} commands.
+     * The eligibility for retries is determined per each {@code bulkWrite} command:
+     * {@link ClientNamespacedUpdateManyModel}, {@link ClientNamespacedDeleteManyModel} in a command render it non-retryable.</p>
+     * <p>
+     * This operation is not supported by MongoDB Atlas Serverless instances.</p>
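+     * <p>
+     * A usage sketch with non-default options, assuming an existing reactive {@code MongoCluster} named {@code client},
+     * a prepared {@code models} list, and Project Reactor on the classpath:</p>
+     * <pre>{@code
+     * // bypassDocumentValidation is only one example of a configurable option
+     * ClientBulkWriteOptions options = ClientBulkWriteOptions.clientBulkWriteOptions()
+     *         .bypassDocumentValidation(true);
+     * ClientBulkWriteResult result = Mono.from(client.bulkWrite(models, options)).block();
+     * }</pre>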

    + * + * @param models The {@linkplain ClientNamespacedWriteModel individual write operations}. + * @param options The options. + * @return The {@link Publisher} signalling at most one element {@link ClientBulkWriteResult} if the operation is successful, + * or the following errors: + *
+     * <ul>
+     *     <li>
+     *         {@link ClientBulkWriteException} - If and only if the operation is unsuccessful or partially unsuccessful,
+     *         and there is at least one of the following pieces of information to report:
+     *         {@link ClientBulkWriteException#getWriteConcernErrors()}, {@link ClientBulkWriteException#getWriteErrors()},
+     *         {@link ClientBulkWriteException#getPartialResult()}.</li>
+     *     <li>
+     *         {@link MongoException} - Only if the operation is unsuccessful.</li>
+     * </ul>
+     *
    + * @since 5.3 + * @mongodb.server.release 8.0 + * @mongodb.driver.manual reference/command/bulkWrite/ bulkWrite + */ + Publisher bulkWrite( + List models, + ClientBulkWriteOptions options); + + /** + * Executes a client-level bulk write operation. + * This method is functionally equivalent to {@link #bulkWrite(ClientSession, List, ClientBulkWriteOptions)} + * with the {@linkplain ClientBulkWriteOptions#clientBulkWriteOptions() default options}. + *

+     * This operation supports {@linkplain MongoClientSettings#getRetryWrites() retryable writes}.
+     * Depending on the number of {@code models}, encoded size of {@code models}, and the size limits in effect,
+     * executing this operation may require multiple {@code bulkWrite} commands.
+     * The eligibility for retries is determined per each {@code bulkWrite} command:
+     * {@link ClientNamespacedUpdateManyModel}, {@link ClientNamespacedDeleteManyModel} in a command render it non-retryable.</p>
+     * <p>
+     * This operation is not supported by MongoDB Atlas Serverless instances.</p>
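+     * <p>
+     * A usage sketch with an explicit session, assuming an existing reactive {@code MongoClient} named {@code client},
+     * a prepared {@code models} list, and Project Reactor on the classpath:</p>
+     * <pre>{@code
+     * // startSession() also returns a Publisher, so the session is acquired reactively
+     * Mono.from(client.startSession())
+     *         .flatMap(session -> Mono.from(client.bulkWrite(session, models))
+     *                 .doFinally(signal -> session.close()))
+     *         .subscribe();
+     * }</pre>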

    + * + * @param clientSession The {@linkplain ClientSession client session} with which to associate this operation. + * @param models The {@linkplain ClientNamespacedWriteModel individual write operations}. + * @return The {@link Publisher} signalling at most one element {@link ClientBulkWriteResult} if the operation is successful, + * or the following errors: + *
+     * <ul>
+     *     <li>
+     *         {@link ClientBulkWriteException} - If and only if the operation is unsuccessful or partially unsuccessful,
+     *         and there is at least one of the following pieces of information to report:
+     *         {@link ClientBulkWriteException#getWriteConcernErrors()}, {@link ClientBulkWriteException#getWriteErrors()},
+     *         {@link ClientBulkWriteException#getPartialResult()}.</li>
+     *     <li>
+     *         {@link MongoException} - Only if the operation is unsuccessful.</li>
+     * </ul>
+     *
    + * @since 5.3 + * @mongodb.server.release 8.0 + * @mongodb.driver.manual reference/command/bulkWrite/ bulkWrite + */ + Publisher bulkWrite( + ClientSession clientSession, + List models); + + /** + * Executes a client-level bulk write operation. + *

+     * This operation supports {@linkplain MongoClientSettings#getRetryWrites() retryable writes}.
+     * Depending on the number of {@code models}, encoded size of {@code models}, and the size limits in effect,
+     * executing this operation may require multiple {@code bulkWrite} commands.
+     * The eligibility for retries is determined per each {@code bulkWrite} command:
+     * {@link ClientNamespacedUpdateManyModel}, {@link ClientNamespacedDeleteManyModel} in a command render it non-retryable.</p>
+     * <p>
+     * This operation is not supported by MongoDB Atlas Serverless instances.</p>

    + * + * @param clientSession The {@linkplain ClientSession client session} with which to associate this operation. + * @param models The {@linkplain ClientNamespacedWriteModel individual write operations}. + * @param options The options. + * @return The {@link Publisher} signalling at most one element {@link ClientBulkWriteResult} if the operation is successful, + * or the following errors: + *
+     * <ul>
+     *     <li>
+     *         {@link ClientBulkWriteException} - If and only if the operation is unsuccessful or partially unsuccessful,
+     *         and there is at least one of the following pieces of information to report:
+     *         {@link ClientBulkWriteException#getWriteConcernErrors()}, {@link ClientBulkWriteException#getWriteErrors()},
+     *         {@link ClientBulkWriteException#getPartialResult()}.</li>
+     *     <li>
+     *         {@link MongoException} - Only if the operation is unsuccessful.</li>
+     * </ul>
+     *
    + * @since 5.3 + * @mongodb.server.release 8.0 + * @mongodb.driver.manual reference/command/bulkWrite/ bulkWrite + */ + Publisher bulkWrite( + ClientSession clientSession, + List models, + ClientBulkWriteOptions options); } diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoClientImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoClientImpl.java index 27a0c9195c3..3d4822eb7e3 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoClientImpl.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoClientImpl.java @@ -24,6 +24,9 @@ import com.mongodb.ReadConcern; import com.mongodb.ReadPreference; import com.mongodb.WriteConcern; +import com.mongodb.client.model.bulk.ClientBulkWriteOptions; +import com.mongodb.client.model.bulk.ClientBulkWriteResult; +import com.mongodb.client.model.bulk.ClientNamespacedWriteModel; import com.mongodb.connection.ClusterDescription; import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.connection.Cluster; @@ -229,6 +232,30 @@ public ChangeStreamPublisher watch( return delegate.watch(clientSession, pipeline, resultClass); } + @Override + public Publisher bulkWrite(final List models) { + return delegate.bulkWrite(models); + } + + @Override + public Publisher bulkWrite(final List models, + final ClientBulkWriteOptions options) { + return delegate.bulkWrite(models, options); + } + + @Override + public Publisher bulkWrite(final ClientSession clientSession, + final List models) { + return delegate.bulkWrite(clientSession, models); + } + + @Override + public Publisher bulkWrite(final ClientSession clientSession, + final List models, + final ClientBulkWriteOptions options) { + return delegate.bulkWrite(clientSession, models, options); + } + @Override public Publisher startSession() { return delegate.startSession(); diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoClusterImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoClusterImpl.java index 72bcf53e303..04028ecc684 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoClusterImpl.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoClusterImpl.java @@ -20,6 +20,9 @@ import com.mongodb.ReadConcern; import com.mongodb.ReadPreference; import com.mongodb.WriteConcern; +import com.mongodb.client.model.bulk.ClientBulkWriteOptions; +import com.mongodb.client.model.bulk.ClientBulkWriteResult; +import com.mongodb.client.model.bulk.ClientNamespacedWriteModel; import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.client.model.changestream.ChangeStreamLevel; import com.mongodb.internal.connection.Cluster; @@ -42,6 +45,7 @@ import java.util.List; import java.util.concurrent.TimeUnit; +import static com.mongodb.assertions.Assertions.isTrueArgument; import static com.mongodb.assertions.Assertions.notNull; import static java.util.concurrent.TimeUnit.MILLISECONDS; @@ -237,4 +241,40 @@ public ChangeStreamPublisher watch(final ClientSession clientSession, fin resultClass, pipeline, ChangeStreamLevel.CLIENT); } + @Override + public Publisher bulkWrite(final List clientWriteModels) { + notNull("clientWriteModels", clientWriteModels); + isTrueArgument("`clientWriteModels` must not be empty", !clientWriteModels.isEmpty()); + return 
mongoOperationPublisher.clientBulkWrite(null, clientWriteModels, null); + } + + @Override + public Publisher bulkWrite(final List clientWriteModels, + final ClientBulkWriteOptions options) { + notNull("clientWriteModels", clientWriteModels); + isTrueArgument("`clientWriteModels` must not be empty", !clientWriteModels.isEmpty()); + notNull("options", options); + return mongoOperationPublisher.clientBulkWrite(null, clientWriteModels, options); + } + + @Override + public Publisher bulkWrite(final ClientSession clientSession, + final List clientWriteModels) { + notNull("clientSession", clientSession); + notNull("clientWriteModels", clientWriteModels); + isTrueArgument("`clientWriteModels` must not be empty", !clientWriteModels.isEmpty()); + return mongoOperationPublisher.clientBulkWrite(clientSession, clientWriteModels, null); + } + + @Override + public Publisher bulkWrite(final ClientSession clientSession, + final List clientWriteModels, + final ClientBulkWriteOptions options) { + notNull("clientSession", clientSession); + notNull("clientWriteModels", clientWriteModels); + isTrueArgument("`clientWriteModels` must not be empty", !clientWriteModels.isEmpty()); + notNull("options", options); + return mongoOperationPublisher.clientBulkWrite(clientSession, clientWriteModels, options); + } + } diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoOperationPublisher.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoOperationPublisher.java index 5ccea518cb5..58030f75fa9 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoOperationPublisher.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoOperationPublisher.java @@ -50,6 +50,9 @@ import com.mongodb.client.model.SearchIndexModel; import com.mongodb.client.model.UpdateOptions; import com.mongodb.client.model.WriteModel; +import com.mongodb.client.model.bulk.ClientBulkWriteOptions; +import com.mongodb.client.model.bulk.ClientBulkWriteResult; +import com.mongodb.client.model.bulk.ClientNamespacedWriteModel; import com.mongodb.client.result.DeleteResult; import com.mongodb.client.result.InsertManyResult; import com.mongodb.client.result.InsertOneResult; @@ -80,6 +83,7 @@ import java.util.function.Function; import java.util.function.Supplier; +import static com.mongodb.assertions.Assertions.isTrue; import static com.mongodb.assertions.Assertions.notNull; import static java.util.Collections.singletonList; import static org.bson.codecs.configuration.CodecRegistries.withUuidRepresentation; @@ -91,6 +95,7 @@ public final class MongoOperationPublisher { private final AsyncOperations operations; private final UuidRepresentation uuidRepresentation; + @Nullable private final AutoEncryptionSettings autoEncryptionSettings; private final OperationExecutor executor; @@ -289,6 +294,16 @@ Publisher bulkWrite( () -> operations.bulkWrite(notNull("requests", requests), notNull("options", options)), clientSession); } + Publisher clientBulkWrite( + @Nullable final ClientSession clientSession, + final List clientWriteModels, + @Nullable final ClientBulkWriteOptions options) { + isTrue("`autoEncryptionSettings` is null, as bulkWrite does not currently support automatic encryption", autoEncryptionSettings == null); + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.clientBulkWriteOperation(clientWriteModels, options), clientSession); + } + Publisher insertOne(@Nullable final 
ClientSession clientSession, final T document, final InsertOneOptions options) { return createSingleWriteRequestMono(() -> operations.insertOne(notNull("document", document), notNull("options", options)), diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/OperationExecutorImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/OperationExecutorImpl.java index 1c89ab81d34..0a4b0318d1c 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/OperationExecutorImpl.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/OperationExecutorImpl.java @@ -33,6 +33,7 @@ import com.mongodb.internal.connection.ReadConcernAwareNoOpSessionContext; import com.mongodb.internal.operation.AsyncReadOperation; import com.mongodb.internal.operation.AsyncWriteOperation; +import com.mongodb.internal.operation.OperationHelper; import com.mongodb.lang.Nullable; import com.mongodb.reactivestreams.client.ClientSession; import com.mongodb.reactivestreams.client.ReactiveContextProvider; @@ -96,8 +97,9 @@ public Mono execute(final AsyncReadOperation operation, final ReadPref sinkToCallback(sink).onResult(result, t); } })).doOnError((t) -> { - labelException(session, t); - unpinServerAddressOnTransientTransactionError(session, t); + Throwable exceptionToHandle = t instanceof MongoException ? OperationHelper.unwrap((MongoException) t) : t; + labelException(session, exceptionToHandle); + unpinServerAddressOnTransientTransactionError(session, exceptionToHandle); }); } }).subscribe(subscriber) @@ -126,8 +128,9 @@ public Mono execute(final AsyncWriteOperation operation, final ReadCon sinkToCallback(sink).onResult(result, t); } })).doOnError((t) -> { - labelException(session, t); - unpinServerAddressOnTransientTransactionError(session, t); + Throwable exceptionToHandle = t instanceof MongoException ? 
OperationHelper.unwrap((MongoException) t) : t; + labelException(session, exceptionToHandle); + unpinServerAddressOnTransientTransactionError(session, exceptionToHandle); }) ).subscribe(subscriber) ); diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CryptConnection.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CryptConnection.java index f7466c14828..23c6a060749 100644 --- a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CryptConnection.java +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CryptConnection.java @@ -23,6 +23,8 @@ import com.mongodb.internal.connection.AsyncConnection; import com.mongodb.internal.connection.Connection; import com.mongodb.internal.connection.MessageSettings; +import com.mongodb.internal.connection.MessageSequences; +import com.mongodb.internal.connection.MessageSequences.EmptyMessageSequences; import com.mongodb.internal.connection.OperationContext; import com.mongodb.internal.connection.SplittablePayload; import com.mongodb.internal.connection.SplittablePayloadBsonWriter; @@ -51,6 +53,7 @@ import java.util.Map; import java.util.function.Function; +import static com.mongodb.assertions.Assertions.fail; import static com.mongodb.internal.operation.ServerVersionHelper.serverIsLessThanVersionFourDotTwo; import static com.mongodb.reactivestreams.client.internal.MongoOperationPublisher.sinkToCallback; import static org.bson.codecs.configuration.CodecRegistries.fromProviders; @@ -93,14 +96,13 @@ public void commandAsync(final String database, final BsonDocument command, @Nullable final ReadPreference readPreference, final Decoder commandResultDecoder, final OperationContext operationContext, final SingleResultCallback callback) { commandAsync(database, command, fieldNameValidator, readPreference, commandResultDecoder, - operationContext, true, null, null, callback); + operationContext, true, EmptyMessageSequences.INSTANCE, callback); } @Override public void commandAsync(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator, @Nullable final ReadPreference readPreference, final Decoder commandResultDecoder, - final OperationContext operationContext, final boolean responseExpected, - @Nullable final SplittablePayload payload, @Nullable final FieldNameValidator payloadFieldNameValidator, + final OperationContext operationContext, final boolean responseExpected, final MessageSequences sequences, final SingleResultCallback callback) { if (serverIsLessThanVersionFourDotTwo(wrapped.getDescription())) { @@ -109,6 +111,14 @@ public void commandAsync(final String database, final BsonDocument command, } try { + SplittablePayload payload = null; + FieldNameValidator payloadFieldNameValidator = null; + if (sequences instanceof SplittablePayload) { + payload = (SplittablePayload) sequences; + payloadFieldNameValidator = payload.getFieldNameValidator(); + } else if (!(sequences instanceof EmptyMessageSequences)) { + fail(sequences.toString()); + } BasicOutputBuffer bsonOutput = new BasicOutputBuffer(); BsonBinaryWriter bsonBinaryWriter = new BsonBinaryWriter( new BsonWriterSettings(), new BsonBinaryWriterSettings(getDescription().getMaxDocumentSize()), @@ -124,7 +134,7 @@ public void commandAsync(final String database, final BsonDocument command, crypt.encrypt(database, new RawBsonDocument(bsonOutput.getInternalBuffer(), 0, bsonOutput.getSize()), operationTimeout) 
.flatMap((Function>) encryptedCommand -> Mono.create(sink -> wrapped.commandAsync(database, encryptedCommand, commandFieldNameValidator, readPreference, - new RawBsonDocumentCodec(), operationContext, responseExpected, null, null, sinkToCallback(sink)))) + new RawBsonDocumentCodec(), operationContext, responseExpected, EmptyMessageSequences.INSTANCE, sinkToCallback(sink)))) .flatMap(rawBsonDocument -> crypt.decrypt(rawBsonDocument, operationTimeout)) .map(decryptedResponse -> commandResultDecoder.decode(new BsonBinaryReader(decryptedResponse.getByteBuffer().asNIO()), diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/CrudProseTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/CrudProseTest.java new file mode 100644 index 00000000000..81d88e6fdb0 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/CrudProseTest.java @@ -0,0 +1,31 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.reactivestreams.client; + +import com.mongodb.MongoClientSettings; +import com.mongodb.client.MongoClient; +import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient; + +/** + * See + * CRUD Prose Tests. 
+ */ +final class CrudProseTest extends com.mongodb.client.CrudProseTest { + @Override + protected MongoClient createMongoClient(final MongoClientSettings.Builder mongoClientSettingsBuilder) { + return new SyncMongoClient(MongoClients.create(mongoClientSettingsBuilder.build())); + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoClient.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoClient.java index ceb5ea72769..3f2265cb795 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoClient.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoClient.java @@ -16,6 +16,7 @@ package com.mongodb.reactivestreams.client.syncadapter; +import com.mongodb.ClientBulkWriteException; import com.mongodb.ClientSessionOptions; import com.mongodb.ReadConcern; import com.mongodb.ReadPreference; @@ -27,6 +28,9 @@ import com.mongodb.client.MongoCluster; import com.mongodb.client.MongoDatabase; import com.mongodb.client.MongoIterable; +import com.mongodb.client.model.bulk.ClientBulkWriteOptions; +import com.mongodb.client.model.bulk.ClientNamespacedWriteModel; +import com.mongodb.client.model.bulk.ClientBulkWriteResult; import com.mongodb.connection.ClusterDescription; import com.mongodb.reactivestreams.client.internal.BatchCursor; import org.bson.Document; @@ -274,6 +278,34 @@ public void close() { } + @Override + public ClientBulkWriteResult bulkWrite( + final List clientWriteModels) throws ClientBulkWriteException { + return delegate.bulkWrite(clientWriteModels); + } + + @Override + public ClientBulkWriteResult bulkWrite( + final List clientWriteModels, + final ClientBulkWriteOptions options) throws ClientBulkWriteException { + return delegate.bulkWrite(clientWriteModels, options); + } + + @Override + public ClientBulkWriteResult bulkWrite( + final ClientSession clientSession, + final List clientWriteModels) throws ClientBulkWriteException { + return delegate.bulkWrite(clientSession, clientWriteModels); + } + + @Override + public ClientBulkWriteResult bulkWrite( + final ClientSession clientSession, + final List clientWriteModels, + final ClientBulkWriteOptions options) throws ClientBulkWriteException { + return delegate.bulkWrite(clientSession, clientWriteModels, options); + } + @Override public ClusterDescription getClusterDescription() { return wrapped.getClusterDescription(); diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCluster.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCluster.java index 780f7260eb4..fc3cad4b6a7 100644 --- a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCluster.java +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCluster.java @@ -16,6 +16,7 @@ package com.mongodb.reactivestreams.client.syncadapter; +import com.mongodb.ClientBulkWriteException; import com.mongodb.ClientSessionOptions; import com.mongodb.ReadConcern; import com.mongodb.ReadPreference; @@ -26,6 +27,9 @@ import com.mongodb.client.MongoCluster; import com.mongodb.client.MongoDatabase; import com.mongodb.client.MongoIterable; +import com.mongodb.client.model.bulk.ClientBulkWriteOptions; +import com.mongodb.client.model.bulk.ClientBulkWriteResult; 
+import com.mongodb.client.model.bulk.ClientNamespacedWriteModel; import org.bson.BsonDocument; import org.bson.Document; import org.bson.codecs.configuration.CodecRegistry; @@ -278,6 +282,36 @@ public ChangeStreamIterable watch(final ClientSession clientS return new SyncChangeStreamIterable<>(wrapped.watch(unwrap(clientSession), pipeline, resultClass)); } + @Override + public ClientBulkWriteResult bulkWrite( + final List clientWriteModels) throws ClientBulkWriteException { + return requireNonNull(Mono.from(wrapped.bulkWrite(clientWriteModels)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public ClientBulkWriteResult bulkWrite( + final List clientWriteModels, + final ClientBulkWriteOptions options) throws ClientBulkWriteException { + return requireNonNull(Mono.from(wrapped.bulkWrite(clientWriteModels, options)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public ClientBulkWriteResult bulkWrite( + final ClientSession clientSession, + final List clientWriteModels) throws ClientBulkWriteException { + return requireNonNull( + Mono.from(wrapped.bulkWrite(unwrap(clientSession), clientWriteModels)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public ClientBulkWriteResult bulkWrite( + final ClientSession clientSession, + final List clientWriteModels, + final ClientBulkWriteOptions options) throws ClientBulkWriteException { + return requireNonNull(Mono.from(wrapped.bulkWrite(unwrap(clientSession), clientWriteModels, options)).contextWrite(CONTEXT) + .block(TIMEOUT_DURATION)); + } + private com.mongodb.reactivestreams.client.ClientSession unwrap(final ClientSession clientSession) { return ((SyncClientSession) clientSession).getWrapped(); } diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoOperationPublisherTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoOperationPublisherTest.java index 42d6bb14c5c..1c096748c11 100644 --- a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoOperationPublisherTest.java +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoOperationPublisherTest.java @@ -31,6 +31,7 @@ import java.util.concurrent.TimeUnit; +import static com.mongodb.ClusterFixture.TIMEOUT; import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_TIMEOUT; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -113,7 +114,7 @@ public void withReadPreference() { @Test public void withTimeout() { - assertEquals(DEFAULT_MOP, DEFAULT_MOP.withTimeout(60_000, TimeUnit.MILLISECONDS)); + assertEquals(DEFAULT_MOP, DEFAULT_MOP.withTimeout(TIMEOUT, TimeUnit.SECONDS)); assertEquals(1000, DEFAULT_MOP.withTimeout(1000, TimeUnit.MILLISECONDS).getTimeoutMS()); assertThrows(IllegalArgumentException.class, () -> DEFAULT_MOP.withTimeout(500, TimeUnit.NANOSECONDS)); } diff --git a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMongoCluster.scala b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMongoCluster.scala index 3871aded144..972831f197f 100644 --- a/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMongoCluster.scala +++ b/driver-scala/src/integration/scala/org/mongodb/scala/syncadapter/SyncMongoCluster.scala @@ -1,5 +1,7 @@ package org.mongodb.scala.syncadapter +import com.mongodb.assertions.Assertions +import 
com.mongodb.client.model.bulk.{ ClientBulkWriteOptions, ClientBulkWriteResult, ClientNamespacedWriteModel } import com.mongodb.{ ClientSessionOptions, ReadConcern, ReadPreference, WriteConcern } import com.mongodb.client.{ ClientSession, MongoCluster => JMongoCluster, MongoDatabase => JMongoDatabase } import org.bson.Document @@ -8,6 +10,7 @@ import org.bson.conversions.Bson import org.mongodb.scala.MongoCluster import org.mongodb.scala.bson.DefaultHelper.DefaultsTo +import java.util import java.util.concurrent.TimeUnit import scala.collection.JavaConverters._ import scala.concurrent.Await @@ -123,4 +126,36 @@ class SyncMongoCluster(wrapped: MongoCluster) extends JMongoCluster { private def unwrap(clientSession: ClientSession): org.mongodb.scala.ClientSession = clientSession.asInstanceOf[SyncClientSession].wrapped + + override def bulkWrite( + models: util.List[_ <: ClientNamespacedWriteModel] + ): ClientBulkWriteResult = { + org.junit.Assume.assumeTrue("TODO-JAVA-5531 implement", java.lang.Boolean.parseBoolean(toString)) + throw Assertions.fail("TODO-JAVA-5531 implement") + } + + override def bulkWrite( + models: util.List[_ <: ClientNamespacedWriteModel], + options: ClientBulkWriteOptions + ): ClientBulkWriteResult = { + org.junit.Assume.assumeTrue("TODO-JAVA-5531 implement", java.lang.Boolean.parseBoolean(toString)) + throw Assertions.fail("TODO-JAVA-5531 implement") + } + + override def bulkWrite( + clientSession: ClientSession, + models: util.List[_ <: ClientNamespacedWriteModel] + ): ClientBulkWriteResult = { + org.junit.Assume.assumeTrue("TODO-JAVA-5531 implement", java.lang.Boolean.parseBoolean(toString)) + throw Assertions.fail("TODO-JAVA-5531 implement") + } + + override def bulkWrite( + clientSession: ClientSession, + models: util.List[_ <: ClientNamespacedWriteModel], + options: ClientBulkWriteOptions + ): ClientBulkWriteResult = { + org.junit.Assume.assumeTrue("TODO-JAVA-5531 implement", java.lang.Boolean.parseBoolean(toString)) + throw Assertions.fail("TODO-JAVA-5531 implement") + } } diff --git a/driver-scala/src/test/scala/org/mongodb/scala/ApiAliasAndCompanionSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/ApiAliasAndCompanionSpec.scala index b22d0d8373d..5b8e46c598d 100644 --- a/driver-scala/src/test/scala/org/mongodb/scala/ApiAliasAndCompanionSpec.scala +++ b/driver-scala/src/test/scala/org/mongodb/scala/ApiAliasAndCompanionSpec.scala @@ -90,7 +90,10 @@ class ApiAliasAndCompanionSpec extends BaseSpec { "SyncMongoCluster", "SyncGridFSBucket", "SyncMongoDatabase", - "SyncClientEncryption" + "SyncClientEncryption", + "BaseClientUpdateOptions", + "BaseClientDeleteOptions", + "MongoBaseInterfaceAssertions" ) val scalaExclusions = Set( "BuildInfo", @@ -150,7 +153,9 @@ class ApiAliasAndCompanionSpec extends BaseSpec { .asScala .map(_.getSimpleName) .toSet + - "MongoException" - "MongoGridFSException" - "MongoConfigurationException" - "MongoWriteConcernWithResponseException" + "MongoException" - "MongoGridFSException" - "MongoConfigurationException" - "MongoWriteConcernWithResponseException" - + // TODO-JAVA-5531 remove the `"ClientBulkWriteException"` exclusion + "ClientBulkWriteException" val objects = new Reflections( new ConfigurationBuilder() diff --git a/driver-scala/src/test/scala/org/mongodb/scala/MongoClientSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/MongoClientSpec.scala index 4c721ed8774..4e0189bfd5e 100644 --- a/driver-scala/src/test/scala/org/mongodb/scala/MongoClientSpec.scala +++ 
b/driver-scala/src/test/scala/org/mongodb/scala/MongoClientSpec.scala @@ -35,7 +35,12 @@ class MongoClientSpec extends BaseSpec with MockitoSugar { wrapped.foreach((name: String) => { val cleanedName = name.stripPrefix("get") - assert(local.contains(name) | local.contains(cleanedName.head.toLower + cleanedName.tail), s"Missing: $name") + + if (!cleanedName.contains("bulkWrite")) { + // TODO-JAVA-5531 remove this whole `if` block + assert(local.contains(name) | local.contains(cleanedName.head.toLower + cleanedName.tail), s"Missing: $name") + } + // TODO-JAVA-5531 uncomment: assert(local.contains(name) | local.contains(cleanedName.head.toLower + cleanedName.tail), s"Missing: $name") }) } diff --git a/driver-sync/src/main/com/mongodb/client/MongoCluster.java b/driver-sync/src/main/com/mongodb/client/MongoCluster.java index f901845333b..f097f71288f 100644 --- a/driver-sync/src/main/com/mongodb/client/MongoCluster.java +++ b/driver-sync/src/main/com/mongodb/client/MongoCluster.java @@ -16,7 +16,10 @@ package com.mongodb.client; +import com.mongodb.ClientBulkWriteException; import com.mongodb.ClientSessionOptions; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoException; import com.mongodb.MongoNamespace; import com.mongodb.ReadConcern; import com.mongodb.ReadPreference; @@ -24,6 +27,11 @@ import com.mongodb.annotations.Alpha; import com.mongodb.annotations.Immutable; import com.mongodb.annotations.Reason; +import com.mongodb.client.model.bulk.ClientBulkWriteOptions; +import com.mongodb.client.model.bulk.ClientNamespacedDeleteManyModel; +import com.mongodb.client.model.bulk.ClientNamespacedUpdateManyModel; +import com.mongodb.client.model.bulk.ClientNamespacedWriteModel; +import com.mongodb.client.model.bulk.ClientBulkWriteResult; import com.mongodb.lang.Nullable; import org.bson.Document; import org.bson.codecs.configuration.CodecRegistry; @@ -352,4 +360,115 @@ public interface MongoCluster { * @mongodb.driver.dochub core/changestreams Change Streams */ ChangeStreamIterable watch(ClientSession clientSession, List pipeline, Class resultClass); + + /** + * Executes a client-level bulk write operation. + * This method is functionally equivalent to {@link #bulkWrite(List, ClientBulkWriteOptions)} + * with the {@linkplain ClientBulkWriteOptions#clientBulkWriteOptions() default options}. + *

+     * This operation supports {@linkplain MongoClientSettings#getRetryWrites() retryable writes}.
+     * Depending on the number of {@code models}, encoded size of {@code models}, and the size limits in effect,
+     * executing this operation may require multiple {@code bulkWrite} commands.
+     * The eligibility for retries is determined per each {@code bulkWrite} command:
+     * {@link ClientNamespacedUpdateManyModel}, {@link ClientNamespacedDeleteManyModel} in a command render it non-retryable.</p>
+     * <p>
+     * This operation is not supported by MongoDB Atlas Serverless instances.</p>
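+     * <p>
+     * A minimal usage sketch, assuming an existing {@code MongoCluster} named {@code client}
+     * (the namespace and document values are placeholders):</p>
+     * <pre>{@code
+     * List<ClientNamespacedWriteModel> models = Arrays.asList(
+     *         ClientNamespacedWriteModel.insertOne(
+     *                 new MongoNamespace("db", "coll"),
+     *                 new Document("x", 1)));
+     * try {
+     *     ClientBulkWriteResult result = client.bulkWrite(models);
+     * } catch (ClientBulkWriteException e) {
+     *     // inspect e.getWriteErrors(), e.getWriteConcernErrors() and e.getPartialResult()
+     * }
+     * }</pre>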

    + * + * @param models The {@linkplain ClientNamespacedWriteModel individual write operations}. + * @return The {@link ClientBulkWriteResult} if the operation is successful. + * @throws ClientBulkWriteException If and only if the operation is unsuccessful or partially unsuccessful, + * and there is at least one of the following pieces of information to report: + * {@link ClientBulkWriteException#getWriteConcernErrors()}, {@link ClientBulkWriteException#getWriteErrors()}, + * {@link ClientBulkWriteException#getPartialResult()}. + * @throws MongoException Only if the operation is unsuccessful. + * @since 5.3 + * @mongodb.server.release 8.0 + * @mongodb.driver.manual reference/command/bulkWrite/ bulkWrite + */ + ClientBulkWriteResult bulkWrite(List models) throws ClientBulkWriteException; + + /** + * Executes a client-level bulk write operation. + *

+     * This operation supports {@linkplain MongoClientSettings#getRetryWrites() retryable writes}.
+     * Depending on the number of {@code models}, encoded size of {@code models}, and the size limits in effect,
+     * executing this operation may require multiple {@code bulkWrite} commands.
+     * The eligibility for retries is determined per each {@code bulkWrite} command:
+     * {@link ClientNamespacedUpdateManyModel}, {@link ClientNamespacedDeleteManyModel} in a command render it non-retryable.</p>
+     * <p>
+     * This operation is not supported by MongoDB Atlas Serverless instances.</p>
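+     * <p>
+     * A usage sketch with non-default options, assuming an existing {@code MongoCluster} named {@code client} and a
+     * prepared {@code models} list:</p>
+     * <pre>{@code
+     * // bypassDocumentValidation is only one example of a configurable option
+     * ClientBulkWriteOptions options = ClientBulkWriteOptions.clientBulkWriteOptions()
+     *         .bypassDocumentValidation(true);
+     * ClientBulkWriteResult result = client.bulkWrite(models, options);
+     * }</pre>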

    + * + * @param models The {@linkplain ClientNamespacedWriteModel individual write operations}. + * @param options The options. + * @return The {@link ClientBulkWriteResult} if the operation is successful. + * @throws ClientBulkWriteException If and only if the operation is unsuccessful or partially unsuccessful, + * and there is at least one of the following pieces of information to report: + * {@link ClientBulkWriteException#getWriteConcernErrors()}, {@link ClientBulkWriteException#getWriteErrors()}, + * {@link ClientBulkWriteException#getPartialResult()}. + * @throws MongoException Only if the operation is unsuccessful. + * @since 5.3 + * @mongodb.server.release 8.0 + * @mongodb.driver.manual reference/command/bulkWrite/ bulkWrite + */ + ClientBulkWriteResult bulkWrite( + List models, + ClientBulkWriteOptions options) throws ClientBulkWriteException; + + /** + * Executes a client-level bulk write operation. + * This method is functionally equivalent to {@link #bulkWrite(ClientSession, List, ClientBulkWriteOptions)} + * with the {@linkplain ClientBulkWriteOptions#clientBulkWriteOptions() default options}. + *

+     * This operation supports {@linkplain MongoClientSettings#getRetryWrites() retryable writes}.
+     * Depending on the number of {@code models}, encoded size of {@code models}, and the size limits in effect,
+     * executing this operation may require multiple {@code bulkWrite} commands.
+     * The eligibility for retries is determined per each {@code bulkWrite} command:
+     * {@link ClientNamespacedUpdateManyModel}, {@link ClientNamespacedDeleteManyModel} in a command render it non-retryable.</p>
+     * <p>
+     * This operation is not supported by MongoDB Atlas Serverless instances.</p>
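+     * <p>
+     * A usage sketch with an explicit session, assuming an existing {@code MongoClient} named {@code client} and a
+     * prepared {@code models} list:</p>
+     * <pre>{@code
+     * // the session is closed automatically by try-with-resources
+     * try (ClientSession session = client.startSession()) {
+     *     ClientBulkWriteResult result = client.bulkWrite(session, models);
+     * }
+     * }</pre>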

    + * + * @param clientSession The {@linkplain ClientSession client session} with which to associate this operation. + * @param models The {@linkplain ClientNamespacedWriteModel individual write operations}. + * @return The {@link ClientBulkWriteResult} if the operation is successful. + * @throws ClientBulkWriteException If and only if the operation is unsuccessful or partially unsuccessful, + * and there is at least one of the following pieces of information to report: + * {@link ClientBulkWriteException#getWriteConcernErrors()}, {@link ClientBulkWriteException#getWriteErrors()}, + * {@link ClientBulkWriteException#getPartialResult()}. + * @throws MongoException Only if the operation is unsuccessful. + * @since 5.3 + * @mongodb.server.release 8.0 + * @mongodb.driver.manual reference/command/bulkWrite/ bulkWrite + */ + ClientBulkWriteResult bulkWrite( + ClientSession clientSession, + List models) throws ClientBulkWriteException; + + /** + * Executes a client-level bulk write operation. + *

+     * This operation supports {@linkplain MongoClientSettings#getRetryWrites() retryable writes}.
+     * Depending on the number of {@code models}, encoded size of {@code models}, and the size limits in effect,
+     * executing this operation may require multiple {@code bulkWrite} commands.
+     * The eligibility for retries is determined per each {@code bulkWrite} command:
+     * {@link ClientNamespacedUpdateManyModel}, {@link ClientNamespacedDeleteManyModel} in a command render it non-retryable.</p>
+     * <p>
+     * This operation is not supported by MongoDB Atlas Serverless instances.</p>

    + * + * @param clientSession The {@linkplain ClientSession client session} with which to associate this operation. + * @param models The {@linkplain ClientNamespacedWriteModel individual write operations}. + * @param options The options. + * @return The {@link ClientBulkWriteResult} if the operation is successful. + * @throws ClientBulkWriteException If and only if the operation is unsuccessful or partially unsuccessful, + * and there is at least one of the following pieces of information to report: + * {@link ClientBulkWriteException#getWriteConcernErrors()}, {@link ClientBulkWriteException#getWriteErrors()}, + * {@link ClientBulkWriteException#getPartialResult()}. + * @throws MongoException Only if the operation is unsuccessful. + * @since 5.3 + * @mongodb.server.release 8.0 + * @mongodb.driver.manual reference/command/bulkWrite/ bulkWrite + */ + ClientBulkWriteResult bulkWrite( + ClientSession clientSession, + List models, + ClientBulkWriteOptions options) throws ClientBulkWriteException; } diff --git a/driver-sync/src/main/com/mongodb/client/internal/ClientSessionImpl.java b/driver-sync/src/main/com/mongodb/client/internal/ClientSessionImpl.java index d3bbd850ae0..b60fc90316a 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/ClientSessionImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/ClientSessionImpl.java @@ -30,6 +30,7 @@ import com.mongodb.internal.TimeoutContext; import com.mongodb.internal.operation.AbortTransactionOperation; import com.mongodb.internal.operation.CommitTransactionOperation; +import com.mongodb.internal.operation.OperationHelper; import com.mongodb.internal.operation.ReadOperation; import com.mongodb.internal.operation.WriteConcernHelper; import com.mongodb.internal.operation.WriteOperation; @@ -241,7 +242,8 @@ public T withTransaction(final TransactionBody transactionBody, final Tra abortTransaction(); } if (e instanceof MongoException && !(e instanceof MongoOperationTimeoutException)) { - if (((MongoException) e).hasErrorLabel(TRANSIENT_TRANSACTION_ERROR_LABEL) + MongoException exceptionToHandle = OperationHelper.unwrap((MongoException) e); + if (exceptionToHandle.hasErrorLabel(TRANSIENT_TRANSACTION_ERROR_LABEL) && ClientSessionClock.INSTANCE.now() - startTime < MAX_RETRY_TIME_LIMIT_MS) { continue; } diff --git a/driver-sync/src/main/com/mongodb/client/internal/CryptConnection.java b/driver-sync/src/main/com/mongodb/client/internal/CryptConnection.java index f47f6a810a6..a62aa68783e 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/CryptConnection.java +++ b/driver-sync/src/main/com/mongodb/client/internal/CryptConnection.java @@ -21,6 +21,8 @@ import com.mongodb.connection.ConnectionDescription; import com.mongodb.internal.connection.Connection; import com.mongodb.internal.connection.MessageSettings; +import com.mongodb.internal.connection.MessageSequences; +import com.mongodb.internal.connection.MessageSequences.EmptyMessageSequences; import com.mongodb.internal.connection.OperationContext; import com.mongodb.internal.connection.SplittablePayload; import com.mongodb.internal.connection.SplittablePayloadBsonWriter; @@ -47,10 +49,14 @@ import java.util.HashMap; import java.util.Map; +import static com.mongodb.assertions.Assertions.fail; import static com.mongodb.internal.operation.ServerVersionHelper.serverIsLessThanVersionFourDotTwo; import static org.bson.codecs.configuration.CodecRegistries.fromProviders; -class CryptConnection implements Connection { +/** + * This class is not part of the public API and may be 
removed or changed at any time. + */ +public final class CryptConnection implements Connection { private static final CodecRegistry REGISTRY = fromProviders(new BsonValueCodecProvider()); private static final int MAX_SPLITTABLE_DOCUMENT_SIZE = 2097152; @@ -87,13 +93,20 @@ public ConnectionDescription getDescription() { @Override public T command(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator, @Nullable final ReadPreference readPreference, final Decoder commandResultDecoder, - final OperationContext operationContext, final boolean responseExpected, - @Nullable final SplittablePayload payload, @Nullable final FieldNameValidator payloadFieldNameValidator) { + final OperationContext operationContext, final boolean responseExpected, final MessageSequences sequences) { if (serverIsLessThanVersionFourDotTwo(wrapped.getDescription())) { throw new MongoClientException("Auto-encryption requires a minimum MongoDB version of 4.2"); } + SplittablePayload payload = null; + FieldNameValidator payloadFieldNameValidator = null; + if (sequences instanceof SplittablePayload) { + payload = (SplittablePayload) sequences; + payloadFieldNameValidator = payload.getFieldNameValidator(); + } else if (!(sequences instanceof EmptyMessageSequences)) { + fail(sequences.toString()); + } BasicOutputBuffer bsonOutput = new BasicOutputBuffer(); BsonBinaryWriter bsonBinaryWriter = new BsonBinaryWriter(new BsonWriterSettings(), new BsonBinaryWriterSettings(getDescription().getMaxDocumentSize()), @@ -110,7 +123,7 @@ public T command(final String database, final BsonDocument command, final Fi new RawBsonDocument(bsonOutput.getInternalBuffer(), 0, bsonOutput.getSize()), operationTimeout); RawBsonDocument encryptedResponse = wrapped.command(database, encryptedCommand, commandFieldNameValidator, readPreference, - new RawBsonDocumentCodec(), operationContext, responseExpected, null, null); + new RawBsonDocumentCodec(), operationContext, responseExpected, EmptyMessageSequences.INSTANCE); if (encryptedResponse == null) { return null; @@ -127,7 +140,7 @@ public T command(final String database, final BsonDocument command, final Fi @Override public T command(final String database, final BsonDocument command, final FieldNameValidator fieldNameValidator, @Nullable final ReadPreference readPreference, final Decoder commandResultDecoder, final OperationContext operationContext) { - return command(database, command, fieldNameValidator, readPreference, commandResultDecoder, operationContext, true, null, null); + return command(database, command, fieldNameValidator, readPreference, commandResultDecoder, operationContext, true, EmptyMessageSequences.INSTANCE); } @SuppressWarnings("unchecked") diff --git a/driver-sync/src/main/com/mongodb/client/internal/MongoClientImpl.java b/driver-sync/src/main/com/mongodb/client/internal/MongoClientImpl.java index d7ee2ff64ca..17b132bc978 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/MongoClientImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/MongoClientImpl.java @@ -17,6 +17,7 @@ package com.mongodb.client.internal; import com.mongodb.AutoEncryptionSettings; +import com.mongodb.ClientBulkWriteException; import com.mongodb.ClientSessionOptions; import com.mongodb.MongoClientSettings; import com.mongodb.MongoDriverInformation; @@ -31,6 +32,9 @@ import com.mongodb.client.MongoDatabase; import com.mongodb.client.MongoIterable; import com.mongodb.client.SynchronousContextProvider; +import 
com.mongodb.client.model.bulk.ClientBulkWriteOptions; +import com.mongodb.client.model.bulk.ClientNamespacedWriteModel; +import com.mongodb.client.model.bulk.ClientBulkWriteResult; import com.mongodb.connection.ClusterDescription; import com.mongodb.connection.SocketSettings; import com.mongodb.internal.TimeoutSettings; @@ -254,6 +258,34 @@ public ChangeStreamIterable watch( return delegate.watch(clientSession, pipeline, resultClass); } + @Override + public ClientBulkWriteResult bulkWrite( + final List clientWriteModels) throws ClientBulkWriteException { + return delegate.bulkWrite(clientWriteModels); + } + + @Override + public ClientBulkWriteResult bulkWrite( + final List clientWriteModels, + final ClientBulkWriteOptions options) throws ClientBulkWriteException { + return delegate.bulkWrite(clientWriteModels, options); + } + + @Override + public ClientBulkWriteResult bulkWrite( + final ClientSession clientSession, + final List clientWriteModels) throws ClientBulkWriteException { + return delegate.bulkWrite(clientSession, clientWriteModels); + } + + @Override + public ClientBulkWriteResult bulkWrite( + final ClientSession clientSession, + final List clientWriteModels, + final ClientBulkWriteOptions options) throws ClientBulkWriteException { + return delegate.bulkWrite(clientSession, clientWriteModels, options); + } + private static Cluster createCluster(final MongoClientSettings settings, @Nullable final MongoDriverInformation mongoDriverInformation) { notNull("settings", settings); diff --git a/driver-sync/src/main/com/mongodb/client/internal/MongoClusterImpl.java b/driver-sync/src/main/com/mongodb/client/internal/MongoClusterImpl.java index b3d03095070..9c0033e42a7 100644 --- a/driver-sync/src/main/com/mongodb/client/internal/MongoClusterImpl.java +++ b/driver-sync/src/main/com/mongodb/client/internal/MongoClusterImpl.java @@ -17,6 +17,7 @@ package com.mongodb.client.internal; import com.mongodb.AutoEncryptionSettings; +import com.mongodb.ClientBulkWriteException; import com.mongodb.ClientSessionOptions; import com.mongodb.MongoClientException; import com.mongodb.MongoException; @@ -37,6 +38,9 @@ import com.mongodb.client.MongoDatabase; import com.mongodb.client.MongoIterable; import com.mongodb.client.SynchronousContextProvider; +import com.mongodb.client.model.bulk.ClientBulkWriteOptions; +import com.mongodb.client.model.bulk.ClientNamespacedWriteModel; +import com.mongodb.client.model.bulk.ClientBulkWriteResult; import com.mongodb.internal.IgnorableRequestContext; import com.mongodb.internal.TimeoutSettings; import com.mongodb.internal.binding.ClusterAwareReadWriteBinding; @@ -48,7 +52,9 @@ import com.mongodb.internal.connection.Cluster; import com.mongodb.internal.connection.OperationContext; import com.mongodb.internal.connection.ReadConcernAwareNoOpSessionContext; +import com.mongodb.internal.operation.OperationHelper; import com.mongodb.internal.operation.ReadOperation; +import com.mongodb.internal.operation.SyncOperations; import com.mongodb.internal.operation.WriteOperation; import com.mongodb.internal.session.ServerSessionPool; import com.mongodb.lang.Nullable; @@ -67,6 +73,7 @@ import static com.mongodb.MongoException.UNKNOWN_TRANSACTION_COMMIT_RESULT_LABEL; import static com.mongodb.ReadPreference.primary; import static com.mongodb.assertions.Assertions.isTrue; +import static com.mongodb.assertions.Assertions.isTrueArgument; import static com.mongodb.assertions.Assertions.notNull; import static com.mongodb.internal.TimeoutContext.createTimeoutContext; @@ -91,6 +98,7 @@ final 
class MongoClusterImpl implements MongoCluster { private final TimeoutSettings timeoutSettings; private final UuidRepresentation uuidRepresentation; private final WriteConcern writeConcern; + private final SyncOperations operations; MongoClusterImpl( @Nullable final AutoEncryptionSettings autoEncryptionSettings, final Cluster cluster, final CodecRegistry codecRegistry, @@ -115,6 +123,16 @@ final class MongoClusterImpl implements MongoCluster { this.timeoutSettings = timeoutSettings; this.uuidRepresentation = uuidRepresentation; this.writeConcern = writeConcern; + operations = new SyncOperations<>( + null, + BsonDocument.class, + readPreference, + codecRegistry, + readConcern, + writeConcern, + retryWrites, + retryReads, + timeoutSettings); } @Override @@ -307,6 +325,46 @@ public ChangeStreamIterable watch(final ClientSession clientS return createChangeStreamIterable(clientSession, pipeline, clazz); } + @Override + public ClientBulkWriteResult bulkWrite( + final List clientWriteModels) throws ClientBulkWriteException { + notNull("clientWriteModels", clientWriteModels); + isTrueArgument("`clientWriteModels` must not be empty", !clientWriteModels.isEmpty()); + return executeBulkWrite(null, clientWriteModels, null); + } + + @Override + public ClientBulkWriteResult bulkWrite( + final List clientWriteModels, + final ClientBulkWriteOptions options) throws ClientBulkWriteException { + notNull("clientWriteModels", clientWriteModels); + isTrueArgument("`clientWriteModels` must not be empty", !clientWriteModels.isEmpty()); + notNull("options", options); + return executeBulkWrite(null, clientWriteModels, options); + } + + @Override + public ClientBulkWriteResult bulkWrite( + final ClientSession clientSession, + final List clientWriteModels) throws ClientBulkWriteException { + notNull("clientSession", clientSession); + notNull("clientWriteModels", clientWriteModels); + isTrueArgument("`clientWriteModels` must not be empty", !clientWriteModels.isEmpty()); + return executeBulkWrite(clientSession, clientWriteModels, null); + } + + @Override + public ClientBulkWriteResult bulkWrite( + final ClientSession clientSession, + final List clientWriteModels, + final ClientBulkWriteOptions options) throws ClientBulkWriteException { + notNull("clientSession", clientSession); + notNull("clientWriteModels", clientWriteModels); + isTrueArgument("`clientWriteModels` must not be empty", !clientWriteModels.isEmpty()); + notNull("options", options); + return executeBulkWrite(clientSession, clientWriteModels, options); + } + private ListDatabasesIterable createListDatabasesIterable(@Nullable final ClientSession clientSession, final Class clazz) { return new ListDatabasesIterableImpl<>(clientSession, clazz, codecRegistry, ReadPreference.primary(), operationExecutor, retryReads, timeoutSettings); } @@ -324,6 +382,14 @@ private ChangeStreamIterable createChangeStreamIterable(@Null retryReads, timeoutSettings); } + private ClientBulkWriteResult executeBulkWrite( + @Nullable final ClientSession clientSession, + final List clientWriteModels, + @Nullable final ClientBulkWriteOptions options) { + isTrue("`autoEncryptionSettings` is null, as bulkWrite does not currently support automatic encryption", autoEncryptionSettings == null); + return operationExecutor.execute(operations.clientBulkWriteOperation(clientWriteModels, options), readConcern, clientSession); + } + final class OperationExecutorImpl implements OperationExecutor { private final TimeoutSettings executorTimeoutSettings; @@ -357,8 +423,9 @@ public T execute(final 
ReadOperation operation, final ReadPreference read } return operation.execute(binding); } catch (MongoException e) { - labelException(actualClientSession, e); - clearTransactionContextOnTransientTransactionError(session, e); + MongoException exceptionToHandle = OperationHelper.unwrap(e); + labelException(actualClientSession, exceptionToHandle); + clearTransactionContextOnTransientTransactionError(session, exceptionToHandle); throw e; } finally { binding.release(); @@ -378,8 +445,9 @@ public T execute(final WriteOperation operation, final ReadConcern readCo try { return operation.execute(binding); } catch (MongoException e) { - labelException(actualClientSession, e); - clearTransactionContextOnTransientTransactionError(session, e); + MongoException exceptionToHandle = OperationHelper.unwrap(e); + labelException(actualClientSession, exceptionToHandle); + clearTransactionContextOnTransientTransactionError(session, exceptionToHandle); throw e; } finally { binding.release(); diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideOperationsTimeoutProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideOperationsTimeoutProseTest.java index 4806356f98b..8eb47aa0a6c 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideOperationsTimeoutProseTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideOperationsTimeoutProseTest.java @@ -16,12 +16,14 @@ package com.mongodb.client; +import com.mongodb.ClientBulkWriteException; import com.mongodb.ClientSessionOptions; import com.mongodb.ClusterFixture; import com.mongodb.ConnectionString; import com.mongodb.CursorType; import com.mongodb.MongoClientSettings; import com.mongodb.MongoCredential; +import com.mongodb.MongoException; import com.mongodb.MongoNamespace; import com.mongodb.MongoOperationTimeoutException; import com.mongodb.MongoSocketReadTimeoutException; @@ -34,6 +36,7 @@ import com.mongodb.client.gridfs.GridFSDownloadStream; import com.mongodb.client.gridfs.GridFSUploadStream; import com.mongodb.client.model.CreateCollectionOptions; +import com.mongodb.client.model.bulk.ClientNamespacedWriteModel; import com.mongodb.client.model.changestream.ChangeStreamDocument; import com.mongodb.client.model.changestream.FullDocument; import com.mongodb.client.test.CollectionHelper; @@ -48,8 +51,11 @@ import com.mongodb.internal.connection.TestCommandListener; import com.mongodb.internal.connection.TestConnectionPoolListener; import com.mongodb.test.FlakyTest; +import org.bson.BsonArray; +import org.bson.BsonBoolean; import org.bson.BsonDocument; import org.bson.BsonInt32; +import org.bson.BsonString; import org.bson.BsonTimestamp; import org.bson.Document; import org.bson.codecs.BsonDocumentCodec; @@ -81,7 +87,9 @@ import static com.mongodb.ClusterFixture.sleep; import static com.mongodb.client.Fixture.getDefaultDatabaseName; import static com.mongodb.client.Fixture.getPrimary; +import static java.lang.String.join; import static java.util.Arrays.asList; +import static java.util.Collections.nCopies; import static java.util.Collections.singletonList; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -700,6 +708,40 @@ public void test10CustomTestWithTransactionUsesASingleTimeoutWithLock() { } } + @DisplayName("11. 
Multi-batch bulkWrites") + @Test + @SuppressWarnings("try") + protected void test11MultiBatchBulkWrites() throws InterruptedException { + assumeTrue(serverVersionAtLeast(8, 0)); + assumeFalse(isServerlessTest()); + try (MongoClient client = createMongoClient(getMongoClientSettingsBuilder())) { + // a workaround for https://jira.mongodb.org/browse/DRIVERS-2997, remove this block when the aforementioned bug is fixed + client.getDatabase(namespace.getDatabaseName()).drop(); + } + BsonDocument failPointDocument = new BsonDocument("configureFailPoint", new BsonString("failCommand")) + .append("mode", new BsonDocument("times", new BsonInt32(2))) + .append("data", new BsonDocument("failCommands", new BsonArray(singletonList(new BsonString("bulkWrite")))) + .append("blockConnection", BsonBoolean.TRUE) + .append("blockTimeMS", new BsonInt32(2020))); + try (MongoClient client = createMongoClient(getMongoClientSettingsBuilder().timeout(4000, TimeUnit.MILLISECONDS)); + FailPoint ignored = FailPoint.enable(failPointDocument, getPrimary())) { + MongoDatabase db = client.getDatabase(namespace.getDatabaseName()); + db.drop(); + Document helloResponse = db.runCommand(new Document("hello", 1)); + int maxBsonObjectSize = helloResponse.getInteger("maxBsonObjectSize"); + int maxMessageSizeBytes = helloResponse.getInteger("maxMessageSizeBytes"); + ClientNamespacedWriteModel model = ClientNamespacedWriteModel.insertOne( + namespace, + new Document("a", join("", nCopies(maxBsonObjectSize - 500, "b")))); + MongoException topLevelError = assertThrows(ClientBulkWriteException.class, () -> + client.bulkWrite(nCopies(maxMessageSizeBytes / maxBsonObjectSize + 1, model))) + .getCause(); + assertNotNull(topLevelError); + assertInstanceOf(MongoOperationTimeoutException.class, topLevelError); + assertEquals(2, commandListener.getCommandStartedEvents("bulkWrite").size()); + } + } + /** * Not a prose spec test. However, it is additional test case for better coverage. 
*/ diff --git a/driver-sync/src/test/functional/com/mongodb/client/CrudProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/CrudProseTest.java index 5d3907bb210..7138cdfe67e 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/CrudProseTest.java +++ b/driver-sync/src/test/functional/com/mongodb/client/CrudProseTest.java @@ -16,181 +16,569 @@ package com.mongodb.client; +import com.mongodb.AutoEncryptionSettings; +import com.mongodb.ClientBulkWriteException; +import com.mongodb.Function; import com.mongodb.MongoBulkWriteException; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoNamespace; import com.mongodb.MongoWriteConcernException; import com.mongodb.MongoWriteException; -import com.mongodb.ServerAddress; +import com.mongodb.WriteConcern; +import com.mongodb.assertions.Assertions; import com.mongodb.client.model.CreateCollectionOptions; import com.mongodb.client.model.Filters; +import com.mongodb.client.model.InsertOneModel; +import com.mongodb.client.model.Updates; import com.mongodb.client.model.ValidationOptions; -import com.mongodb.event.CommandListener; +import com.mongodb.client.model.bulk.ClientBulkWriteOptions; +import com.mongodb.client.model.bulk.ClientBulkWriteResult; +import com.mongodb.client.model.bulk.ClientNamespacedWriteModel; import com.mongodb.event.CommandStartedEvent; +import com.mongodb.internal.connection.TestCommandListener; import org.bson.BsonArray; import org.bson.BsonDocument; +import org.bson.BsonDocumentWrapper; import org.bson.BsonInt32; +import org.bson.BsonMaximumSizeExceededException; import org.bson.BsonString; import org.bson.BsonValue; import org.bson.Document; +import org.bson.RawBsonDocument; +import org.bson.codecs.configuration.CodecRegistry; import org.bson.codecs.pojo.PojoCodecProvider; -import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import org.junit.jupiter.params.provider.ValueSource; +import org.opentest4j.AssertionFailedError; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; import java.util.concurrent.TimeUnit; +import java.util.function.BiFunction; +import java.util.function.Supplier; +import java.util.stream.Stream; import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet; +import static com.mongodb.ClusterFixture.isServerlessTest; +import static com.mongodb.ClusterFixture.isStandalone; import static com.mongodb.ClusterFixture.serverVersionAtLeast; import static com.mongodb.MongoClientSettings.getDefaultCodecRegistry; import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder; -import static java.lang.String.format; +import static com.mongodb.client.Fixture.getPrimary; +import static com.mongodb.client.model.bulk.ClientBulkWriteOptions.clientBulkWriteOptions; +import static com.mongodb.client.model.bulk.ClientNamespacedWriteModel.insertOne; +import static com.mongodb.client.model.bulk.ClientUpdateOneOptions.clientUpdateOneOptions; +import static java.lang.String.join; import static java.util.Arrays.asList; +import static java.util.Collections.nCopies; import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; import static 
org.bson.codecs.configuration.CodecRegistries.fromProviders; import static org.bson.codecs.configuration.CodecRegistries.fromRegistries; +import static org.junit.jupiter.api.Assertions.assertAll; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assumptions.assumeFalse; import static org.junit.jupiter.api.Assumptions.assumeTrue; +import static org.junit.jupiter.params.provider.Arguments.arguments; /** - * See https://github.com/mongodb/specifications/blob/master/source/crud/tests/README.rst#prose-tests + * See + * CRUD Prose Tests. */ -public class CrudProseTest extends DatabaseTestCase { - private BsonDocument failPointDocument; +public class CrudProseTest { + private static final MongoNamespace NAMESPACE = new MongoNamespace("db", "coll"); - @BeforeEach - @Override - public void setUp() { - super.setUp(); - } - - /** - * 1. WriteConcernError.details exposes writeConcernError.errInfo - */ + @DisplayName("1. WriteConcernError.details exposes writeConcernError.errInfo") @Test - public void testWriteConcernErrInfoIsPropagated() { + @SuppressWarnings("try") + void testWriteConcernErrInfoIsPropagated() throws InterruptedException { assumeTrue(isDiscoverableReplicaSet() && serverVersionAtLeast(4, 0)); - - try { - setFailPoint(); - collection.insertOne(Document.parse("{ x: 1 }")); - } catch (MongoWriteConcernException e) { - assertEquals(e.getWriteConcernError().getCode(), 100); - assertEquals("UnsatisfiableWriteConcern", e.getWriteConcernError().getCodeName()); - assertEquals(e.getWriteConcernError().getDetails(), new BsonDocument("writeConcern", + BsonDocument failPointDocument = new BsonDocument("configureFailPoint", new BsonString("failCommand")) + .append("mode", new BsonDocument("times", new BsonInt32(1))) + .append("data", new BsonDocument("failCommands", new BsonArray(singletonList(new BsonString("insert")))) + .append("writeConcernError", new BsonDocument("code", new BsonInt32(100)) + .append("codeName", new BsonString("UnsatisfiableWriteConcern")) + .append("errmsg", new BsonString("Not enough data-bearing nodes")) + .append("errInfo", new BsonDocument("writeConcern", new BsonDocument("w", new BsonInt32(2)) + .append("wtimeout", new BsonInt32(0)) + .append("provenance", new BsonString("clientSupplied")))))); + try (MongoClient client = createMongoClient(getMongoClientSettingsBuilder()); + FailPoint ignored = FailPoint.enable(failPointDocument, getPrimary())) { + MongoWriteConcernException actual = assertThrows(MongoWriteConcernException.class, () -> + droppedCollection(client, Document.class).insertOne(Document.parse("{ x: 1 }"))); + assertEquals(actual.getWriteConcernError().getCode(), 100); + assertEquals("UnsatisfiableWriteConcern", actual.getWriteConcernError().getCodeName()); + assertEquals(actual.getWriteConcernError().getDetails(), new BsonDocument("writeConcern", new BsonDocument("w", new BsonInt32(2)) .append("wtimeout", new BsonInt32(0)) .append("provenance", new BsonString("clientSupplied")))); - } catch (Exception ex) { - fail(format("Incorrect exception thrown in test: %s", ex.getClass())); - } finally { - disableFailPoint(); } } - /** - * 2. 
WriteError.details exposes writeErrors[].errInfo - */ + @DisplayName("2. WriteError.details exposes writeErrors[].errInfo") @Test - public void testWriteErrorDetailsIsPropagated() { - getCollectionHelper().create(getCollectionName(), - new CreateCollectionOptions() - .validationOptions(new ValidationOptions() - .validator(Filters.type("x", "string")))); + void testWriteErrorDetailsIsPropagated() { + try (MongoClient client = createMongoClient(getMongoClientSettingsBuilder())) { + MongoCollection collection = droppedCollection(client, Document.class); + droppedDatabase(client).createCollection( + collection.getNamespace().getCollectionName(), + new CreateCollectionOptions().validationOptions(new ValidationOptions().validator(Filters.type("x", "string")))); + assertAll( + () -> { + MongoWriteException actual = assertThrows(MongoWriteException.class, () -> + collection.insertOne(new Document("x", 1))); + // These assertions don't do exactly what's required by the specification, + // but it's simpler to implement and nearly as effective. + assertTrue(actual.getMessage().contains("Write error")); + assertNotNull(actual.getError().getDetails()); + if (serverVersionAtLeast(5, 0)) { + assertFalse(actual.getError().getDetails().isEmpty()); + } + }, + () -> { + MongoBulkWriteException actual = assertThrows(MongoBulkWriteException.class, () -> + collection.insertMany(singletonList(new Document("x", 1)))); + // These assertions don't do exactly what's required by the specification, + // but it's simpler to implement and nearly as effective. + assertTrue(actual.getMessage().contains("Write errors")); + assertEquals(1, actual.getWriteErrors().size()); + if (serverVersionAtLeast(5, 0)) { + assertFalse(actual.getWriteErrors().get(0).getDetails().isEmpty()); + } + } + ); - try { - collection.insertOne(new Document("x", 1)); - fail("Should throw, as document doesn't match schema"); - } catch (MongoWriteException e) { - // These assertions doesn't do exactly what's required by the specification, but it's simpler to implement and nearly as - // effective - assertTrue(e.getMessage().contains("Write error")); - assertNotNull(e.getError().getDetails()); - if (serverVersionAtLeast(5, 0)) { - assertFalse(e.getError().getDetails().isEmpty()); + } + } + + @DisplayName("3. 
MongoClient.bulkWrite batch splits a writeModels input with greater than maxWriteBatchSize operations") + @Test + void testBulkWriteSplitsWhenExceedingMaxWriteBatchSize() { + assumeTrue(serverVersionAtLeast(8, 0)); + assumeFalse(isServerlessTest()); + TestCommandListener commandListener = new TestCommandListener(); + try (MongoClient client = createMongoClient(getMongoClientSettingsBuilder().addCommandListener(commandListener))) { + int maxWriteBatchSize = droppedDatabase(client).runCommand(new Document("hello", 1)).getInteger("maxWriteBatchSize"); + ClientBulkWriteResult result = client.bulkWrite(nCopies( + maxWriteBatchSize + 1, + insertOne(NAMESPACE, new Document("a", "b")))); + assertEquals(maxWriteBatchSize + 1, result.getInsertedCount()); + List startedBulkWriteCommandEvents = commandListener.getCommandStartedEvents("bulkWrite"); + assertEquals(2, startedBulkWriteCommandEvents.size()); + CommandStartedEvent firstEvent = startedBulkWriteCommandEvents.get(0); + CommandStartedEvent secondEvent = startedBulkWriteCommandEvents.get(1); + assertEquals(maxWriteBatchSize, firstEvent.getCommand().getArray("ops").size()); + assertEquals(1, secondEvent.getCommand().getArray("ops").size()); + assertEquals(firstEvent.getOperationId(), secondEvent.getOperationId()); + } + } + + @DisplayName("4. MongoClient.bulkWrite batch splits when an ops payload exceeds maxMessageSizeBytes") + @Test + void testBulkWriteSplitsWhenExceedingMaxMessageSizeBytes() { + assumeTrue(serverVersionAtLeast(8, 0)); + assumeFalse(isServerlessTest()); + TestCommandListener commandListener = new TestCommandListener(); + try (MongoClient client = createMongoClient(getMongoClientSettingsBuilder().addCommandListener(commandListener))) { + Document helloResponse = droppedDatabase(client).runCommand(new Document("hello", 1)); + int maxBsonObjectSize = helloResponse.getInteger("maxBsonObjectSize"); + int maxMessageSizeBytes = helloResponse.getInteger("maxMessageSizeBytes"); + ClientNamespacedWriteModel model = insertOne( + NAMESPACE, + new Document("a", join("", nCopies(maxBsonObjectSize - 500, "b")))); + int numModels = maxMessageSizeBytes / maxBsonObjectSize + 1; + ClientBulkWriteResult result = client.bulkWrite(nCopies(numModels, model)); + assertEquals(numModels, result.getInsertedCount()); + List startedBulkWriteCommandEvents = commandListener.getCommandStartedEvents("bulkWrite"); + assertEquals(2, startedBulkWriteCommandEvents.size()); + CommandStartedEvent firstEvent = startedBulkWriteCommandEvents.get(0); + CommandStartedEvent secondEvent = startedBulkWriteCommandEvents.get(1); + assertEquals(numModels - 1, firstEvent.getCommand().getArray("ops").size()); + assertEquals(1, secondEvent.getCommand().getArray("ops").size()); + assertEquals(firstEvent.getOperationId(), secondEvent.getOperationId()); + } + } + + @DisplayName("5. 
MongoClient.bulkWrite collects WriteConcernErrors across batches") + @Test + @SuppressWarnings("try") + protected void testBulkWriteCollectsWriteConcernErrorsAcrossBatches() throws InterruptedException { + assumeTrue(serverVersionAtLeast(8, 0)); + assumeFalse(isServerlessTest()); + TestCommandListener commandListener = new TestCommandListener(); + BsonDocument failPointDocument = new BsonDocument("configureFailPoint", new BsonString("failCommand")) + .append("mode", new BsonDocument("times", new BsonInt32(2))) + .append("data", new BsonDocument() + .append("failCommands", new BsonArray(singletonList(new BsonString("bulkWrite")))) + .append("writeConcernError", new BsonDocument("code", new BsonInt32(91)) + .append("errmsg", new BsonString("Replication is being shut down")))); + try (MongoClient client = createMongoClient(getMongoClientSettingsBuilder() + .retryWrites(false) + .addCommandListener(commandListener)); + FailPoint ignored = FailPoint.enable(failPointDocument, getPrimary())) { + int maxWriteBatchSize = droppedDatabase(client).runCommand(new Document("hello", 1)).getInteger("maxWriteBatchSize"); + ClientNamespacedWriteModel model = insertOne(NAMESPACE, new Document("a", "b")); + int numModels = maxWriteBatchSize + 1; + ClientBulkWriteException error = assertThrows(ClientBulkWriteException.class, () -> + client.bulkWrite(nCopies(numModels, model))); + assertEquals(2, error.getWriteConcernErrors().size()); + ClientBulkWriteResult partialResult = error.getPartialResult() + .orElseThrow(org.junit.jupiter.api.Assertions::fail); + assertEquals(numModels, partialResult.getInsertedCount()); + assertEquals(2, commandListener.getCommandStartedEvents("bulkWrite").size()); + } + } + + @DisplayName("6. MongoClient.bulkWrite handles individual WriteErrors across batches") + @ParameterizedTest + @ValueSource(booleans = {false, true}) + protected void testBulkWriteHandlesWriteErrorsAcrossBatches(final boolean ordered) { + assumeTrue(serverVersionAtLeast(8, 0)); + assumeFalse(isServerlessTest()); + TestCommandListener commandListener = new TestCommandListener(); + try (MongoClient client = createMongoClient(getMongoClientSettingsBuilder() + .retryWrites(false) + .addCommandListener(commandListener))) { + int maxWriteBatchSize = droppedDatabase(client).runCommand(new Document("hello", 1)).getInteger("maxWriteBatchSize"); + Document document = new Document("_id", 1); + MongoCollection collection = droppedCollection(client, Document.class); + collection.insertOne(document); + ClientNamespacedWriteModel model = insertOne(collection.getNamespace(), document); + int numModels = maxWriteBatchSize + 1; + ClientBulkWriteException error = assertThrows(ClientBulkWriteException.class, () -> + client.bulkWrite(nCopies(numModels, model), clientBulkWriteOptions().ordered(ordered))); + int expectedWriteErrorCount = ordered ? 1 : numModels; + int expectedCommandStartedEventCount = ordered ? 1 : 2; + assertEquals(expectedWriteErrorCount, error.getWriteErrors().size()); + assertEquals(expectedCommandStartedEventCount, commandListener.getCommandStartedEvents("bulkWrite").size()); + } + } + + @DisplayName("7. MongoClient.bulkWrite handles a cursor requiring a getMore") + @Test + void testBulkWriteHandlesCursorRequiringGetMore() { + assumeTrue(serverVersionAtLeast(8, 0)); + assumeFalse(isServerlessTest()); + assertBulkWriteHandlesCursorRequiringGetMore(false); + } + + @DisplayName("8. 
MongoClient.bulkWrite handles a cursor requiring getMore within a transaction") + @Test + protected void testBulkWriteHandlesCursorRequiringGetMoreWithinTransaction() { + assumeTrue(serverVersionAtLeast(8, 0)); + assumeFalse(isServerlessTest()); + assumeFalse(isStandalone()); + assertBulkWriteHandlesCursorRequiringGetMore(true); + } + + private void assertBulkWriteHandlesCursorRequiringGetMore(final boolean transaction) { + TestCommandListener commandListener = new TestCommandListener(); + try (MongoClient client = createMongoClient(getMongoClientSettingsBuilder() + .retryWrites(false) + .addCommandListener(commandListener))) { + int maxBsonObjectSize = droppedDatabase(client).runCommand(new Document("hello", 1)).getInteger("maxBsonObjectSize"); + try (ClientSession session = transaction ? client.startSession() : null) { + BiFunction, ClientBulkWriteOptions, ClientBulkWriteResult> bulkWrite = + (models, options) -> session == null + ? client.bulkWrite(models, options) + : client.bulkWrite(session, models, options); + Supplier action = () -> bulkWrite.apply(asList( + ClientNamespacedWriteModel.updateOne( + NAMESPACE, + Filters.eq(join("", nCopies(maxBsonObjectSize / 2, "a"))), + Updates.set("x", 1), + clientUpdateOneOptions().upsert(true)), + ClientNamespacedWriteModel.updateOne( + NAMESPACE, + Filters.eq(join("", nCopies(maxBsonObjectSize / 2, "b"))), + Updates.set("x", 1), + clientUpdateOneOptions().upsert(true))), + clientBulkWriteOptions().verboseResults(true) + ); + + ClientBulkWriteResult result = transaction ? runInTransaction(session, action) : action.get(); + assertEquals(2, result.getUpsertedCount()); + assertEquals(2, result.getVerboseResults().orElseThrow(Assertions::fail).getUpdateResults().size()); + assertEquals(1, commandListener.getCommandStartedEvents("bulkWrite").size()); } } + } - try { - collection.insertMany(asList(new Document("x", 1))); - fail("Should throw, as document doesn't match schema"); - } catch (MongoBulkWriteException e) { - // These assertions doesn't do exactly what's required by the specification, but it's simpler to implement and nearly as - // effective - assertTrue(e.getMessage().contains("Write errors")); - assertEquals(1, e.getWriteErrors().size()); - if (serverVersionAtLeast(5, 0)) { - assertFalse(e.getWriteErrors().get(0).getDetails().isEmpty()); + @DisplayName("11. 
MongoClient.bulkWrite batch splits when the addition of a new namespace exceeds the maximum message size") + @Test + protected void testBulkWriteSplitsWhenExceedingMaxMessageSizeBytesDueToNsInfo() { + assumeTrue(serverVersionAtLeast(8, 0)); + assumeFalse(isServerlessTest()); + assertAll( + () -> { + // Case 1: No batch-splitting required + testBulkWriteSplitsWhenExceedingMaxMessageSizeBytesDueToNsInfo((client, models, commandListener) -> { + models.add(insertOne(NAMESPACE, new Document("a", "b"))); + ClientBulkWriteResult result = client.bulkWrite(models); + assertEquals(models.size(), result.getInsertedCount()); + List startedBulkWriteCommandEvents = commandListener.getCommandStartedEvents("bulkWrite"); + assertEquals(1, startedBulkWriteCommandEvents.size()); + CommandStartedEvent event = startedBulkWriteCommandEvents.get(0); + BsonDocument command = event.getCommand(); + assertEquals(models.size(), command.getArray("ops").asArray().size()); + BsonArray nsInfo = command.getArray("nsInfo").asArray(); + assertEquals(1, nsInfo.size()); + assertEquals(NAMESPACE.getFullName(), nsInfo.get(0).asDocument().getString("ns").getValue()); + }); + }, + () -> { + // Case 2: Batch-splitting required + testBulkWriteSplitsWhenExceedingMaxMessageSizeBytesDueToNsInfo((client, models, commandListener) -> { + MongoNamespace namespace = new MongoNamespace(NAMESPACE.getDatabaseName(), join("", nCopies(200, "c"))); + models.add(insertOne(namespace, new Document("a", "b"))); + ClientBulkWriteResult result = client.bulkWrite(models); + assertEquals(models.size(), result.getInsertedCount()); + List startedBulkWriteCommandEvents = commandListener.getCommandStartedEvents("bulkWrite"); + assertEquals(2, startedBulkWriteCommandEvents.size()); + BsonDocument firstEventCommand = startedBulkWriteCommandEvents.get(0).getCommand(); + assertEquals(models.size() - 1, firstEventCommand.getArray("ops").asArray().size()); + BsonArray firstNsInfo = firstEventCommand.getArray("nsInfo").asArray(); + assertEquals(1, firstNsInfo.size()); + assertEquals(NAMESPACE.getFullName(), firstNsInfo.get(0).asDocument().getString("ns").getValue()); + BsonDocument secondEventCommand = startedBulkWriteCommandEvents.get(1).getCommand(); + assertEquals(1, secondEventCommand.getArray("ops").asArray().size()); + BsonArray secondNsInfo = secondEventCommand.getArray("nsInfo").asArray(); + assertEquals(1, secondNsInfo.size()); + assertEquals(namespace.getFullName(), secondNsInfo.get(0).asDocument().getString("ns").getValue()); + }); + } + ); + } + + private void testBulkWriteSplitsWhenExceedingMaxMessageSizeBytesDueToNsInfo( + final TriConsumer, TestCommandListener> test) { + TestCommandListener commandListener = new TestCommandListener(); + try (MongoClient client = createMongoClient(getMongoClientSettingsBuilder().addCommandListener(commandListener))) { + Document helloResponse = droppedDatabase(client).runCommand(new Document("hello", 1)); + int maxBsonObjectSize = helloResponse.getInteger("maxBsonObjectSize"); + int maxMessageSizeBytes = helloResponse.getInteger("maxMessageSizeBytes"); + int opsBytes = maxMessageSizeBytes - 1122; + int numModels = opsBytes / maxBsonObjectSize; + int remainderBytes = opsBytes % maxBsonObjectSize; + List models = new ArrayList<>(nCopies( + numModels, + insertOne( + NAMESPACE, + new Document("a", join("", nCopies(maxBsonObjectSize - 57, "b")))))); + if (remainderBytes >= 217) { + models.add(insertOne( + NAMESPACE, + new Document("a", join("", nCopies(remainderBytes - 57, "b"))))); } + test.accept(client, models, 
commandListener); + } + } + + @DisplayName("12. MongoClient.bulkWrite returns an error if no operations can be added to ops") + @ParameterizedTest + @ValueSource(strings = {"document", "namespace"}) + protected void testBulkWriteSplitsErrorsForTooLargeOpsOrNsInfo(final String tooLarge) { + assumeTrue(serverVersionAtLeast(8, 0)); + assumeFalse(isServerlessTest()); + try (MongoClient client = createMongoClient(getMongoClientSettingsBuilder())) { + int maxMessageSizeBytes = droppedDatabase(client).runCommand(new Document("hello", 1)).getInteger("maxMessageSizeBytes"); + ClientNamespacedWriteModel model; + switch (tooLarge) { + case "document": { + model = insertOne( + NAMESPACE, + new Document("a", join("", nCopies(maxMessageSizeBytes, "b")))); + break; + } + case "namespace": { + model = insertOne( + new MongoNamespace(NAMESPACE.getDatabaseName(), join("", nCopies(maxMessageSizeBytes, "b"))), + new Document("a", "b")); + break; + } + default: { + throw Assertions.fail(tooLarge); + } + } + assertThrows(BsonMaximumSizeExceededException.class, () -> client.bulkWrite(singletonList(model))); + } + } + + @DisplayName("13. MongoClient.bulkWrite returns an error if auto-encryption is configured") + @Test + protected void testBulkWriteErrorsForAutoEncryption() { + assumeTrue(serverVersionAtLeast(8, 0)); + assumeFalse(isServerlessTest()); + HashMap awsKmsProviderProperties = new HashMap<>(); + awsKmsProviderProperties.put("accessKeyId", "foo"); + awsKmsProviderProperties.put("secretAccessKey", "bar"); + try (MongoClient client = createMongoClient(getMongoClientSettingsBuilder() + .autoEncryptionSettings(AutoEncryptionSettings.builder() + .keyVaultNamespace(NAMESPACE.getFullName()) + .kmsProviders(singletonMap("aws", awsKmsProviderProperties)) + .build()))) { + assertTrue( + assertThrows( + IllegalStateException.class, + () -> client.bulkWrite(singletonList(insertOne(NAMESPACE, new Document("a", "b")))) + ).getMessage().contains("bulkWrite does not currently support automatic encryption") + ); + } + } + + @DisplayName("15. 
MongoClient.bulkWrite with unacknowledged write concern uses w:0 for all batches") + @Test + protected void testWriteConcernOfAllBatchesWhenUnacknowledgedRequested() { + assumeTrue(serverVersionAtLeast(8, 0)); + assumeFalse(isServerlessTest()); + TestCommandListener commandListener = new TestCommandListener(); + try (MongoClient client = createMongoClient(getMongoClientSettingsBuilder().addCommandListener(commandListener) + .writeConcern(WriteConcern.UNACKNOWLEDGED))) { + MongoDatabase database = droppedDatabase(client); + database.createCollection(NAMESPACE.getCollectionName()); + Document helloResponse = database.runCommand(new Document("hello", 1)); + int maxBsonObjectSize = helloResponse.getInteger("maxBsonObjectSize"); + int maxMessageSizeBytes = helloResponse.getInteger("maxMessageSizeBytes"); + ClientNamespacedWriteModel model = insertOne( + NAMESPACE, + new Document("a", join("", nCopies(maxBsonObjectSize - 500, "b")))); + int numModels = maxMessageSizeBytes / maxBsonObjectSize + 1; + ClientBulkWriteResult result = client.bulkWrite(nCopies(numModels, model), clientBulkWriteOptions().ordered(false)); + assertFalse(result.isAcknowledged()); + List startedBulkWriteCommandEvents = commandListener.getCommandStartedEvents("bulkWrite"); + assertEquals(2, startedBulkWriteCommandEvents.size()); + CommandStartedEvent firstEvent = startedBulkWriteCommandEvents.get(0); + BsonDocument firstCommand = firstEvent.getCommand(); + CommandStartedEvent secondEvent = startedBulkWriteCommandEvents.get(1); + BsonDocument secondCommand = secondEvent.getCommand(); + assertEquals(numModels - 1, firstCommand.getArray("ops").size()); + assertEquals(1, secondCommand.getArray("ops").size()); + assertEquals(firstEvent.getOperationId(), secondEvent.getOperationId()); + assertEquals(0, firstCommand.getDocument("writeConcern").getInt32("w").intValue()); + assertEquals(0, secondCommand.getDocument("writeConcern").getInt32("w").intValue()); + assertEquals(numModels, database.getCollection(NAMESPACE.getCollectionName()).countDocuments()); } } /** * This test is not from the specification. 
*/ - @Test - @SuppressWarnings("try") - void insertMustGenerateIdAtMostOnce() throws ExecutionException, InterruptedException { + @ParameterizedTest + @MethodSource("insertMustGenerateIdAtMostOnceArgs") + protected void insertMustGenerateIdAtMostOnce( + final Class documentClass, + final boolean expectIdGenerated, + final Supplier documentSupplier) { + assumeTrue(serverVersionAtLeast(8, 0)); assumeTrue(isDiscoverableReplicaSet()); - ServerAddress primaryServerAddress = Fixture.getPrimary(); - CompletableFuture futureIdGeneratedByFirstInsertAttempt = new CompletableFuture<>(); - CompletableFuture futureIdGeneratedBySecondInsertAttempt = new CompletableFuture<>(); - CommandListener commandListener = new CommandListener() { - @Override - public void commandStarted(final CommandStartedEvent event) { - if (event.getCommandName().equals("insert")) { - BsonValue generatedId = event.getCommand().getArray("documents").get(0).asDocument().get("_id"); - if (!futureIdGeneratedByFirstInsertAttempt.isDone()) { - futureIdGeneratedByFirstInsertAttempt.complete(generatedId); - } else { - futureIdGeneratedBySecondInsertAttempt.complete(generatedId); - } - } - } - }; + assertAll( + () -> assertInsertMustGenerateIdAtMostOnce("insert", documentClass, expectIdGenerated, + (client, collection) -> collection.insertOne(documentSupplier.get()).getInsertedId()), + () -> assertInsertMustGenerateIdAtMostOnce("insert", documentClass, expectIdGenerated, + (client, collection) -> collection.bulkWrite( + singletonList(new InsertOneModel<>(documentSupplier.get()))) + .getInserts().get(0).getId()), + () -> assertInsertMustGenerateIdAtMostOnce("bulkWrite", documentClass, expectIdGenerated, + (client, collection) -> client.bulkWrite( + singletonList(insertOne(collection.getNamespace(), documentSupplier.get())), + clientBulkWriteOptions().verboseResults(true)) + .getVerboseResults().orElseThrow(Assertions::fail).getInsertResults().get(0).getInsertedId().orElse(null)) + ); + } + + private static Stream insertMustGenerateIdAtMostOnceArgs() { + CodecRegistry codecRegistry = fromRegistries( + getDefaultCodecRegistry(), + fromProviders(PojoCodecProvider.builder().automatic(true).build())); + return Stream.of( + arguments(MyDocument.class, true, (Supplier) MyDocument::new), + arguments(Document.class, true, (Supplier) Document::new), + arguments(BsonDocument.class, true, (Supplier) BsonDocument::new), + arguments( + BsonDocumentWrapper.class, true, + (Supplier>) () -> + new BsonDocumentWrapper<>(new MyDocument(), codecRegistry.get(MyDocument.class))), + arguments( + RawBsonDocument.class, false, + (Supplier) () -> + new RawBsonDocument(new MyDocument(), codecRegistry.get(MyDocument.class))) + ); + } + + @SuppressWarnings("try") + private void assertInsertMustGenerateIdAtMostOnce( + final String commandName, + final Class documentClass, + final boolean expectIdGenerated, + final BiFunction, BsonValue> insertOperation) throws InterruptedException { + TestCommandListener commandListener = new TestCommandListener(); BsonDocument failPointDocument = new BsonDocument("configureFailPoint", new BsonString("failCommand")) .append("mode", new BsonDocument("times", new BsonInt32(1))) .append("data", new BsonDocument() - .append("failCommands", new BsonArray(singletonList(new BsonString("insert")))) + .append("failCommands", new BsonArray(singletonList(new BsonString(commandName)))) .append("errorLabels", new BsonArray(singletonList(new BsonString("RetryableWriteError")))) .append("writeConcernError", new BsonDocument("code", new BsonInt32(91)) 
.append("errmsg", new BsonString("Replication is being shut down")))); - try (MongoClient client = MongoClients.create(getMongoClientSettingsBuilder() + try (MongoClient client = createMongoClient(getMongoClientSettingsBuilder() .retryWrites(true) .addCommandListener(commandListener) .applyToServerSettings(builder -> builder.heartbeatFrequency(50, TimeUnit.MILLISECONDS)) - .build()); - FailPoint ignored = FailPoint.enable(failPointDocument, primaryServerAddress)) { - MongoCollection coll = client.getDatabase(database.getName()) - .getCollection(collection.getNamespace().getCollectionName(), MyDocument.class) - .withCodecRegistry(fromRegistries( - getDefaultCodecRegistry(), - fromProviders(PojoCodecProvider.builder().automatic(true).build()))); - BsonValue insertedId = coll.insertOne(new MyDocument()).getInsertedId(); - BsonValue idGeneratedByFirstInsertAttempt = futureIdGeneratedByFirstInsertAttempt.get(); - assertEquals(idGeneratedByFirstInsertAttempt, insertedId); - assertEquals(idGeneratedByFirstInsertAttempt, futureIdGeneratedBySecondInsertAttempt.get()); + .codecRegistry(fromRegistries( + getDefaultCodecRegistry(), + fromProviders(PojoCodecProvider.builder().automatic(true).build())))); + FailPoint ignored = FailPoint.enable(failPointDocument, getPrimary())) { + MongoCollection collection = droppedCollection(client, documentClass); + BsonValue insertedId = insertOperation.apply(client, collection); + if (expectIdGenerated) { + assertNotNull(insertedId); + } else { + assertNull(insertedId); + } + List startedCommandEvents = commandListener.getCommandStartedEvents(commandName); + assertEquals(2, startedCommandEvents.size()); + Function idFromCommand; + switch (commandName) { + case "insert": { + idFromCommand = command -> command.getArray("documents").get(0).asDocument().get("_id"); + break; + } + case "bulkWrite": { + idFromCommand = command -> command.getArray("ops").get(0).asDocument().getDocument("document").get("_id"); + break; + } + default: { + throw Assertions.fail(commandName); + } + } + CommandStartedEvent firstEvent = startedCommandEvents.get(0); + CommandStartedEvent secondEvent = startedCommandEvents.get(1); + assertEquals(insertedId, idFromCommand.apply(firstEvent.getCommand())); + assertEquals(insertedId, idFromCommand.apply(secondEvent.getCommand())); } } - private void setFailPoint() { - failPointDocument = new BsonDocument("configureFailPoint", new BsonString("failCommand")) - .append("mode", new BsonDocument("times", new BsonInt32(1))) - .append("data", new BsonDocument("failCommands", new BsonArray(asList(new BsonString("insert")))) - .append("writeConcernError", new BsonDocument("code", new BsonInt32(100)) - .append("codeName", new BsonString("UnsatisfiableWriteConcern")) - .append("errmsg", new BsonString("Not enough data-bearing nodes")) - .append("errInfo", new BsonDocument("writeConcern", new BsonDocument("w", new BsonInt32(2)) - .append("wtimeout", new BsonInt32(0)) - .append("provenance", new BsonString("clientSupplied")))))); - getCollectionHelper().runAdminCommand(failPointDocument); + protected MongoClient createMongoClient(final MongoClientSettings.Builder mongoClientSettingsBuilder) { + return MongoClients.create(mongoClientSettingsBuilder.build()); + } + + private MongoCollection droppedCollection(final MongoClient client, final Class documentClass) { + return droppedDatabase(client).getCollection(NAMESPACE.getCollectionName(), documentClass); } - private void disableFailPoint() { - getCollectionHelper().runAdminCommand(failPointDocument.append("mode", 
new BsonString("off"))); + private MongoDatabase droppedDatabase(final MongoClient client) { + MongoDatabase database = client.getDatabase(NAMESPACE.getDatabaseName()); + database.drop(); + return database; } public static final class MyDocument { @@ -203,4 +591,26 @@ public int getV() { return v; } } + + @FunctionalInterface + private interface TriConsumer { + void accept(A1 a1, A2 a2, A3 a3); + } + + /** + * This method is used instead of {@link ClientSession#withTransaction(TransactionBody)} + * because reactive {@code com.mongodb.reactivestreams.client.ClientSession} do not support it. + */ + private static ClientBulkWriteResult runInTransaction(final ClientSession session, + final Supplier action) { + session.startTransaction(); + try { + ClientBulkWriteResult result = action.get(); + session.commitTransaction(); + return result; + } catch (Throwable throwable) { + session.abortTransaction(); + throw throwable; + } + } } diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/ErrorMatcher.java b/driver-sync/src/test/functional/com/mongodb/client/unified/ErrorMatcher.java index 75d264487f8..d82e4c6beb1 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/unified/ErrorMatcher.java +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/ErrorMatcher.java @@ -16,6 +16,7 @@ package com.mongodb.client.unified; +import com.mongodb.ClientBulkWriteException; import com.mongodb.MongoBulkWriteException; import com.mongodb.MongoClientException; import com.mongodb.MongoCommandException; @@ -27,23 +28,33 @@ import com.mongodb.MongoSocketException; import com.mongodb.MongoWriteConcernException; import com.mongodb.MongoWriteException; +import com.mongodb.WriteError; +import com.mongodb.bulk.WriteConcernError; import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonString; import org.bson.BsonValue; import java.util.HashSet; +import java.util.List; import java.util.Locale; +import java.util.Map; import java.util.Set; +import static java.lang.Integer.parseInt; import static java.util.Arrays.asList; +import static java.util.stream.Collectors.toList; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; -import static org.spockframework.util.Assert.fail; +import static org.junit.jupiter.api.Assertions.fail; final class ErrorMatcher { private static final Set EXPECTED_ERROR_FIELDS = new HashSet<>( asList("isError", "expectError", "isClientError", "errorCode", "errorCodeName", "errorContains", "errorResponse", - "isClientError", "isTimeoutError", "errorLabelsOmit", "errorLabelsContain", "expectResult")); + "isClientError", "isTimeoutError", "errorLabelsOmit", "errorLabelsContain", + "writeErrors", "writeConcernErrors", "expectResult")); private final AssertionContext context; private final ValueMatcher valueMatcher; @@ -134,13 +145,55 @@ void assertErrorsMatch(final BsonDocument expectedError, final Exception e) { mongoException.hasErrorLabel(cur.asString().getValue())); } } + if (expectedError.containsKey("writeErrors")) { + assertTrue(context.getMessage("Exception must be of type ClientBulkWriteException when checking for write errors"), + e instanceof ClientBulkWriteException); + BsonDocument writeErrors = expectedError.getDocument("writeErrors"); + ClientBulkWriteException actualException = (ClientBulkWriteException) e; + Map actualWriteErrors = actualException.getWriteErrors(); + assertEquals("The number of write errors 
must match", writeErrors.size(), actualWriteErrors.size()); + writeErrors.forEach((index, writeError) -> { + WriteError actualWriteError = actualWriteErrors.get(parseInt(index)); + assertNotNull("Expected a write error with index " + index, actualWriteError); + valueMatcher.assertValuesMatch(writeError, toMatchableValue(actualWriteError)); + }); + } + if (expectedError.containsKey("writeConcernErrors")) { + assertTrue(context.getMessage("Exception must be of type ClientBulkWriteException when checking for write errors"), + e instanceof ClientBulkWriteException); + List writeConcernErrors = expectedError.getArray("writeConcernErrors").stream() + .map(BsonValue::asDocument).collect(toList()); + ClientBulkWriteException actualException = (ClientBulkWriteException) e; + List actualWriteConcernErrors = actualException.getWriteConcernErrors(); + assertEquals("The number of write concern errors must match", writeConcernErrors.size(), actualWriteConcernErrors.size()); + for (int index = 0; index < writeConcernErrors.size(); index++) { + BsonDocument writeConcernError = writeConcernErrors.get(index); + WriteConcernError actualWriteConcernError = actualWriteConcernErrors.get(index); + valueMatcher.assertValuesMatch(writeConcernError, toMatchableValue(actualWriteConcernError)); + } + } if (expectedError.containsKey("expectResult")) { - // Neither MongoBulkWriteException nor MongoSocketException includes information about the successful writes, so this - // is the only check that can currently be done - assertTrue(context.getMessage("Exception must be of type MongoBulkWriteException or MongoSocketException " - + "when checking for results, but actual type is " + e.getClass().getSimpleName()), - e instanceof MongoBulkWriteException || e instanceof MongoSocketException); + assertTrue(context.getMessage("Exception must be of type" + + " MongoBulkWriteException, or MongoSocketException, or ClientBulkWriteException" + + " when checking for results, but actual type is " + e.getClass().getSimpleName()), + e instanceof MongoBulkWriteException || e instanceof ClientBulkWriteException || e instanceof MongoSocketException); + // neither `MongoBulkWriteException` nor `MongoSocketException` includes information about the successful individual operations + if (e instanceof ClientBulkWriteException) { + BsonDocument actualPartialResult = ((ClientBulkWriteException) e).getPartialResult() + .map(UnifiedCrudHelper::toMatchableValue) + .orElse(new BsonDocument()); + valueMatcher.assertValuesMatch(expectedError.getDocument("expectResult"), actualPartialResult); + } } context.pop(); } + + private static BsonDocument toMatchableValue(final WriteError writeError) { + return new BsonDocument("code", new BsonInt32(writeError.getCode())); + } + + private static BsonDocument toMatchableValue(final WriteConcernError writeConcernError) { + return new BsonDocument("code", new BsonInt32(writeConcernError.getCode())) + .append("message", new BsonString(writeConcernError.getMessage())); + } } diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedCrudHelper.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedCrudHelper.java index 192bde29e5e..5c925d97272 100644 --- a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedCrudHelper.java +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedCrudHelper.java @@ -27,6 +27,7 @@ import com.mongodb.TagSet; import com.mongodb.TransactionOptions; import com.mongodb.WriteConcern; +import 
com.mongodb.assertions.Assertions; import com.mongodb.bulk.BulkWriteResult; import com.mongodb.client.AggregateIterable; import com.mongodb.client.ChangeStreamIterable; @@ -75,6 +76,15 @@ import com.mongodb.client.model.UpdateOneModel; import com.mongodb.client.model.UpdateOptions; import com.mongodb.client.model.WriteModel; +import com.mongodb.client.model.bulk.ClientBulkWriteOptions; +import com.mongodb.client.model.bulk.ClientBulkWriteResult; +import com.mongodb.client.model.bulk.ClientDeleteManyOptions; +import com.mongodb.client.model.bulk.ClientDeleteOneOptions; +import com.mongodb.client.model.bulk.ClientNamespacedWriteModel; +import com.mongodb.client.model.bulk.ClientReplaceOneOptions; +import com.mongodb.client.model.bulk.ClientUpdateManyOptions; +import com.mongodb.client.model.bulk.ClientUpdateOneOptions; +import com.mongodb.client.model.bulk.ClientUpdateResult; import com.mongodb.client.model.changestream.ChangeStreamDocument; import com.mongodb.client.model.changestream.FullDocument; import com.mongodb.client.model.changestream.FullDocumentBeforeChange; @@ -82,7 +92,14 @@ import com.mongodb.client.result.InsertManyResult; import com.mongodb.client.result.InsertOneResult; import com.mongodb.client.result.UpdateResult; +import com.mongodb.internal.client.model.bulk.AbstractClientDeleteOptions; +import com.mongodb.internal.client.model.bulk.AbstractClientUpdateOptions; +import com.mongodb.internal.client.model.bulk.ConcreteClientDeleteManyOptions; +import com.mongodb.internal.client.model.bulk.ConcreteClientDeleteOneOptions; +import com.mongodb.internal.client.model.bulk.ConcreteClientUpdateManyOptions; +import com.mongodb.internal.client.model.bulk.ConcreteClientUpdateOneOptions; import com.mongodb.lang.NonNull; +import com.mongodb.lang.Nullable; import org.bson.BsonArray; import org.bson.BsonDocument; import org.bson.BsonDocumentWriter; @@ -101,15 +118,21 @@ import java.lang.reflect.Method; import java.util.ArrayList; import java.util.Collections; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Optional; +import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Supplier; +import static com.mongodb.client.model.bulk.ClientBulkWriteOptions.clientBulkWriteOptions; +import static com.mongodb.client.model.bulk.ClientReplaceOneOptions.clientReplaceOneOptions; +import static java.lang.String.format; import static java.util.Arrays.asList; +import static java.util.Collections.singleton; import static java.util.Objects.requireNonNull; import static java.util.stream.Collectors.toList; @@ -215,6 +238,7 @@ private OperationResult resultOf(final Supplier operationResult) { } } + @Nullable private ClientSession getSession(final BsonDocument arguments) { if (arguments.containsKey("session")) { return entities.getSession(arguments.getString("session").asString().getValue()); @@ -1773,6 +1797,252 @@ public OperationResult createChangeStreamCursor(final BsonDocument operation) { }); } + public OperationResult clientBulkWrite(final BsonDocument operation) { + Set unexpectedOperationKeys = singleton("saveResultAsEntity"); + if (operation.keySet().stream().anyMatch(unexpectedOperationKeys::contains)) { + throw new UnsupportedOperationException("Unexpected field in operation. 
One of " + unexpectedOperationKeys); + } + String clientId = operation.getString("object").getValue(); + MongoCluster cluster = entities.getClient(clientId); + BsonDocument arguments = operation.getDocument("arguments"); + ClientSession session = getSession(arguments); + List models = arguments.getArray("models").stream() + .map(BsonValue::asDocument) + .map(UnifiedCrudHelper::toClientNamespacedWriteModel) + .collect(toList()); + ClientBulkWriteOptions options = clientBulkWriteOptions(); + for (Map.Entry entry : arguments.entrySet()) { + String key = entry.getKey(); + BsonValue argument = entry.getValue(); + switch (key) { + case "models": + case "session": + break; + case "writeConcern": + cluster = cluster.withWriteConcern(asWriteConcern(argument.asDocument())); + break; + case "ordered": + options.ordered(argument.asBoolean().getValue()); + break; + case "bypassDocumentValidation": + options.bypassDocumentValidation(argument.asBoolean().getValue()); + break; + case "let": + options.let(argument.asDocument()); + break; + case "comment": + options.comment(argument); + break; + case "verboseResults": + options.verboseResults(argument.asBoolean().getValue()); + break; + default: + throw new UnsupportedOperationException(format("Unsupported argument: key=%s, argument=%s", key, argument)); + } + } + MongoCluster clusterWithWriteConcern = cluster; + return resultOf(() -> { + if (session == null) { + return toMatchableValue(clusterWithWriteConcern.bulkWrite(models, options)); + } else { + return toMatchableValue(clusterWithWriteConcern.bulkWrite(session, models, options)); + } + }); + } + + private static ClientNamespacedWriteModel toClientNamespacedWriteModel(final BsonDocument model) { + String modelType = model.getFirstKey(); + BsonDocument arguments = model.getDocument(modelType); + MongoNamespace namespace = new MongoNamespace(arguments.getString("namespace").getValue()); + switch (modelType) { + case "insertOne": + Set expectedArguments = new HashSet<>(asList("namespace", "document")); + if (!expectedArguments.containsAll(arguments.keySet())) { + // for other `modelType`s a conceptually similar check is done when creating their options objects + throw new UnsupportedOperationException("Unsupported argument, one of: " + arguments.keySet()); + } + return ClientNamespacedWriteModel.insertOne( + namespace, + arguments.getDocument("document")); + case "replaceOne": + return ClientNamespacedWriteModel.replaceOne( + namespace, + arguments.getDocument("filter"), + arguments.getDocument("replacement"), + getClientReplaceOneOptions(arguments)); + case "updateOne": + return arguments.isDocument("update") + ? ClientNamespacedWriteModel.updateOne( + namespace, + arguments.getDocument("filter"), + arguments.getDocument("update"), + getClientUpdateOneOptions(arguments)) + : ClientNamespacedWriteModel.updateOne( + namespace, + arguments.getDocument("filter"), + arguments.getArray("update").stream().map(BsonValue::asDocument).collect(toList()), + getClientUpdateOneOptions(arguments)); + case "updateMany": + return arguments.isDocument("update") + ? 
+            case "updateMany":
+                return arguments.isDocument("update")
+                        ? ClientNamespacedWriteModel.updateMany(
+                                namespace,
+                                arguments.getDocument("filter"),
+                                arguments.getDocument("update"),
+                                getClientUpdateManyOptions(arguments))
+                        : ClientNamespacedWriteModel.updateMany(
+                                namespace,
+                                arguments.getDocument("filter"),
+                                arguments.getArray("update").stream().map(BsonValue::asDocument).collect(toList()),
+                                getClientUpdateManyOptions(arguments));
+            case "deleteOne":
+                return ClientNamespacedWriteModel.deleteOne(
+                        namespace,
+                        arguments.getDocument("filter"),
+                        getClientDeleteOneOptions(arguments));
+            case "deleteMany":
+                return ClientNamespacedWriteModel.deleteMany(
+                        namespace,
+                        arguments.getDocument("filter"),
+                        getClientDeleteManyOptions(arguments));
+            default:
+                throw new UnsupportedOperationException("Unsupported client write model type: " + modelType);
+        }
+    }
+
+    private static ClientReplaceOneOptions getClientReplaceOneOptions(final BsonDocument arguments) {
+        ClientReplaceOneOptions options = clientReplaceOneOptions();
+        arguments.forEach((key, argument) -> {
+            switch (key) {
+                case "namespace":
+                case "filter":
+                case "replacement":
+                    break;
+                case "collation":
+                    options.collation(asCollation(argument.asDocument()));
+                    break;
+                case "hint":
+                    if (argument.isDocument()) {
+                        options.hint(argument.asDocument());
+                    } else {
+                        options.hintString(argument.asString().getValue());
+                    }
+                    break;
+                case "upsert":
+                    options.upsert(argument.asBoolean().getValue());
+                    break;
+                default:
+                    throw new UnsupportedOperationException(format("Unsupported argument: key=%s, argument=%s", key, argument));
+            }
+        });
+        return options;
+    }
+
+    private static ClientUpdateOneOptions getClientUpdateOneOptions(final BsonDocument arguments) {
+        return fillAbstractClientUpdateOptions(new ConcreteClientUpdateOneOptions(), arguments);
+    }
+
+    private static ClientUpdateManyOptions getClientUpdateManyOptions(final BsonDocument arguments) {
+        return fillAbstractClientUpdateOptions(new ConcreteClientUpdateManyOptions(), arguments);
+    }
+
+    private static <T extends AbstractClientUpdateOptions> T fillAbstractClientUpdateOptions(
+            final T options,
+            final BsonDocument arguments) {
+        arguments.forEach((key, argument) -> {
+            switch (key) {
+                case "namespace":
+                case "filter":
+                case "update":
+                    break;
+                case "arrayFilters":
+                    options.arrayFilters(argument.asArray().stream().map(BsonValue::asDocument).collect(toList()));
+                    break;
+                case "collation":
+                    options.collation(asCollation(argument.asDocument()));
+                    break;
+                case "hint":
+                    if (argument.isDocument()) {
+                        options.hint(argument.asDocument());
+                    } else {
+                        options.hintString(argument.asString().getValue());
+                    }
+                    break;
+                case "upsert":
+                    options.upsert(argument.asBoolean().getValue());
+                    break;
+                default:
+                    throw new UnsupportedOperationException(format("Unsupported argument: key=%s, argument=%s", key, argument));
+            }
+        });
+        return options;
+    }
+
+    private static ClientDeleteOneOptions getClientDeleteOneOptions(final BsonDocument arguments) {
+        return fillAbstractClientDeleteOptions(new ConcreteClientDeleteOneOptions(), arguments);
+    }
+
+    private static ClientDeleteManyOptions getClientDeleteManyOptions(final BsonDocument arguments) {
+        return fillAbstractClientDeleteOptions(new ConcreteClientDeleteManyOptions(), arguments);
+    }
+
+    private static <T extends AbstractClientDeleteOptions> T fillAbstractClientDeleteOptions(
+            final T options,
+            final BsonDocument arguments) {
+        arguments.forEach((key, argument) -> {
+            switch (key) {
+                case "namespace":
+                case "filter":
+                    break;
+                case "collation":
+                    options.collation(asCollation(argument.asDocument()));
+                    break;
+                case "hint":
+                    if (argument.isDocument()) {
+                        options.hint(argument.asDocument());
+                    } else {
+                        options.hintString(argument.asString().getValue());
+                    }
+                    break;
+                default:
+                    throw new UnsupportedOperationException(format("Unsupported argument: key=%s, argument=%s", key, argument));
+            }
+        });
+        return options;
+    }
+
+    static BsonDocument toMatchableValue(final ClientBulkWriteResult result) {
+        BsonDocument expected = new BsonDocument();
+        if (result.isAcknowledged()) {
+            expected.append("insertedCount", new BsonInt64(result.getInsertedCount()))
+                    .append("upsertedCount", new BsonInt64(result.getUpsertedCount()))
+                    .append("matchedCount", new BsonInt64(result.getMatchedCount()))
+                    .append("modifiedCount", new BsonInt64(result.getModifiedCount()))
+                    .append("deletedCount", new BsonInt64(result.getDeletedCount()));
+            result.getVerboseResults().ifPresent(verbose ->
+                    expected.append("insertResults", new BsonDocument(verbose.getInsertResults().entrySet().stream()
+                            .map(entry -> new BsonElement(
+                                    entry.getKey().toString(),
+                                    new BsonDocument("insertedId", entry.getValue().getInsertedId().orElseThrow(Assertions::fail))))
+                            .collect(toList())))
+                    .append("updateResults", new BsonDocument(verbose.getUpdateResults().entrySet().stream()
+                            .map(entry -> {
+                                ClientUpdateResult updateResult = entry.getValue();
+                                BsonDocument updateResultDocument = new BsonDocument(
+                                        "matchedCount", new BsonInt64(updateResult.getMatchedCount()))
+                                        .append("modifiedCount", new BsonInt64(updateResult.getModifiedCount()));
+                                updateResult.getUpsertedId().ifPresent(upsertedId -> updateResultDocument.append("upsertedId", upsertedId));
+                                return new BsonElement(entry.getKey().toString(), updateResultDocument);
+                            })
+                            .collect(toList())))
+                    .append("deleteResults", new BsonDocument(verbose.getDeleteResults().entrySet().stream()
+                            .map(entry -> new BsonElement(
+                                    entry.getKey().toString(),
+                                    new BsonDocument("deletedCount", new BsonInt64(entry.getValue().getDeletedCount()))))
+                            .collect(toList()))));
+        }
+        return expected;
+    }
+
     public OperationResult executeIterateUntilDocumentOrError(final BsonDocument operation) {
         String id = operation.getString("object").getValue();
         MongoCursor<BsonDocument> cursor = entities.getCursor(id);
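The helper above translates unified-test JSON into calls on the public client-level bulk write API. As a rough usage sketch of that same API, using only the model factories and option setters that appear in the helper: the namespaces, filter values and connection string are placeholders, and it assumes `MongoClient` exposes the `MongoCluster.bulkWrite` overloads invoked above.

    import static com.mongodb.client.model.bulk.ClientBulkWriteOptions.clientBulkWriteOptions;
    import static com.mongodb.client.model.bulk.ClientReplaceOneOptions.clientReplaceOneOptions;
    import static java.util.Arrays.asList;

    import com.mongodb.MongoNamespace;
    import com.mongodb.client.MongoClient;
    import com.mongodb.client.MongoClients;
    import com.mongodb.client.model.Filters;
    import com.mongodb.client.model.bulk.ClientBulkWriteOptions;
    import com.mongodb.client.model.bulk.ClientBulkWriteResult;
    import com.mongodb.client.model.bulk.ClientNamespacedWriteModel;
    import com.mongodb.client.model.bulk.ClientReplaceOneOptions;
    import org.bson.Document;

    import java.util.List;

    public final class ClientBulkWriteUsageSketch {
        public static void main(final String[] args) {
            MongoNamespace people = new MongoNamespace("test", "people"); // placeholder namespaces
            MongoNamespace orders = new MongoNamespace("test", "orders");
            ClientReplaceOneOptions replaceOptions = clientReplaceOneOptions();
            replaceOptions.upsert(true);
            // one batch of writes that may target different namespaces
            List<ClientNamespacedWriteModel> models = asList(
                    ClientNamespacedWriteModel.insertOne(people, new Document("name", "Ada")),
                    ClientNamespacedWriteModel.replaceOne(orders, Filters.eq("_id", 1),
                            new Document("_id", 1).append("status", "processed"), replaceOptions));
            ClientBulkWriteOptions options = clientBulkWriteOptions();
            options.ordered(true);
            options.verboseResults(true); // ask for per-operation detail, the part checked by toMatchableValue above
            try (MongoClient client = MongoClients.create("mongodb://localhost")) { // placeholder connection string
                ClientBulkWriteResult result = client.bulkWrite(models, options);
                System.out.println("inserted=" + result.getInsertedCount()
                        + " matched=" + result.getMatchedCount()
                        + " modified=" + result.getModifiedCount());
                result.getVerboseResults().ifPresent(verbose ->
                        verbose.getInsertResults().forEach((index, insertResult) ->
                                System.out.println("model " + index + " insertedId=" + insertResult.getInsertedId())));
            }
        }
    }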
diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTest.java
index b45ff5ea4bf..7ee16484df1 100644
--- a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTest.java
+++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTest.java
@@ -21,10 +21,6 @@
 import com.mongodb.MongoNamespace;
 import com.mongodb.ReadPreference;
 import com.mongodb.UnixServerAddress;
-import com.mongodb.client.unified.UnifiedTestModifications.TestDef;
-import com.mongodb.event.TestServerMonitorListener;
-import com.mongodb.internal.logging.LogMessage;
-import com.mongodb.logging.TestLoggingInterceptor;
 import com.mongodb.WriteConcern;
 import com.mongodb.client.ClientSession;
 import com.mongodb.client.MongoClient;
@@ -32,16 +28,20 @@
 import com.mongodb.client.gridfs.GridFSBucket;
 import com.mongodb.client.model.Filters;
 import com.mongodb.client.test.CollectionHelper;
+import com.mongodb.client.unified.UnifiedTestModifications.TestDef;
 import com.mongodb.client.vault.ClientEncryption;
 import com.mongodb.connection.ClusterDescription;
 import com.mongodb.connection.ClusterType;
 import com.mongodb.connection.ServerDescription;
 import com.mongodb.event.CommandEvent;
 import com.mongodb.event.CommandStartedEvent;
+import com.mongodb.event.TestServerMonitorListener;
 import com.mongodb.internal.connection.TestCommandListener;
 import com.mongodb.internal.connection.TestConnectionPoolListener;
+import com.mongodb.internal.logging.LogMessage;
 import com.mongodb.lang.NonNull;
 import com.mongodb.lang.Nullable;
+import com.mongodb.logging.TestLoggingInterceptor;
 import com.mongodb.test.AfterBeforeParameterResolver;
 import org.bson.BsonArray;
 import org.bson.BsonBoolean;
@@ -245,7 +245,8 @@ public void setUp(
                         || schemaVersion.equals("1.16")
                         || schemaVersion.equals("1.17")
                         || schemaVersion.equals("1.18")
-                        || schemaVersion.equals("1.19"),
+                        || schemaVersion.equals("1.19")
+                        || schemaVersion.equals("1.21"),
                 String.format("Unsupported schema version %s", schemaVersion));
         if (runOnRequirements != null) {
             assumeTrue(runOnRequirementsMet(runOnRequirements, getMongoClientSettings(), getServerVersion()),
@@ -571,6 +572,8 @@ private OperationResult executeOperation(final UnifiedTestContext context, final
                 return crudHelper.createFindCursor(operation);
             case "createChangeStream":
                 return crudHelper.createChangeStreamCursor(operation);
+            case "clientBulkWrite":
+                return crudHelper.clientBulkWrite(operation);
             case "close":
                 return crudHelper.close(operation);
             case "iterateUntilDocumentOrError":
diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTestModifications.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTestModifications.java
index 0b2f4a6a2d5..9e1b4e6ed81 100644
--- a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTestModifications.java
+++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTestModifications.java
@@ -45,7 +45,6 @@ public static void doSkips(final TestDef def) {
                 .directory("atlas-data-lake-testing");
 
         // change-streams
-        def.skipNoncompliantReactive("error required from change stream initialization")
                 // TODO reason?
                 .test("change-streams", "change-streams", "Test with document comment - pre 4.4");
         def.skipNoncompliantReactive("event sensitive tests. We can't guarantee the amount of GetMore commands sent in the reactive driver")
@@ -189,24 +188,11 @@ public static void doSkips(final TestDef def) {
                 .test("retryable-writes", "findOneAndDelete-errorLabels", "FindOneAndDelete succeeds after WriteConcernError ShutdownInProgress")
                 .test("retryable-writes", "findOneAndReplace-errorLabels", "FindOneAndReplace succeeds after WriteConcernError ShutdownInProgress")
                 //.testContains("retryable-writes", "succeeds after retryable writeConcernError")
-                .test("retryable-writes", "client bulkWrite retryable writes", "client bulkWrite with no multi: true operations succeeds after retryable writeConcernError")
                 .test("retryable-writes", "retryable-writes insertOne serverErrors", "InsertOne succeeds after retryable writeConcernError")
                 .test("retryable-writes", "retryable-writes bulkWrite serverErrors", "BulkWrite succeeds after retryable writeConcernError in first batch");
         def.skipJira("https://jira.mongodb.org/browse/JAVA-5341")
                 .when(() -> isDiscoverableReplicaSet() && serverVersionLessThan(4, 4))
                 .test("retryable-writes", "retryable-writes insertOne serverErrors", "RetryableWriteError label is added based on writeConcernError in pre-4.4 mongod response");
-        def.skipJira("https://jira.mongodb.org/browse/JAVA-4586")
-                //.testContains("retryable-writes", "client bulkWrite")
-                .test("retryable-writes", "client bulkWrite retryable writes", "client bulkWrite with no multi: true operations succeeds after retryable top-level error")
-                .test("retryable-writes", "client bulkWrite retryable writes", "client bulkWrite with multi: true operations fails after retryable top-level error")
-                .test("retryable-writes", "client bulkWrite retryable writes", "client bulkWrite with no multi: true operations succeeds after retryable writeConcernError")
-                .test("retryable-writes", "client bulkWrite retryable writes", "client bulkWrite with multi: true operations fails after retryable writeConcernError")
-                .test("retryable-writes", "client bulkWrite retryable writes", "client bulkWrite with retryWrites: false does not retry")
-                .test("retryable-writes", "client bulkWrite retryable writes with client errors", "client bulkWrite with one network error succeeds after retry")
-                .test("retryable-writes", "client bulkWrite retryable writes with client errors", "client bulkWrite with two network errors fails after retry")
-                //.testContains("retryable-writes", "client.clientBulkWrite")
-                .test("retryable-writes", "retryable writes handshake failures", "client.clientBulkWrite succeeds after retryable handshake network error")
-                .test("retryable-writes", "retryable writes handshake failures", "client.clientBulkWrite succeeds after retryable handshake server error (ShutdownInProgress)");
 
         // server-discovery-and-monitoring (SDAM)
 
@@ -238,6 +224,9 @@ public static void doSkips(final TestDef def) {
                 .test("transactions", "read-concern", "only first distinct includes readConcern")
                 .test("transactions", "read-concern", "distinct ignores collection readConcern")
                 .test("transactions", "reads", "distinct");
+        def.skipNoncompliant("`MongoCluster.getWriteConcern`/`MongoCollection.getWriteConcern` are silently ignored in a transaction")
+                .test("transactions", "client bulkWrite transactions",
+                        "client bulkWrite with writeConcern in a transaction causes a transaction error");
 
         // valid-pass
 
diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/CryptConnectionSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/CryptConnectionSpecification.groovy
index 8293b6a1599..8a38f966754 100644
--- a/driver-sync/src/test/unit/com/mongodb/client/internal/CryptConnectionSpecification.groovy
+++ b/driver-sync/src/test/unit/com/mongodb/client/internal/CryptConnectionSpecification.groovy
@@ -27,6 +27,7 @@ import com.mongodb.internal.TimeoutContext
 import com.mongodb.internal.bulk.InsertRequest
 import com.mongodb.internal.bulk.WriteRequestWithIndex
 import com.mongodb.internal.connection.Connection
+import com.mongodb.internal.connection.MessageSequences
 import com.mongodb.internal.connection.SplittablePayload
 import com.mongodb.internal.time.Timeout
 import com.mongodb.internal.validator.NoOpFieldNameValidator
@@ -96,7 +97,7 @@ class CryptConnectionSpecification extends Specification {
             encryptedCommand
         }
         1 * wrappedConnection.command('db', encryptedCommand, _ as NoOpFieldNameValidator, ReadPreference.primary(),
-                _ as RawBsonDocumentCodec, operationContext, true, null, null) >> {
+                _ as RawBsonDocumentCodec, operationContext, true, MessageSequences.EmptyMessageSequences.INSTANCE) >> {
             encryptedResponse
         }
         1 * crypt.decrypt(encryptedResponse, operationTimeout) >> {
@@ -115,7 +116,7 @@
         def payload = new SplittablePayload(INSERT, [
                 new BsonDocumentWrapper(new Document('_id', 1).append('ssid', '555-55-5555').append('b', bytes), codec),
                 new BsonDocumentWrapper(new Document('_id', 2).append('ssid', '666-66-6666').append('b', bytes), codec)
-        ].withIndex().collect { doc, i -> new WriteRequestWithIndex(new InsertRequest(doc), i) }, true)
+        ].withIndex().collect { doc, i -> new WriteRequestWithIndex(new InsertRequest(doc), i) }, true, NoOpFieldNameValidator.INSTANCE)
         def encryptedCommand = toRaw(new BsonDocument('insert', new BsonString('test')).append('documents', new BsonArray(
                 [
                         new BsonDocument('_id', new BsonInt32(1))
@@ -133,8 +134,7 @@
         when:
         def response = cryptConnection.command('db', new BsonDocumentWrapper(new Document('insert', 'test'), codec),
-                NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), new BsonDocumentCodec(),
-                operationContext, true, payload, NoOpFieldNameValidator.INSTANCE,)
+                NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), new BsonDocumentCodec(), operationContext, true, payload)
 
         then:
         _ * wrappedConnection.getDescription() >> {
@@ -151,7 +151,7 @@
             encryptedCommand
         }
         1 * wrappedConnection.command('db', encryptedCommand, _ as NoOpFieldNameValidator, ReadPreference.primary(),
-                _ as RawBsonDocumentCodec, operationContext, true, null, null,) >> {
+                _ as RawBsonDocumentCodec, operationContext, true, MessageSequences.EmptyMessageSequences.INSTANCE) >> {
             encryptedResponse
         }
         1 * crypt.decrypt(encryptedResponse, operationTimeout) >> {
@@ -172,7 +172,7 @@
                 new BsonDocumentWrapper(new Document('_id', 1), codec),
                 new BsonDocumentWrapper(new Document('_id', 2), codec),
                 new BsonDocumentWrapper(new Document('_id', 3), codec)
-        ].withIndex().collect { doc, i -> new WriteRequestWithIndex(new InsertRequest(doc), i) }, true)
+        ].withIndex().collect { doc, i -> new WriteRequestWithIndex(new InsertRequest(doc), i) }, true, NoOpFieldNameValidator.INSTANCE)
         def encryptedCommand = toRaw(new BsonDocument('insert', new BsonString('test')).append('documents', new BsonArray(
                 [
                         new BsonDocument('_id', new BsonInt32(1)),
@@ -190,8 +190,7 @@
         when:
         def response = cryptConnection.command('db', new BsonDocumentWrapper(new Document('insert', 'test'), codec),
-                NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), new BsonDocumentCodec(), operationContext, true, payload,
-                NoOpFieldNameValidator.INSTANCE)
+                NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), new BsonDocumentCodec(), operationContext, true, payload)
 
         then:
         _ * wrappedConnection.getDescription() >> {
@@ -207,7 +206,7 @@
             encryptedCommand
         }
         1 * wrappedConnection.command('db', encryptedCommand, _ as NoOpFieldNameValidator, ReadPreference.primary(),
-                _ as RawBsonDocumentCodec, operationContext, true, null, null,) >> {
+                _ as RawBsonDocumentCodec, operationContext, true, MessageSequences.EmptyMessageSequences.INSTANCE) >> {
             encryptedResponse
         }
         1 * crypt.decrypt(encryptedResponse, operationTimeout) >> {
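The last hunks update the Spock interactions for an internal signature change: the nullable pair of payload and payload field-name validator arguments at the end of the stubbed `command(...)` call is folded into a single `MessageSequences` argument, with `MessageSequences.EmptyMessageSequences.INSTANCE` passed when there is nothing extra to send, and `SplittablePayload` now receives its field-name validator in its constructor. The snippet below is not driver code; it is only a small, self-contained illustration of that null-object style, with hypothetical names.

    public final class MessageSequencesStyleSketch {
        interface Sequences {
            // A never-null sentinel that stands in for "no additional sequences".
            final class Empty implements Sequences {
                static final Empty INSTANCE = new Empty();
                private Empty() {
                }
            }
        }

        // Before (hypothetical shape): command(..., @Nullable Payload payload, @Nullable FieldNameValidator validator)
        // After: a single, never-null "sequences" argument; the empty sentinel replaces the null pair.
        static void command(final String name, final Sequences sequences) {
            if (sequences instanceof Sequences.Empty) {
                System.out.println(name + ": no additional message sequences");
            } else {
                System.out.println(name + ": streaming additional message sequences");
            }
        }

        public static void main(final String[] args) {
            command("insert", Sequences.Empty.INSTANCE);
        }
    }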