@@ -16,6 +16,7 @@

 package com.mongodb.internal.operation

+import com.mongodb.MongoBulkWriteException
 import com.mongodb.MongoNamespace
 import com.mongodb.ReadConcern
 import com.mongodb.ServerAddress
@@ -228,23 +229,71 @@ class BulkWriteBatchSpecification extends Specification {
         !bulkWriteBatch.hasAnotherBatch()
     }

-    def 'should only map inserts up to the payload position'() {
+    def 'should map all inserted ids'() {
         when:
         def bulkWriteBatch = BulkWriteBatch.createBulkWriteBatch(namespace, serverDescription, connectionDescription, false,
-                WriteConcern.ACKNOWLEDGED, null, false, getWriteRequests()[3..4], sessionContext)
+                WriteConcern.ACKNOWLEDGED, null, false,
+                [new InsertRequest(toBsonDocument('{_id: 0}')),
+                 new InsertRequest(toBsonDocument('{_id: 1}')),
+                 new InsertRequest(toBsonDocument('{_id: 2}'))
+                ],
+                sessionContext)
         def payload = bulkWriteBatch.getPayload()
         payload.setPosition(1)
+        payload.insertedIds.put(0, new BsonInt32(0))
         bulkWriteBatch.addResult(BsonDocument.parse('{"n": 1, "ok": 1.0}'))

         then:
-        bulkWriteBatch.getResult().inserts == [new BulkWriteInsert(0, null)]
+        bulkWriteBatch.getResult().inserts == [new BulkWriteInsert(0, new BsonInt32(0))]

         when:
-        payload.setPosition(2)
+        bulkWriteBatch = bulkWriteBatch.getNextBatch()
+        payload = bulkWriteBatch.getPayload()
+        payload.setPosition(1)
+        payload.insertedIds.put(1, new BsonInt32(1))
+        bulkWriteBatch.addResult(BsonDocument.parse('{"n": 1, "ok": 1.0}'))
+
+        then:
+        bulkWriteBatch.getResult().inserts == [new BulkWriteInsert(0, new BsonInt32(0)),
+                                               new BulkWriteInsert(1, new BsonInt32(1))]
+
+        when:
+        bulkWriteBatch = bulkWriteBatch.getNextBatch()
+        payload = bulkWriteBatch.getPayload()
+        payload.setPosition(1)
+        payload.insertedIds.put(2, new BsonInt32(2))
         bulkWriteBatch.addResult(BsonDocument.parse('{"n": 1, "ok": 1.0}'))

         then:
-        bulkWriteBatch.getResult().inserts == [new BulkWriteInsert(0, null), new BulkWriteInsert(1, null)]
+        bulkWriteBatch.getResult().inserts == [new BulkWriteInsert(0, new BsonInt32(0)),
+                                               new BulkWriteInsert(1, new BsonInt32(1)),
+                                               new BulkWriteInsert(2, new BsonInt32(2))]
+    }
+
+    def 'should not map inserted id with a write error'() {
+        given:
+        def bulkWriteBatch = BulkWriteBatch.createBulkWriteBatch(namespace, serverDescription, connectionDescription, false,
+                WriteConcern.ACKNOWLEDGED, null, false,
+                [new InsertRequest(toBsonDocument('{_id: 0}')),
+                 new InsertRequest(toBsonDocument('{_id: 1}')),
+                 new InsertRequest(toBsonDocument('{_id: 2}'))
+                ],
+                sessionContext)
+        def payload = bulkWriteBatch.getPayload()
+        payload.setPosition(3)
+        payload.insertedIds.put(0, new BsonInt32(0))
+        payload.insertedIds.put(1, new BsonInt32(1))
+        payload.insertedIds.put(2, new BsonInt32(2))
+
+        when:
+        bulkWriteBatch.addResult(toBsonDocument('''{"ok": 1, "n": 2,
+            "writeErrors": [{ "index" : 1, "code" : 11000, "errmsg": "duplicate key error"}] }'''))
+        bulkWriteBatch.getResult()
+
+        then:
+        def ex = thrown(MongoBulkWriteException)
+        ex.getWriteResult().inserts == [new BulkWriteInsert(0, new BsonInt32(0)),
+                                        new BulkWriteInsert(2, new BsonInt32(2))]
     }

     def 'should not retry when at least one write is not retryable'() {
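For readers skimming the diff, a minimal standalone Groovy sketch of the behaviour the new tests pin down: every id recorded in the payload's insertedIds map is surfaced as a BulkWriteInsert, except ids whose index appears in the server's writeErrors. This illustration was written for this note and is not the driver's internal mapping code; only BulkWriteInsert, BsonInt32, and the index/id pairs come from the tests above, and the import locations are assumed from the public driver API.

    // Hypothetical sketch -- not the driver's actual implementation.
    import com.mongodb.bulk.BulkWriteInsert
    import org.bson.BsonInt32
    import org.bson.BsonValue

    // Ids the payload recorded for each write-request index.
    Map<Integer, BsonValue> insertedIds = [0: new BsonInt32(0), 1: new BsonInt32(1), 2: new BsonInt32(2)]
    // Indexes reported in the server reply's writeErrors array.
    Set<Integer> errorIndexes = [1] as Set

    // Keep only the ids whose index has no write error, then pair each
    // surviving (index, id) into a BulkWriteInsert.
    List<BulkWriteInsert> inserts = insertedIds
            .findAll { index, id -> !errorIndexes.contains(index) }
            .collect { index, id -> new BulkWriteInsert(index, id) }

    assert inserts == [new BulkWriteInsert(0, new BsonInt32(0)),
                       new BulkWriteInsert(2, new BsonInt32(2))]

This matches the second test: the server reply reports "n": 2 with a write error at index 1, so the result attached to the thrown MongoBulkWriteException contains entries for indexes 0 and 2 only.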