From d1fb2c3135adc0ef611f8700af3ea17b3385b6b7 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Wed, 16 Jul 2014 09:32:27 -0300 Subject: [PATCH 001/305] Move package to gopkg.in. --- auth.go | 3 ++- auth_test.go | 11 ++++++----- bson/bson_test.go | 5 +++-- bulk_test.go | 14 +++++++++----- cluster.go | 3 ++- cluster_test.go | 7 ++++--- gridfs.go | 3 ++- gridfs_test.go | 7 ++++--- queue_test.go | 19 ++++++++----------- saslimpl.go | 2 +- server.go | 3 ++- session.go | 4 ++-- session_test.go | 9 +++++---- socket.go | 3 ++- suite_test.go | 8 ++++---- txn/debug.go | 3 ++- txn/flusher.go | 13 +++++++------ txn/mgo_test.go | 4 ++-- txn/sim_test.go | 8 ++++---- txn/tarjan.go | 2 +- txn/tarjan_test.go | 4 ++-- txn/txn.go | 4 ++-- txn/txn_test.go | 8 ++++---- 23 files changed, 80 insertions(+), 67 deletions(-) diff --git a/auth.go b/auth.go index 7f3ba8c30..84c44afe3 100644 --- a/auth.go +++ b/auth.go @@ -31,8 +31,9 @@ import ( "encoding/hex" "errors" "fmt" - "labix.org/v2/mgo/bson" "sync" + + "gopkg.in/mgo.v2/bson" ) type authCmd struct { diff --git a/auth_test.go b/auth_test.go index 07080ca4a..ba09754cd 100644 --- a/auth_test.go +++ b/auth_test.go @@ -29,11 +29,12 @@ package mgo_test import ( "flag" "fmt" - "labix.org/v2/mgo" - . "launchpad.net/gocheck" "net/url" "sync" "time" + + . "gopkg.in/check.v1" + "gopkg.in/mgo.v2" ) func (s *S) TestAuthLoginDatabase(c *C) { @@ -848,9 +849,9 @@ func (s *S) TestAuthPlainCred(c *C) { c.Skip("no -plain") } cred := &mgo.Credential{ - Username: plainUser, - Password: plainPass, - Source: "$external", + Username: plainUser, + Password: plainPass, + Source: "$external", Mechanism: "PLAIN", } c.Logf("Connecting to %s...", *plainFlag) diff --git a/bson/bson_test.go b/bson/bson_test.go index 1263e97a1..c29136da8 100644 --- a/bson/bson_test.go +++ b/bson/bson_test.go @@ -31,12 +31,13 @@ import ( "encoding/binary" "encoding/json" "errors" - "labix.org/v2/mgo/bson" - . "launchpad.net/gocheck" "net/url" "reflect" "testing" "time" + + . 
"gopkg.in/check.v1" + "gopkg.in/mgo.v2/bson" ) func TestAll(t *testing.T) { diff --git a/bulk_test.go b/bulk_test.go index f8abca80a..24af1b102 100644 --- a/bulk_test.go +++ b/bulk_test.go @@ -27,8 +27,8 @@ package mgo_test import ( - "labix.org/v2/mgo" - . "launchpad.net/gocheck" + . "gopkg.in/check.v1" + "gopkg.in/mgo.v2" ) func (s *S) TestBulkInsert(c *C) { @@ -44,7 +44,7 @@ func (s *S) TestBulkInsert(c *C) { c.Assert(err, IsNil) c.Assert(r, FitsTypeOf, &mgo.BulkResult{}) - type doc struct{ N int } + type doc struct{ N int } var res []doc err = coll.Find(nil).Sort("n").All(&res) c.Assert(err, IsNil) @@ -62,7 +62,9 @@ func (s *S) TestBulkInsertError(c *C) { _, err = bulk.Run() c.Assert(err, ErrorMatches, ".*duplicate key.*") - type doc struct{ N int `_id` } + type doc struct { + N int `_id` + } var res []doc err = coll.Find(nil).Sort("_id").All(&res) c.Assert(err, IsNil) @@ -81,7 +83,9 @@ func (s *S) TestBulkInsertErrorUnordered(c *C) { _, err = bulk.Run() c.Assert(err, ErrorMatches, ".*duplicate key.*") - type doc struct{ N int `_id` } + type doc struct { + N int `_id` + } var res []doc err = coll.Find(nil).Sort("_id").All(&res) c.Assert(err, IsNil) diff --git a/cluster.go b/cluster.go index b4ed5bf3e..60a69aa25 100644 --- a/cluster.go +++ b/cluster.go @@ -28,10 +28,11 @@ package mgo import ( "errors" - "labix.org/v2/mgo/bson" "net" "sync" "time" + + "gopkg.in/mgo.v2/bson" ) // --------------------------------------------------------------------------- diff --git a/cluster_test.go b/cluster_test.go index d6d2810b7..54f44908e 100644 --- a/cluster_test.go +++ b/cluster_test.go @@ -29,13 +29,14 @@ package mgo_test import ( "fmt" "io" - "labix.org/v2/mgo" - "labix.org/v2/mgo/bson" - . "launchpad.net/gocheck" "net" "strings" "sync" "time" + + . 
"gopkg.in/check.v1" + "gopkg.in/mgo.v2" + "gopkg.in/mgo.v2/bson" ) func (s *S) TestNewSession(c *C) { diff --git a/gridfs.go b/gridfs.go index 312f8fb02..ce1b7f9f8 100644 --- a/gridfs.go +++ b/gridfs.go @@ -32,10 +32,11 @@ import ( "errors" "hash" "io" - "labix.org/v2/mgo/bson" "os" "sync" "time" + + "gopkg.in/mgo.v2/bson" ) type GridFS struct { diff --git a/gridfs_test.go b/gridfs_test.go index fbdd5b0de..252d8f968 100644 --- a/gridfs_test.go +++ b/gridfs_test.go @@ -28,11 +28,12 @@ package mgo_test import ( "io" - "labix.org/v2/mgo" - "labix.org/v2/mgo/bson" - . "launchpad.net/gocheck" "os" "time" + + "gopkg.in/mgo.v2" + "gopkg.in/mgo.v2/bson" + . "gopkg.in/check.v1" ) func (s *S) TestGridFSCreate(c *C) { diff --git a/queue_test.go b/queue_test.go index 38b032531..bd0ab550f 100644 --- a/queue_test.go +++ b/queue_test.go @@ -27,21 +27,21 @@ package mgo import ( - "launchpad.net/gocheck" + . "gopkg.in/check.v1" ) type QS struct{} -var _ = gocheck.Suite(&QS{}) +var _ = Suite(&QS{}) -func (s *QS) TestSequentialGrowth(c *gocheck.C) { +func (s *QS) TestSequentialGrowth(c *C) { q := queue{} n := 2048 for i := 0; i != n; i++ { q.Push(i) } for i := 0; i != n; i++ { - c.Assert(q.Pop(), gocheck.Equals, i) + c.Assert(q.Pop(), Equals, i) } } @@ -61,7 +61,7 @@ var queueTestLists = [][]int{ 0, 1, 2, 3, 4, 5, 6, 7, 8}, } -func (s *QS) TestQueueTestLists(c *gocheck.C) { +func (s *QS) TestQueueTestLists(c *C) { test := []int{} testi := 0 reset := func() { @@ -85,8 +85,7 @@ func (s *QS) TestQueueTestLists(c *gocheck.C) { q := queue{} for _, n := range list { if n == -1 { - c.Assert(q.Pop(), gocheck.Equals, pop(), - gocheck.Commentf("With list %#v", list)) + c.Assert(q.Pop(), Equals, pop(), Commentf("With list %#v", list)) } else { q.Push(n) push(n) @@ -94,11 +93,9 @@ func (s *QS) TestQueueTestLists(c *gocheck.C) { } for n := pop(); n != -1; n = pop() { - c.Assert(q.Pop(), gocheck.Equals, n, - gocheck.Commentf("With list %#v", list)) + c.Assert(q.Pop(), Equals, n, Commentf("With 
list %#v", list)) } - c.Assert(q.Pop(), gocheck.Equals, nil, - gocheck.Commentf("With list %#v", list)) + c.Assert(q.Pop(), Equals, nil, Commentf("With list %#v", list)) } } diff --git a/saslimpl.go b/saslimpl.go index 3b255def6..58c0891c6 100644 --- a/saslimpl.go +++ b/saslimpl.go @@ -3,7 +3,7 @@ package mgo import ( - "labix.org/v2/mgo/sasl" + "gopkg.in/mgo.v2/sasl" ) func saslNew(cred Credential, host string) (saslStepper, error) { diff --git a/server.go b/server.go index d61f01801..cc880e223 100644 --- a/server.go +++ b/server.go @@ -28,11 +28,12 @@ package mgo import ( "errors" - "labix.org/v2/mgo/bson" "net" "sort" "sync" "time" + + "gopkg.in/mgo.v2/bson" ) // --------------------------------------------------------------------------- diff --git a/session.go b/session.go index 0ca86e869..ab9571976 100644 --- a/session.go +++ b/session.go @@ -31,7 +31,6 @@ import ( "encoding/hex" "errors" "fmt" - "labix.org/v2/mgo/bson" "math" "net" "net/url" @@ -41,6 +40,8 @@ import ( "strings" "sync" "time" + + "gopkg.in/mgo.v2/bson" ) type mode int @@ -2278,7 +2279,6 @@ func checkQueryError(fullname string, d []byte) error { Error: result := &queryError{} bson.Unmarshal(d, result) - logf("queryError: %#v\n", result) if result.LastError != nil { return result.LastError } diff --git a/session_test.go b/session_test.go index e9f90f56b..4421e9953 100644 --- a/session_test.go +++ b/session_test.go @@ -29,9 +29,6 @@ package mgo_test import ( "flag" "fmt" - "labix.org/v2/mgo" - "labix.org/v2/mgo/bson" - . "launchpad.net/gocheck" "math" "reflect" "runtime" @@ -39,6 +36,10 @@ import ( "strconv" "strings" "time" + + . 
"gopkg.in/check.v1" + "gopkg.in/mgo.v2" + "gopkg.in/mgo.v2/bson" ) func (s *S) TestRunString(c *C) { @@ -764,7 +765,7 @@ func (s *S) TestIsDupValues(c *C) { c.Assert(mgo.IsDup(&mgo.QueryError{Code: 11001}), Equals, true) c.Assert(mgo.IsDup(&mgo.LastError{Code: 12582}), Equals, true) c.Assert(mgo.IsDup(&mgo.QueryError{Code: 12582}), Equals, true) - lerr := &mgo.LastError{Code: 16460, Err:"error inserting 1 documents to shard ... caused by :: E11000 duplicate key error index: ..."} + lerr := &mgo.LastError{Code: 16460, Err: "error inserting 1 documents to shard ... caused by :: E11000 duplicate key error index: ..."} c.Assert(mgo.IsDup(lerr), Equals, true) } diff --git a/socket.go b/socket.go index 97c2fd7bc..227749fdd 100644 --- a/socket.go +++ b/socket.go @@ -28,10 +28,11 @@ package mgo import ( "errors" - "labix.org/v2/mgo/bson" "net" "sync" "time" + + "gopkg.in/mgo.v2/bson" ) type replyFunc func(err error, reply *replyOp, docNum int, docData []byte) diff --git a/suite_test.go b/suite_test.go index a846c5134..3f6b8f7f1 100644 --- a/suite_test.go +++ b/suite_test.go @@ -30,16 +30,16 @@ import ( "errors" "flag" "fmt" - "labix.org/v2/mgo" - "labix.org/v2/mgo/bson" - . "launchpad.net/gocheck" "net" "os/exec" "strconv" "syscall" - "testing" "time" + + . 
"gopkg.in/check.v1" + "gopkg.in/mgo.v2" + "gopkg.in/mgo.v2/bson" ) var fast = flag.Bool("fast", false, "Skip slow tests") diff --git a/txn/debug.go b/txn/debug.go index 7f67f4e3f..8224bb313 100644 --- a/txn/debug.go +++ b/txn/debug.go @@ -3,9 +3,10 @@ package txn import ( "bytes" "fmt" - "labix.org/v2/mgo/bson" "sort" "sync/atomic" + + "gopkg.in/mgo.v2/bson" ) var ( diff --git a/txn/flusher.go b/txn/flusher.go index 846eefec1..d5e9f4528 100644 --- a/txn/flusher.go +++ b/txn/flusher.go @@ -2,9 +2,10 @@ package txn import ( "fmt" - "labix.org/v2/mgo" - "labix.org/v2/mgo/bson" "sort" + + "gopkg.in/mgo.v2" + "gopkg.in/mgo.v2/bson" ) func flush(r *Runner, t *transaction) error { @@ -357,11 +358,11 @@ NextDoc: drevno := revno[dkey] switch { case op.Insert != nil && drevno < 0: - revno[dkey] = -drevno+1 + revno[dkey] = -drevno + 1 case op.Update != nil && drevno >= 0: - revno[dkey] = drevno+1 + revno[dkey] = drevno + 1 case op.Remove && drevno >= 0: - revno[dkey] = -drevno-1 + revno[dkey] = -drevno - 1 } } if !prereqs { @@ -745,7 +746,7 @@ func (f *flusher) apply(t *transaction, pull map[bson.ObjectId]*transaction) err case op.Update != nil: if revno < 0 { err = mgo.ErrNotFound - f.debugf("Won't try to apply update op; negative revision means the document is missing or stashed"); + f.debugf("Won't try to apply update op; negative revision means the document is missing or stashed") } else { newRevno := revno + 1 logRevnos[i] = newRevno diff --git a/txn/mgo_test.go b/txn/mgo_test.go index ce5d9d0ec..5abc47335 100644 --- a/txn/mgo_test.go +++ b/txn/mgo_test.go @@ -2,8 +2,8 @@ package txn_test import ( "bytes" - "labix.org/v2/mgo" - . "launchpad.net/gocheck" + "gopkg.in/mgo.v2" + . "gopkg.in/check.v1" "os/exec" "time" ) diff --git a/txn/sim_test.go b/txn/sim_test.go index 312eed999..35f7048cc 100644 --- a/txn/sim_test.go +++ b/txn/sim_test.go @@ -2,10 +2,10 @@ package txn_test import ( "flag" - "labix.org/v2/mgo" - "labix.org/v2/mgo/bson" - "labix.org/v2/mgo/txn" - . 
"launchpad.net/gocheck" + "gopkg.in/mgo.v2" + "gopkg.in/mgo.v2/bson" + "gopkg.in/mgo.v2/txn" + . "gopkg.in/check.v1" "math/rand" "time" ) diff --git a/txn/tarjan.go b/txn/tarjan.go index a5ac0b02b..25cce5f5f 100644 --- a/txn/tarjan.go +++ b/txn/tarjan.go @@ -1,7 +1,7 @@ package txn import ( - "labix.org/v2/mgo/bson" + "gopkg.in/mgo.v2/bson" "sort" ) diff --git a/txn/tarjan_test.go b/txn/tarjan_test.go index c422605cf..9586ce067 100644 --- a/txn/tarjan_test.go +++ b/txn/tarjan_test.go @@ -2,8 +2,8 @@ package txn import ( "fmt" - "labix.org/v2/mgo/bson" - . "launchpad.net/gocheck" + "gopkg.in/mgo.v2/bson" + . "gopkg.in/check.v1" ) type TarjanSuite struct{} diff --git a/txn/txn.go b/txn/txn.go index e798e65c2..a235f9032 100644 --- a/txn/txn.go +++ b/txn/txn.go @@ -9,8 +9,8 @@ package txn import ( "encoding/binary" "fmt" - "labix.org/v2/mgo" - "labix.org/v2/mgo/bson" + "gopkg.in/mgo.v2" + "gopkg.in/mgo.v2/bson" "reflect" "sort" "sync" diff --git a/txn/txn_test.go b/txn/txn_test.go index b716783d2..a3aa07679 100644 --- a/txn/txn_test.go +++ b/txn/txn_test.go @@ -1,10 +1,10 @@ package txn_test import ( - "labix.org/v2/mgo" - "labix.org/v2/mgo/bson" - "labix.org/v2/mgo/txn" - . "launchpad.net/gocheck" + "gopkg.in/mgo.v2" + "gopkg.in/mgo.v2/bson" + "gopkg.in/mgo.v2/txn" + . "gopkg.in/check.v1" "testing" ) From 21aa1e8b8421d6a0db878434219ff40d64524aef Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Wed, 16 Jul 2014 09:46:38 -0300 Subject: [PATCH 002/305] Fix UserSource incompatibility message for 2.6. 
--- session.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/session.go b/session.go index ab9571976..c542a0689 100644 --- a/session.go +++ b/session.go @@ -804,10 +804,6 @@ func isNoCmd(err error) bool { } func (db *Database) runUserCmd(cmdName string, user *User) error { - //if user.UserSource != "" && (user.UserSource != "$external" || db.Name != "$external") { - // return fmt.Errorf("MongoDB 2.6+ does not support the UserSource setting") - //} - cmd := make(bson.D, 0, 16) cmd = append(cmd, bson.DocElem{cmdName, user.Username}) if user.Password != "" { @@ -825,7 +821,11 @@ func (db *Database) runUserCmd(cmdName string, user *User) error { if roles != nil || user.Roles != nil || cmdName == "createUser" { cmd = append(cmd, bson.DocElem{"roles", roles}) } - return db.Run(cmd, nil) + err := db.Run(cmd, nil) + if !isNoCmd(err) && user.UserSource != "" && (user.UserSource != "$external" || db.Name != "$external") { + return fmt.Errorf("MongoDB 2.6+ does not support the UserSource setting") + } + return err } // AddUser creates or updates the authentication credentials of user within From f25ece59412eabb2707f1d9912ef04feb6496585 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Wed, 16 Jul 2014 09:54:24 -0300 Subject: [PATCH 003/305] Drop unreasonble test check. --- session_test.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/session_test.go b/session_test.go index 4421e9953..788035bd5 100644 --- a/session_test.go +++ b/session_test.go @@ -2774,11 +2774,6 @@ func (s *S) TestMapReduce(c *C) { c.Assert(item.Value, Equals, expected[item.Id]) expected[item.Id] = -1 } - - // Weak attempt of testing that Sort gets delivered. 
- _, err = coll.Find(nil).Sort("-n").MapReduce(job, &result) - _, isQueryError := err.(*mgo.QueryError) - c.Assert(isQueryError, Equals, true) } func (s *S) TestMapReduceFinalize(c *C) { From 381aac014b41a458de6b9a3530039677777347c0 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Wed, 16 Jul 2014 10:56:37 -0300 Subject: [PATCH 004/305] Test that properly RemoveUser returns ErrNotFound. --- auth_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/auth_test.go b/auth_test.go index ba09754cd..07b41f073 100644 --- a/auth_test.go +++ b/auth_test.go @@ -407,6 +407,8 @@ func (s *S) TestAuthRemoveUser(c *C) { c.Assert(err, IsNil) err = mydb.RemoveUser("myuser") c.Assert(err, IsNil) + err = mydb.RemoveUser("myuser") + c.Assert(err, Equals, mgo.ErrNotFound) err = mydb.Login("myuser", "mypass") c.Assert(err, ErrorMatches, "auth fail(s|ed)") From 22e23495b34a24145f3d3d159f01aabaa2fefd8e Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Wed, 16 Jul 2014 11:32:37 -0300 Subject: [PATCH 005/305] Return ErrNotFound on RemoveUser if using command. 
--- session.go | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/session.go b/session.go index c542a0689..e2a8a725d 100644 --- a/session.go +++ b/session.go @@ -754,7 +754,7 @@ func (db *Database) UpsertUser(user *User) error { rundb = db.Session.DB(user.UserSource) } err := rundb.runUserCmd("updateUser", user) - if e, ok := err.(*QueryError); ok && e.Code == 11 { + if isNotFound(err) { return rundb.runUserCmd("createUser", user) } if !isNoCmd(err) { @@ -803,6 +803,11 @@ func isNoCmd(err error) bool { return ok && strings.HasPrefix(e.Message, "no such cmd:") } +func isNotFound(err error) bool { + e, ok := err.(*QueryError) + return ok && e.Code == 11 +} + func (db *Database) runUserCmd(cmdName string, user *User) error { cmd := make(bson.D, 0, 16) cmd = append(cmd, bson.DocElem{cmdName, user.Username}) @@ -850,7 +855,7 @@ func (db *Database) AddUser(username, password string, readOnly bool) error { } } err := db.runUserCmd("updateUser", user) - if e, ok := err.(*QueryError); ok && e.Code == 11 { + if isNotFound(err) { return db.runUserCmd("createUser", user) } if !isNoCmd(err) { @@ -873,6 +878,9 @@ func (db *Database) RemoveUser(user string) error { users := db.C("system.users") return users.Remove(bson.M{"user": user}) } + if isNotFound(err) { + return ErrNotFound + } return err } From d2115e25399b76fcbea78af352af16d170ce1b80 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 21 Jul 2014 00:46:35 -0300 Subject: [PATCH 006/305] Drop .bzrignore --- .bzrignore | 2 -- 1 file changed, 2 deletions(-) delete mode 100644 .bzrignore diff --git a/.bzrignore b/.bzrignore deleted file mode 100644 index 340cde711..000000000 --- a/.bzrignore +++ /dev/null @@ -1,2 +0,0 @@ -_* -[856].out From 9ba6884b7a264e3f00cdb530df2417138f918a58 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 21 Jul 2014 00:52:10 -0300 Subject: [PATCH 007/305] bson: marshal json.Number as either int or float Patch by Min-Young Wu, from Facebook. 
--- bson/bson_test.go | 11 +++++++++++ bson/encode.go | 19 ++++++++++++++++++- 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/bson/bson_test.go b/bson/bson_test.go index c29136da8..d186543ab 100644 --- a/bson/bson_test.go +++ b/bson/bson_test.go @@ -308,6 +308,17 @@ var oneWayMarshalItems = []testItemType{ "\x12\x00\xFF\xFF\xFF\xFF\x00\x00\x00\x00"}, {bson.M{"": uint(1<<32 - 1)}, "\x12\x00\xFF\xFF\xFF\xFF\x00\x00\x00\x00"}, + + // Converting json.Number to an actual number + {bson.M{"_": json.Number("5.05")}, + "\x01_\x00333333\x14@"}, + {bson.M{"_": json.Number("258")}, + "\x12_\x00\x02\x01\x00\x00\x00\x00\x00\x00"}, + + // If the json.Number integer is too large for int64, parsing as int64 + // will fail and 'overflow' into a float64 + {bson.M{"_": json.Number("1267650600228229401496703205376")}, + "\x01_\x00\x00\x00\x00\x00\x00\x000F"}, } func (s *S) TestOneWayMarshalItems(c *C) { diff --git a/bson/encode.go b/bson/encode.go index 6ba383a23..c24a80013 100644 --- a/bson/encode.go +++ b/bson/encode.go @@ -28,6 +28,7 @@ package bson import ( + "encoding/json" "fmt" "math" "net/url" @@ -51,6 +52,7 @@ var ( typeURL = reflect.TypeOf(url.URL{}) typeTime = reflect.TypeOf(time.Time{}) typeString = reflect.TypeOf("") + typeJSONNumber = reflect.TypeOf(json.Number("")) ) const itoaCacheSize = 32 @@ -271,6 +273,17 @@ func (e *encoder) addElem(name string, v reflect.Value, minSize bool) { case typeSymbol: e.addElemName('\x0E', name) e.addStr(s) + case typeJSONNumber: + n := v.Interface().(json.Number) + if i, err := n.Int64(); err == nil { + e.addElemName('\x12', name) + e.addInt64(i) + } else if f, err := n.Float64(); err == nil { + e.addElemName('\x01', name) + e.addFloat64(f) + } else { + panic("Failed to convert a json.Number to a number: " + s) + } default: e.addElemName('\x02', name) e.addStr(s) @@ -278,7 +291,7 @@ func (e *encoder) addElem(name string, v reflect.Value, minSize bool) { case reflect.Float32, reflect.Float64: e.addElemName('\x01', name) - 
e.addInt64(int64(math.Float64bits(v.Float()))) + e.addFloat64(v.Float()) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: u := v.Uint() @@ -457,6 +470,10 @@ func (e *encoder) addInt64(v int64) { byte(u>>32), byte(u>>40), byte(u>>48), byte(u>>56)) } +func (e *encoder) addFloat64(v float64) { + e.addInt64(int64(math.Float64bits(v))) +} + func (e *encoder) addBytes(v ...byte) { e.out = append(e.out, v...) } From db25ae1e93a0f67babae134fb03b1896b7ff162c Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 21 Jul 2014 01:24:09 -0300 Subject: [PATCH 008/305] bson: decode json.Number to ensure compatibility --- bson/bson_test.go | 16 +++++----------- bson/decode.go | 13 ++++++++++++- bson/encode.go | 2 +- 3 files changed, 18 insertions(+), 13 deletions(-) diff --git a/bson/bson_test.go b/bson/bson_test.go index d186543ab..3d9799850 100644 --- a/bson/bson_test.go +++ b/bson/bson_test.go @@ -308,17 +308,6 @@ var oneWayMarshalItems = []testItemType{ "\x12\x00\xFF\xFF\xFF\xFF\x00\x00\x00\x00"}, {bson.M{"": uint(1<<32 - 1)}, "\x12\x00\xFF\xFF\xFF\xFF\x00\x00\x00\x00"}, - - // Converting json.Number to an actual number - {bson.M{"_": json.Number("5.05")}, - "\x01_\x00333333\x14@"}, - {bson.M{"_": json.Number("258")}, - "\x12_\x00\x02\x01\x00\x00\x00\x00\x00\x00"}, - - // If the json.Number integer is too large for int64, parsing as int64 - // will fail and 'overflow' into a float64 - {bson.M{"_": json.Number("1267650600228229401496703205376")}, - "\x01_\x00\x00\x00\x00\x00\x00\x000F"}, } func (s *S) TestOneWayMarshalItems(c *C) { @@ -1248,6 +1237,11 @@ var twoWayCrossItems = []crossTypeItem{ // bson.M <=> map[MyString] {bson.M{"a": bson.M{"b": 1, "c": 2}}, map[MyString]interface{}{"a": map[MyString]interface{}{"b": 1, "c": 2}}}, + + // json.Number <=> int64, float64 + {&struct{ N json.Number }{"5"}, map[string]interface{}{"n": int64(5)}}, + {&struct{ N json.Number }{"5.05"}, map[string]interface{}{"n": 5.05}}, + {&struct{ 
N json.Number }{"9223372036854776000"}, map[string]interface{}{"n": float64(1 << 63)}}, } // Same thing, but only one way (obj1 => obj2). diff --git a/bson/decode.go b/bson/decode.go index 1ec034ea6..f1c8b4f7c 100644 --- a/bson/decode.go +++ b/bson/decode.go @@ -32,6 +32,7 @@ import ( "math" "net/url" "reflect" + "strconv" "sync" "time" ) @@ -594,6 +595,16 @@ func (d *decoder) readElemTo(out reflect.Value, kind byte) (good bool) { out.SetString(string(b)) return true } + case reflect.Int, reflect.Int64: + if outt == typeJSONNumber { + out.SetString(strconv.FormatInt(inv.Int(), 10)) + return true + } + case reflect.Float64: + if outt == typeJSONNumber { + out.SetString(strconv.FormatFloat(inv.Float(), 'f', -1, 64)) + return true + } } case reflect.Slice, reflect.Array: // Remember, array (0x04) slices are built with the correct @@ -632,7 +643,7 @@ func (d *decoder) readElemTo(out reflect.Value, kind byte) (good bool) { } return true case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - panic("Can't happen. No uint types in BSON?") + panic("can't happen: no uint types in BSON (!?)") } case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: switch inv.Kind() { diff --git a/bson/encode.go b/bson/encode.go index c24a80013..6544748cb 100644 --- a/bson/encode.go +++ b/bson/encode.go @@ -282,7 +282,7 @@ func (e *encoder) addElem(name string, v reflect.Value, minSize bool) { e.addElemName('\x01', name) e.addFloat64(f) } else { - panic("Failed to convert a json.Number to a number: " + s) + panic("failed to convert json.Number to a number: " + s) } default: e.addElemName('\x02', name) From e15707acad38d8b51cf33bc892873c7262812f26 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 21 Jul 2014 01:36:40 -0300 Subject: [PATCH 009/305] Timeout after 10 seconds on address resolution. Fixes MGO-34, reported by Cailin Nelson. 
--- cluster.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/cluster.go b/cluster.go index 60a69aa25..85cc76af1 100644 --- a/cluster.go +++ b/cluster.go @@ -395,11 +395,14 @@ func (cluster *mongoCluster) server(addr string, tcpaddr *net.TCPAddr) *mongoSer } func resolveAddr(addr string) (*net.TCPAddr, error) { - tcpaddr, err := net.ResolveTCPAddr("tcp", addr) + // This hack allows having a timeout on resolution. + conn, err := net.DialTimeout("udp", addr, 10 * time.Second) if err != nil { - log("SYNC Failed to resolve ", addr, ": ", err.Error()) - return nil, err + log("SYNC Failed to resolve server address: ", addr) + return nil, errors.New("failed to resolve server address: " + addr) } + tcpaddr := (*net.TCPAddr)(conn.RemoteAddr().(*net.UDPAddr)) + conn.Close() if tcpaddr.String() != addr { debug("SYNC Address ", addr, " resolved as ", tcpaddr.String()) } From 114d7e7b1789ac06deb2ba16a96dff5f34f4c7b0 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 21 Jul 2014 11:28:12 -0300 Subject: [PATCH 010/305] Expose pool limit setting via Session.SetPoolLimit. --- cluster.go | 13 ++++++------ cluster_test.go | 55 +++++++++++++++++++++++++++++++++++++++++-------- export_test.go | 9 -------- server.go | 13 ++++++------ session.go | 25 ++++++++++++++++++++-- 5 files changed, 82 insertions(+), 33 deletions(-) diff --git a/cluster.go b/cluster.go index 85cc76af1..3891bbfee 100644 --- a/cluster.go +++ b/cluster.go @@ -515,12 +515,10 @@ func (cluster *mongoCluster) syncServersIteration(direct bool) { cluster.Unlock() } -var socketsPerServer = 4096 - // AcquireSocket returns a socket to a server in the cluster. If slaveOk is // true, it will attempt to return a socket to a slave server. If it is // false, the socket will necessarily be to a master server. 
-func (cluster *mongoCluster) AcquireSocket(slaveOk bool, syncTimeout time.Duration, socketTimeout time.Duration, serverTags []bson.D) (s *mongoSocket, err error) { +func (cluster *mongoCluster) AcquireSocket(slaveOk bool, syncTimeout time.Duration, socketTimeout time.Duration, serverTags []bson.D, poolLimit int) (s *mongoSocket, err error) { var started time.Time var syncCount uint warnedLimit := false @@ -562,12 +560,13 @@ func (cluster *mongoCluster) AcquireSocket(slaveOk bool, syncTimeout time.Durati continue } - s, abended, err := server.AcquireSocket(socketsPerServer, socketTimeout) - if err == errSocketLimit { + s, abended, err := server.AcquireSocket(poolLimit, socketTimeout) + if err == errPoolLimit { if !warnedLimit { + warnedLimit = true log("WARNING: Per-server connection limit reached.") } - time.Sleep(1e8) + time.Sleep(100 * time.Millisecond) continue } if err != nil { @@ -582,7 +581,7 @@ func (cluster *mongoCluster) AcquireSocket(slaveOk bool, syncTimeout time.Durati logf("Cannot confirm server %s as master (%v)", server.Addr, err) s.Release() cluster.syncServers() - time.Sleep(1e8) + time.Sleep(100 * time.Millisecond) continue } } diff --git a/cluster_test.go b/cluster_test.go index 54f44908e..69519a2d9 100644 --- a/cluster_test.go +++ b/cluster_test.go @@ -1151,13 +1151,48 @@ func (s *S) TestRemovalOfClusterMember(c *C) { c.Log("========== Test succeeded. ==========") } -func (s *S) TestSocketLimit(c *C) { +func (s *S) TestPoolLimitSimple(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + stats := mgo.GetStats() + for stats.MasterConns+stats.SlaveConns != 1 { + stats = mgo.GetStats() + c.Log("Waiting for connection to be established...") + time.Sleep(100 * time.Millisecond) + } + + c.Assert(stats.SocketsAlive, Equals, 1) + c.Assert(stats.SocketsInUse, Equals, 0) + + // Put one socket in use. 
+ c.Assert(session.Ping(), IsNil) + + done := make(chan time.Duration) + + // Now block trying to get another one due to the pool limit. + go func() { + copy := session.Copy() + defer copy.Close() + copy.SetPoolLimit(1) + started := time.Now() + c.Check(copy.Ping(), IsNil) + done <- time.Now().Sub(started) + }() + + time.Sleep(500 * time.Millisecond) + + // Put the one socket back in the pool, freeing it for the copy. + session.Refresh() + delay := <-done + c.Assert(delay > 500 * time.Millisecond, Equals, true, Commentf("Delay: %s", delay)) +} + +func (s *S) TestPoolLimitMany(c *C) { if *fast { c.Skip("-fast") } - const socketLimit = 64 - restore := mgo.HackSocketsPerServer(socketLimit) - defer restore() session, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) @@ -1167,17 +1202,19 @@ func (s *S) TestSocketLimit(c *C) { for stats.MasterConns+stats.SlaveConns != 3 { stats = mgo.GetStats() c.Log("Waiting for all connections to be established...") - time.Sleep(5e8) + time.Sleep(500 * time.Millisecond) } c.Assert(stats.SocketsAlive, Equals, 3) + const poolLimit = 64 + session.SetPoolLimit(poolLimit) + // Consume the whole limit for the master. var master []*mgo.Session - for i := 0; i < socketLimit; i++ { + for i := 0; i < poolLimit; i++ { s := session.Copy() defer s.Close() - err := s.Ping() - c.Assert(err, IsNil) + c.Assert(s.Ping(), IsNil) master = append(master, s) } @@ -1187,7 +1224,7 @@ func (s *S) TestSocketLimit(c *C) { master[0].Refresh() }() - // Now a single ping must block, since it would need another + // Then, a single ping must block, since it would need another // connection to the master, over the limit. Once the goroutine // above releases its socket, it should move on. 
session.Ping() diff --git a/export_test.go b/export_test.go index b6bfcbc73..690f84d38 100644 --- a/export_test.go +++ b/export_test.go @@ -4,15 +4,6 @@ import ( "time" ) -func HackSocketsPerServer(newLimit int) (restore func()) { - oldLimit := newLimit - restore = func() { - socketsPerServer = oldLimit - } - socketsPerServer = newLimit - return -} - func HackPingDelay(newDelay time.Duration) (restore func()) { globalMutex.Lock() defer globalMutex.Unlock() diff --git a/server.go b/server.go index cc880e223..c86d22642 100644 --- a/server.go +++ b/server.go @@ -89,7 +89,7 @@ func newServer(addr string, tcpaddr *net.TCPAddr, sync chan bool, dial dialer) * return server } -var errSocketLimit = errors.New("per-server connection limit reached") +var errPoolLimit = errors.New("per-server connection limit reached") var errServerClosed = errors.New("server was closed") // AcquireSocket returns a socket for communicating with the server. @@ -97,9 +97,10 @@ var errServerClosed = errors.New("server was closed") // it will establish a new one. The returned socket is owned by the call site, // and will return to the cache when the socket has its Release method called // the same number of times as AcquireSocket + Acquire were called for it. -// If the limit argument is not zero, a socket will only be returned if the -// number of sockets in use for this server is under the provided limit. -func (server *mongoServer) AcquireSocket(limit int, timeout time.Duration) (socket *mongoSocket, abended bool, err error) { +// If the poolLimit argument is greater than zero and the number of sockets in +// use in this server is greater than the provided limit, errPoolLimit is +// returned. 
+func (server *mongoServer) AcquireSocket(poolLimit int, timeout time.Duration) (socket *mongoSocket, abended bool, err error) { for { server.Lock() abended = server.abended @@ -108,9 +109,9 @@ func (server *mongoServer) AcquireSocket(limit int, timeout time.Duration) (sock return nil, abended, errServerClosed } n := len(server.unusedSockets) - if limit > 0 && len(server.liveSockets)-n >= limit { + if poolLimit > 0 && len(server.liveSockets)-n >= poolLimit { server.Unlock() - return nil, false, errSocketLimit + return nil, false, errPoolLimit } if n > 0 { socket = server.unusedSockets[n-1] diff --git a/session.go b/session.go index e2a8a725d..50f367e7c 100644 --- a/session.go +++ b/session.go @@ -70,6 +70,7 @@ type Session struct { sourcedb string dialCred *Credential creds []Credential + poolLimit int } type Database struct { @@ -431,7 +432,12 @@ func parseURL(s string) (*urlInfo, error) { func newSession(consistency mode, cluster *mongoCluster, timeout time.Duration) (session *Session) { cluster.Acquire() - session = &Session{cluster_: cluster, syncTimeout: timeout, sockTimeout: timeout} + session = &Session{ + cluster_: cluster, + syncTimeout: timeout, + sockTimeout: timeout, + poolLimit: 4096, + } debugf("New session %p on cluster %p", session, cluster) session.SetMode(consistency, true) session.SetSafe(&Safe{}) @@ -1368,6 +1374,21 @@ func (s *Session) SetCursorTimeout(d time.Duration) { s.m.Unlock() } +// SetPoolLimit sets the maximum number of sockets in use in a single server +// before this session will block waiting for a socket to be available. +// The default limit is 4096. +// +// This limit must be set to cover more than any expected workload of the +// application. It is a bad practice and an unsupported use case to use the +// database driver to define the concurrency limit of an application. Prevent +// such concurrency "at the door" instead, by properly restricting the amount +// of used resources and number of goroutines before they are created. 
+func (s *Session) SetPoolLimit(limit int) { + s.m.Lock() + s.poolLimit = limit + s.m.Unlock() +} + // SetBatch sets the default batch size used when fetching documents from the // database. It's possible to change this setting on a per-query basis as // well, using the Query.Batch method. @@ -3365,7 +3386,7 @@ func (s *Session) acquireSocket(slaveOk bool) (*mongoSocket, error) { } // Still not good. We need a new socket. - sock, err := s.cluster().AcquireSocket(slaveOk && s.slaveOk, s.syncTimeout, s.sockTimeout, s.queryConfig.op.serverTags) + sock, err := s.cluster().AcquireSocket(slaveOk && s.slaveOk, s.syncTimeout, s.sockTimeout, s.queryConfig.op.serverTags, s.poolLimit) if err != nil { return nil, err } From 3116f72d0fe0216b75201cb3a1ae61c82c2a4d63 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 21 Jul 2014 12:14:10 -0300 Subject: [PATCH 011/305] Support standard maxPoolSize URL option. --- cluster_test.go | 59 ++++++++++++++++++++++++------------------------- session.go | 23 +++++++++++++++++-- 2 files changed, 50 insertions(+), 32 deletions(-) diff --git a/cluster_test.go b/cluster_test.go index 69519a2d9..7874cab51 100644 --- a/cluster_test.go +++ b/cluster_test.go @@ -1152,41 +1152,40 @@ func (s *S) TestRemovalOfClusterMember(c *C) { } func (s *S) TestPoolLimitSimple(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - stats := mgo.GetStats() - for stats.MasterConns+stats.SlaveConns != 1 { - stats = mgo.GetStats() - c.Log("Waiting for connection to be established...") - time.Sleep(100 * time.Millisecond) - } - - c.Assert(stats.SocketsAlive, Equals, 1) - c.Assert(stats.SocketsInUse, Equals, 0) + for test := 0; test < 2; test++ { + var session *mgo.Session + var err error + if test == 0 { + session, err = mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + session.SetPoolLimit(1) + } else { + session, err = mgo.Dial("localhost:40001?maxPoolSize=1") + c.Assert(err, IsNil) + } + defer 
session.Close() - // Put one socket in use. - c.Assert(session.Ping(), IsNil) + // Put one socket in use. + c.Assert(session.Ping(), IsNil) - done := make(chan time.Duration) + done := make(chan time.Duration) - // Now block trying to get another one due to the pool limit. - go func() { - copy := session.Copy() - defer copy.Close() - copy.SetPoolLimit(1) - started := time.Now() - c.Check(copy.Ping(), IsNil) - done <- time.Now().Sub(started) - }() + // Now block trying to get another one due to the pool limit. + go func() { + copy := session.Copy() + defer copy.Close() + started := time.Now() + c.Check(copy.Ping(), IsNil) + done <- time.Now().Sub(started) + }() - time.Sleep(500 * time.Millisecond) + time.Sleep(300 * time.Millisecond) - // Put the one socket back in the pool, freeing it for the copy. - session.Refresh() - delay := <-done - c.Assert(delay > 500 * time.Millisecond, Equals, true, Commentf("Delay: %s", delay)) + // Put the one socket back in the pool, freeing it for the copy. + session.Refresh() + delay := <-done + c.Assert(delay > 300 * time.Millisecond, Equals, true, Commentf("Delay: %s", delay)) + } } func (s *S) TestPoolLimitMany(c *C) { diff --git a/session.go b/session.go index 50f367e7c..dffeca798 100644 --- a/session.go +++ b/session.go @@ -186,8 +186,13 @@ const defaultPrefetch = 0.25 // // gssapiServiceName= // -// Defines the service name to use when authenticating with the GSSAPI -// mechanism. Defaults to "mongodb". +// Defines the service name to use when authenticating with the GSSAPI +// mechanism. Defaults to "mongodb". +// +// maxPoolSize= +// +// Defines the per-server socket pool limit. Defaults to 4096. +// See Session.SetPoolLimit for details. 
// // // Relevant documentation: @@ -218,6 +223,7 @@ func DialWithTimeout(url string, timeout time.Duration) (*Session, error) { mechanism := "" service := "" source := "" + poolLimit := 0 for k, v := range uinfo.options { switch k { case "authSource": @@ -226,6 +232,11 @@ func DialWithTimeout(url string, timeout time.Duration) (*Session, error) { mechanism = v case "gssapiServiceName": service = v + case "maxPoolSize": + poolLimit, err = strconv.Atoi(v) + if err != nil { + return nil, errors.New("bad value for maxPoolSize: " + v) + } case "connect": if v == "direct" { direct = true @@ -249,6 +260,7 @@ func DialWithTimeout(url string, timeout time.Duration) (*Session, error) { Mechanism: mechanism, Service: service, Source: source, + PoolLimit: poolLimit, } return DialWithInfo(&info) } @@ -300,6 +312,10 @@ type DialInfo struct { Username string Password string + // PoolLimit defines the per-server socket pool limit. Defaults to 4096. + // See Session.SetPoolLimit for details. + PoolLimit int + // DialServer optionally specifies the dial function for establishing // connections with the MongoDB servers. DialServer func(addr *ServerAddr) (net.Conn, error) @@ -363,6 +379,9 @@ func DialWithInfo(info *DialInfo) (*Session, error) { } session.creds = []Credential{*session.dialCred} } + if info.PoolLimit > 0 { + session.poolLimit = info.PoolLimit + } cluster.Release() // People get confused when we return a session that is not actually From 0b0a7993b947f14f67633b018c6fe7b4ebc6fd9e Mon Sep 17 00:00:00 2001 From: Ryan Cox Date: Tue, 29 Jul 2014 17:55:51 -0700 Subject: [PATCH 012/305] strip spaces from supervisorctl output --- testdb/setup.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testdb/setup.sh b/testdb/setup.sh index ab841dadb..27200c090 100755 --- a/testdb/setup.sh +++ b/testdb/setup.sh @@ -9,10 +9,10 @@ start() { chmod 600 keyfile echo "Running supervisord..." 
supervisord || ( echo "Supervisord failed executing ($?)" && exit 1 ) - COUNT=$(grep '^\[program' supervisord.conf | wc -l) + COUNT=$(grep '^\[program' supervisord.conf | wc -l | tr -d ' ') echo "Supervisord is up, starting $COUNT processes..." for i in $(seq 10); do - RUNNING=$(supervisorctl status | grep RUNNING | wc -l) + RUNNING=$(supervisorctl status | grep RUNNING | wc -l | tr -d ' ') echo "$RUNNING processes running..." if [ x$COUNT = x$RUNNING ]; then echo "Running setup.js with mongo..." From fe78d18b077631b8f97d323514fe68d394f7791c Mon Sep 17 00:00:00 2001 From: Ryan Cox Date: Tue, 29 Jul 2014 17:59:32 -0700 Subject: [PATCH 013/305] fix slowdown probability calc. was incorrectly using kill chance --- txn/chaos.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/txn/chaos.go b/txn/chaos.go index a9258faec..c98adb91d 100644 --- a/txn/chaos.go +++ b/txn/chaos.go @@ -46,7 +46,7 @@ func chaos(bpname string) { return } sc := chaosSetting.SlowdownChance - if sc > 0 && mrand.Intn(1000) < int(kc*1000) { + if sc > 0 && mrand.Intn(1000) < int(sc*1000) { time.Sleep(chaosSetting.Slowdown) } } From 89e4375b022ba275dd9e81a2e903ed4de4a8f2a0 Mon Sep 17 00:00:00 2001 From: Ryan Cox Date: Tue, 29 Jul 2014 17:55:51 -0700 Subject: [PATCH 014/305] strip spaces from supervisorctl output --- testdb/setup.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testdb/setup.sh b/testdb/setup.sh index ab841dadb..27200c090 100755 --- a/testdb/setup.sh +++ b/testdb/setup.sh @@ -9,10 +9,10 @@ start() { chmod 600 keyfile echo "Running supervisord..." supervisord || ( echo "Supervisord failed executing ($?)" && exit 1 ) - COUNT=$(grep '^\[program' supervisord.conf | wc -l) + COUNT=$(grep '^\[program' supervisord.conf | wc -l | tr -d ' ') echo "Supervisord is up, starting $COUNT processes..." 
for i in $(seq 10); do - RUNNING=$(supervisorctl status | grep RUNNING | wc -l) + RUNNING=$(supervisorctl status | grep RUNNING | wc -l | tr -d ' ') echo "$RUNNING processes running..." if [ x$COUNT = x$RUNNING ]; then echo "Running setup.js with mongo..." From f9cf92fe713c512d715e761517533aaacdecbd2b Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Wed, 30 Jul 2014 14:11:33 -0300 Subject: [PATCH 015/305] txn: stabilize the tarjan sorting on the output. That's both faster and more obviously correct. --- txn/tarjan.go | 20 +++++++++----------- txn/tarjan_test.go | 8 ++++---- 2 files changed, 13 insertions(+), 15 deletions(-) diff --git a/txn/tarjan.go b/txn/tarjan.go index 25cce5f5f..e56541c9b 100644 --- a/txn/tarjan.go +++ b/txn/tarjan.go @@ -13,18 +13,19 @@ func tarjanSort(successors map[bson.ObjectId][]bson.ObjectId) [][]bson.ObjectId index: make(map[bson.ObjectId]int, len(successors)), } - // Sort all nodes to stabilize the logic. - var all []string for id := range successors { - all = append(all, string(id)) - } - sort.Strings(all) - for _, strid := range all { - id := bson.ObjectId(strid) + id := bson.ObjectId(string(id)) if _, seen := data.index[id]; !seen { data.strongConnect(id) } } + + // Sort connected components to stabilize the algorithm. + for _, ids := range data.output { + if len(ids) > 1 { + sort.Sort(idList(ids)) + } + } return data.output } @@ -55,10 +56,7 @@ func (data *tarjanData) strongConnect(id bson.ObjectId) *tarjanNode { data.nodes = append(data.nodes, tarjanNode{index, true}) node := &data.nodes[index] - // Sort to stabilize the algorithm. 
- succids := idList(data.successors[id]) - sort.Sort(succids) - for _, succid := range succids { + for _, succid := range data.successors[id] { succindex, seen := data.index[succid] if !seen { succnode := data.strongConnect(succid) diff --git a/txn/tarjan_test.go b/txn/tarjan_test.go index 9586ce067..79745c39b 100644 --- a/txn/tarjan_test.go +++ b/txn/tarjan_test.go @@ -23,7 +23,7 @@ func bids(ns ...int) (ids []bson.ObjectId) { func (TarjanSuite) TestExample(c *C) { successors := map[bson.ObjectId][]bson.ObjectId{ - bid(1): bids(2), + bid(1): bids(2, 3), bid(2): bids(1, 5), bid(3): bids(4), bid(4): bids(3, 5), @@ -36,9 +36,9 @@ func (TarjanSuite) TestExample(c *C) { c.Assert(tarjanSort(successors), DeepEquals, [][]bson.ObjectId{ bids(9), - bids(8, 7, 6), + bids(6, 7, 8), bids(5), - bids(2, 1), - bids(4, 3), + bids(3, 4), + bids(1, 2), }) } From 4a4746db34c5c24602ca8e5ebb5307df51413a80 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Wed, 30 Jul 2014 14:32:22 -0300 Subject: [PATCH 016/305] txn: simplify txn-queue pulling logic --- txn/flusher.go | 64 +++++++++++++++++--------------------------------- 1 file changed, 22 insertions(+), 42 deletions(-) diff --git a/txn/flusher.go b/txn/flusher.go index d5e9f4528..586634201 100644 --- a/txn/flusher.go +++ b/txn/flusher.go @@ -352,19 +352,7 @@ NextDoc: f.debugf("Prepared queue with %s [has prereqs & not forced].", tt) return nil, errPreReqs } - for _, op := range t.Ops { - dkey := op.docKey() - revnos = append(revnos, revno[dkey]) - drevno := revno[dkey] - switch { - case op.Insert != nil && drevno < 0: - revno[dkey] = -drevno + 1 - case op.Update != nil && drevno >= 0: - revno[dkey] = drevno + 1 - case op.Remove && drevno >= 0: - revno[dkey] = -drevno - 1 - } - } + revnos = assembledRevnos(t.Ops, revno) if !prereqs { f.debugf("Prepared queue with %s [no prereqs]. 
Revnos: %v", tt, revnos) } else { @@ -483,13 +471,7 @@ func (f *flusher) rescan(t *transaction, force bool) (revnos []int64, err error) f.debugf("Rescanned queue with %s: has prereqs, not forced", tt) return nil, errPreReqs } - for _, op := range t.Ops { - dkey := op.docKey() - revnos = append(revnos, revno[dkey]) - if op.isChange() { - revno[dkey] += 1 - } - } + revnos = assembledRevnos(t.Ops, revno) if !prereqs { f.debugf("Rescanned queue with %s: no prereqs, revnos: %v", tt, revnos) } else { @@ -498,6 +480,24 @@ func (f *flusher) rescan(t *transaction, force bool) (revnos []int64, err error) return revnos, nil } +func assembledRevnos(ops []Op, revno map[docKey]int64) []int64 { + revnos := make([]int64, len(ops)) + for i, op := range ops { + dkey := op.docKey() + revnos[i] = revno[dkey] + drevno := revno[dkey] + switch { + case op.Insert != nil && drevno < 0: + revno[dkey] = -drevno + 1 + case op.Update != nil && drevno >= 0: + revno[dkey] = drevno + 1 + case op.Remove && drevno >= 0: + revno[dkey] = -drevno - 1 + } + } + return revnos +} + func (f *flusher) hasPreReqs(tt token, dkeys docKeys) (prereqs, found bool) { found = true NextDoc: @@ -688,18 +688,6 @@ func (f *flusher) apply(t *transaction, pull map[bson.ObjectId]*transaction) err pull = map[bson.ObjectId]*transaction{t.Id: t} } - // Compute the operation in which t's id may be pulled - // out of txn-queue. That's on its last change, or the - // first assertion. - pullOp := make(map[docKey]int) - for i := range t.Ops { - op := &t.Ops[i] - dkey := op.docKey() - if _, ok := pullOp[dkey]; !ok || op.isChange() { - pullOp[dkey] = i - } - } - logRevnos := append([]int64(nil), t.Revnos...) 
logDoc := bson.D{{"_id", t.Id}} @@ -732,12 +720,7 @@ func (f *flusher) apply(t *transaction, pull map[bson.ObjectId]*transaction) err qdoc[1].Value = bson.D{{"$exists", false}} } - dontPull := tt - isPullOp := pullOp[dkey] == i - if isPullOp { - dontPull = "" - } - pullAll := tokensToPull(dqueue, pull, dontPull) + pullAll := tokensToPull(dqueue, pull, tt) var d bson.D var outcome string @@ -851,13 +834,10 @@ func (f *flusher) apply(t *transaction, pull map[bson.ObjectId]*transaction) err f.debugf("Stash for document %v removed", dkey) } } - if pullOp[dkey] == i && len(pullAll) > 0 { - _ = f.sc.UpdateId(dkey, bson.D{{"$pullAll", bson.D{{"txn-queue", pullAll}}}}) - } } } case op.Assert != nil: - // TODO pullAll if pullOp[dkey] == i + // Pure assertion. No changes to apply. } if err == nil { outcome = "DONE" From 3bc3ddaa8017e178a2caa7d2d8e4b49fd17b27aa Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Wed, 30 Jul 2014 14:12:22 -0300 Subject: [PATCH 017/305] txn: drop bogus assumption in txn-queue popping. --- txn/flusher.go | 15 +++-------- txn/txn_test.go | 72 ++++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 75 insertions(+), 12 deletions(-) diff --git a/txn/flusher.go b/txn/flusher.go index 586634201..5318ad0c9 100644 --- a/txn/flusher.go +++ b/txn/flusher.go @@ -897,17 +897,10 @@ func tokensToPull(dqueue []token, pull map[bson.ObjectId]*transaction, dontPull var result []token for j := len(dqueue) - 1; j >= 0; j-- { dtt := dqueue[j] - if dt, ok := pull[dtt.id()]; ok { - if dt.Nonce == dtt.nonce() { - // It's valid and is being pulled out, so everything - // preceding it must have been handled already. - if dtt == dontPull { - // Not time to pull this one out yet. - j-- - } - result = append(result, dqueue[:j+1]...) - break - } + if dtt == dontPull { + continue + } + if _, ok := pull[dtt.id()]; ok { // It was handled before and this is a leftover invalid // nonce in the queue. Cherry-pick it out. 
result = append(result, dtt) diff --git a/txn/txn_test.go b/txn/txn_test.go index a3aa07679..6ddfa2179 100644 --- a/txn/txn_test.go +++ b/txn/txn_test.go @@ -1,11 +1,14 @@ package txn_test import ( + "sync" + "testing" + "time" + "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" "gopkg.in/mgo.v2/txn" . "gopkg.in/check.v1" - "testing" ) func TestAll(t *testing.T) { @@ -38,6 +41,11 @@ func (s *S) SetUpTest(c *C) { s.runner = txn.NewRunner(s.tc) } +func (s *S) TearDownTest(c *C) { + txn.SetLogger(nil) + txn.SetDebug(false) +} + type Account struct { Id int `bson:"_id"` Balance int @@ -519,3 +527,65 @@ func (s *S) TestPurgeMissing(c *C) { } } } + +func (s *S) TestTxnQueueStressTest(c *C) { + txn.SetChaos(txn.Chaos{ + SlowdownChance: 0.3, + Slowdown: 50 * time.Millisecond, + }) + defer txn.SetChaos(txn.Chaos{}) + + // So we can run more iterations of the test in less time. + txn.SetDebug(false) + + err := s.accounts.Insert(M{"_id": 0, "balance": 0}, M{"_id": 1, "balance": 0}) + c.Assert(err, IsNil) + + // Run half of the operations changing account 0 and then 1, + // and the other half in the opposite order. 
+ ops01 := []txn.Op{{ + C: "accounts", + Id: 0, + Update: M{"$inc": M{"balance": 1}}, + }, { + C: "accounts", + Id: 1, + Update: M{"$inc": M{"balance": 1}}, + }} + + ops10 := []txn.Op{{ + C: "accounts", + Id: 1, + Update: M{"$inc": M{"balance": 1}}, + }, { + C: "accounts", + Id: 0, + Update: M{"$inc": M{"balance": 1}}, + }} + + ops := [][]txn.Op{ops01, ops10} + + const runners = 4 + const changes = 20 + + var wg sync.WaitGroup + wg.Add(runners) + for n := 0; n < runners; n++ { + go func() { + defer wg.Done() + for i := 0; i < changes; i++ { + err = s.runner.Run(ops[n%2], "", nil) + c.Assert(err, IsNil) + } + }() + } + wg.Wait() + + for id := 0; id < 2; id++ { + var account Account + err = s.accounts.FindId(id).One(&account) + if account.Balance != runners * changes { + c.Errorf("Account should have balance of %d, got %d", runners * changes, account.Balance) + } + } +} From 6140b4b7e0586b63f4c7b1ef1fd8e26d5138febd Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Wed, 30 Jul 2014 15:43:02 -0300 Subject: [PATCH 018/305] txn: fix incorrect assumption on PurgeMissing test --- txn/txn_test.go | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/txn/txn_test.go b/txn/txn_test.go index 6ddfa2179..487949d16 100644 --- a/txn/txn_test.go +++ b/txn/txn_test.go @@ -5,10 +5,10 @@ import ( "testing" "time" + . "gopkg.in/check.v1" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" "gopkg.in/mgo.v2/txn" - . 
"gopkg.in/check.v1" ) func TestAll(t *testing.T) { @@ -485,26 +485,36 @@ func (s *S) TestPurgeMissing(c *C) { Insert: M{"balance": 100}, }} - err = s.runner.Run(ops1, "", nil) + first := bson.NewObjectId() + c.Logf("---- Running ops1 under transaction %q, to be canceled by chaos", first.Hex()) + err = s.runner.Run(ops1, first, nil) c.Assert(err, Equals, txn.ErrChaos) last := bson.NewObjectId() + c.Logf("---- Running ops2 under transaction %q, to be canceled by chaos", last.Hex()) err = s.runner.Run(ops2, last, nil) c.Assert(err, Equals, txn.ErrChaos) + + c.Logf("---- Removing transaction %q", last.Hex()) err = s.tc.RemoveId(last) c.Assert(err, IsNil) + c.Logf("---- Disabling chaos and attempting to resume all") txn.SetChaos(txn.Chaos{}) err = s.runner.ResumeAll() c.Assert(err, IsNil) + again := bson.NewObjectId() + c.Logf("---- Running ops2 again under transaction %q, to fail for missing transaction", again.Hex()) err = s.runner.Run(ops2, "", nil) c.Assert(err, ErrorMatches, "cannot find transaction .*") + c.Logf("---- Puring missing transactions") err = s.runner.PurgeMissing("accounts") c.Assert(err, IsNil) - err = s.runner.Run(ops2, "", nil) + c.Logf("---- Resuming pending transactions") + err = s.runner.ResumeAll() c.Assert(err, IsNil) expect := []struct{ Id, Balance int }{ @@ -584,8 +594,8 @@ func (s *S) TestTxnQueueStressTest(c *C) { for id := 0; id < 2; id++ { var account Account err = s.accounts.FindId(id).One(&account) - if account.Balance != runners * changes { - c.Errorf("Account should have balance of %d, got %d", runners * changes, account.Balance) + if account.Balance != runners*changes { + c.Errorf("Account should have balance of %d, got %d", runners*changes, account.Balance) } } } From dc255bb679efa273b6544a03261c4053505498a4 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Wed, 30 Jul 2014 17:00:37 -0300 Subject: [PATCH 019/305] txn: fix test so each goroutine gets a different n --- txn/txn_test.go | 5 ++++- 1 file changed, 4 insertions(+), 1 
deletion(-) diff --git a/txn/txn_test.go b/txn/txn_test.go index 487949d16..119bf21d6 100644 --- a/txn/txn_test.go +++ b/txn/txn_test.go @@ -576,11 +576,14 @@ func (s *S) TestTxnQueueStressTest(c *C) { ops := [][]txn.Op{ops01, ops10} const runners = 4 - const changes = 20 + const changes = 1000 + + txn.SetDebug(true) var wg sync.WaitGroup wg.Add(runners) for n := 0; n < runners; n++ { + n := n go func() { defer wg.Done() for i := 0; i < changes; i++ { From 55be583e47575b34c5fec28b7963c8d5d5c5ecde Mon Sep 17 00:00:00 2001 From: abhishekk Date: Tue, 5 Aug 2014 13:00:08 -0700 Subject: [PATCH 020/305] support MaxScan in queries --- session.go | 15 +++++++++++++++ socket.go | 1 + 2 files changed, 16 insertions(+) diff --git a/session.go b/session.go index dffeca798..c2c8d4d08 100644 --- a/session.go +++ b/session.go @@ -2264,6 +2264,21 @@ func (q *Query) Hint(indexKey ...string) *Query { return q } +// SetMaxScan constrains the query to only scan the specified number of +// documents when fulfilling the query. +// +// Relevant documentation: +// +// http://docs.mongodb.org/manual/reference/operator/meta/maxScan +// +func (q *Query) SetMaxScan(n int) *Query { + q.m.Lock() + q.op.options.MaxScan = n + q.op.hasOptions = true + q.m.Unlock() + return q +} + // Snapshot will force the performed query to make use of an available // index on the _id field to prevent the same document from being returned // more than once in a single iteration. 
This might happen without this diff --git a/socket.go b/socket.go index 227749fdd..1fb0dff77 100644 --- a/socket.go +++ b/socket.go @@ -86,6 +86,7 @@ type queryWrapper struct { Explain bool "$explain,omitempty" Snapshot bool "$snapshot,omitempty" ReadPreference bson.D "$readPreference,omitempty" + MaxScan int "$maxScan,omitempty" } func (op *queryOp) finalQuery(socket *mongoSocket) interface{} { From 6af013a101423239132715d5cccf1c8d41b6632c Mon Sep 17 00:00:00 2001 From: abhishekk Date: Tue, 5 Aug 2014 14:57:57 -0700 Subject: [PATCH 021/305] add a test for max scan --- session_test.go | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/session_test.go b/session_test.go index 788035bd5..753c47329 100644 --- a/session_test.go +++ b/session_test.go @@ -1042,6 +1042,25 @@ func (s *S) TestQueryExplain(c *C) { c.Assert(n, Equals, 2) } +func (s *S) TestMaxScanned(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + coll := session.DB("mydb").C("mycoll") + + ns := []int{40, 41, 42} + for _, n := range ns { + err := coll.Insert(M{"n": n}) + c.Assert(err, IsNil) + } + + query := coll.Find(nil).SetMaxScan(2) + var result []M + err = query.All(&result) + c.Assert(err, IsNil) + c.Assert(result, HasLen, 2) +} + func (s *S) TestQueryHint(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) From aec61dd37837f6ccac9e1eb14fa0b9ddc30b2fe0 Mon Sep 17 00:00:00 2001 From: Roger Peppe Date: Tue, 12 Aug 2014 13:17:12 +0100 Subject: [PATCH 022/305] mgo: optimize seeking to end of GridFS file --- bulk.go | 6 +++--- cluster.go | 2 +- cluster_test.go | 2 +- gridfs.go | 8 ++++++++ gridfs_test.go | 9 ++++++++- log.go | 6 +++--- raceoff.go | 1 - server.go | 2 +- 8 files changed, 25 insertions(+), 11 deletions(-) diff --git a/bulk.go b/bulk.go index 5a9d37b70..23f450853 100644 --- a/bulk.go +++ b/bulk.go @@ -10,8 +10,8 @@ package mgo // http://blog.mongodb.org/post/84922794768/mongodbs-new-bulk-api // type 
Bulk struct { - c *Collection - ordered bool + c *Collection + ordered bool inserts []interface{} } @@ -44,7 +44,7 @@ func (c *Collection) Bulk() *Bulk { } // Unordered puts the bulk operation in unordered mode. -// +// // In unordered mode the indvidual operations may be sent // out of order, which means latter operations may proceed // even if prior ones have failed. diff --git a/cluster.go b/cluster.go index 3891bbfee..10db6372d 100644 --- a/cluster.go +++ b/cluster.go @@ -396,7 +396,7 @@ func (cluster *mongoCluster) server(addr string, tcpaddr *net.TCPAddr) *mongoSer func resolveAddr(addr string) (*net.TCPAddr, error) { // This hack allows having a timeout on resolution. - conn, err := net.DialTimeout("udp", addr, 10 * time.Second) + conn, err := net.DialTimeout("udp", addr, 10*time.Second) if err != nil { log("SYNC Failed to resolve server address: ", addr) return nil, errors.New("failed to resolve server address: " + addr) diff --git a/cluster_test.go b/cluster_test.go index 7874cab51..a29bd6f16 100644 --- a/cluster_test.go +++ b/cluster_test.go @@ -1184,7 +1184,7 @@ func (s *S) TestPoolLimitSimple(c *C) { // Put the one socket back in the pool, freeing it for the copy. session.Refresh() delay := <-done - c.Assert(delay > 300 * time.Millisecond, Equals, true, Commentf("Delay: %s", delay)) + c.Assert(delay > 300*time.Millisecond, Equals, true, Commentf("Delay: %s", delay)) } } diff --git a/gridfs.go b/gridfs.go index ce1b7f9f8..647305ad6 100644 --- a/gridfs.go +++ b/gridfs.go @@ -651,6 +651,14 @@ func (file *GridFile) Seek(offset int64, whence int) (pos int64, err error) { if offset > file.doc.Length { return file.offset, errors.New("seek past end of file") } + if offset == file.doc.Length { + // If we're seeking to the end of the file, + // no need to read anything. This enables + // a client to find the size of the file using only the + // io.ReadSeeker interface with low overhead. 
+ file.offset = offset + return file.offset, nil + } chunk := int(offset / int64(file.doc.ChunkSize)) if chunk+1 == file.chunk && offset >= file.offset { file.rbuf = file.rbuf[int(offset-file.offset):] diff --git a/gridfs_test.go b/gridfs_test.go index 252d8f968..4cd5b7d02 100644 --- a/gridfs_test.go +++ b/gridfs_test.go @@ -31,9 +31,9 @@ import ( "os" "time" + . "gopkg.in/check.v1" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" - . "gopkg.in/check.v1" ) func (s *S) TestGridFSCreate(c *C) { @@ -485,6 +485,13 @@ func (s *S) TestGridFSSeek(c *C) { c.Assert(err, IsNil) c.Assert(b, DeepEquals, []byte("nopqr")) + o, err = file.Seek(0, os.SEEK_END) + c.Assert(err, IsNil) + c.Assert(o, Equals, int64(22)) + n, err = file.Read(b) + c.Assert(err, Equals, io.EOF) + c.Assert(n, Equals, 0) + o, err = file.Seek(-10, os.SEEK_END) c.Assert(err, IsNil) c.Assert(o, Equals, int64(12)) diff --git a/log.go b/log.go index 9abbe2103..53eb4237b 100644 --- a/log.go +++ b/log.go @@ -42,9 +42,9 @@ type log_Logger interface { } var ( - globalLogger log_Logger - globalDebug bool - globalMutex sync.Mutex + globalLogger log_Logger + globalDebug bool + globalMutex sync.Mutex ) // RACE WARNING: There are known data races when logging, which are manually diff --git a/raceoff.go b/raceoff.go index 7470dd68d..e60b14144 100644 --- a/raceoff.go +++ b/raceoff.go @@ -3,4 +3,3 @@ package mgo const raceDetector = false - diff --git a/server.go b/server.go index c86d22642..eb89dfd56 100644 --- a/server.go +++ b/server.go @@ -295,7 +295,7 @@ func (server *mongoServer) pinger(loop bool) { time.Sleep(delay) } op := op - socket, _, err := server.AcquireSocket(0, 3 * delay) + socket, _, err := server.AcquireSocket(0, 3*delay) if err == nil { start := time.Now() _, _ = socket.SimpleQuery(&op) From 0a99cd49208305a6c5d8e1fb987a5cde2230dfa6 Mon Sep 17 00:00:00 2001 From: Valeri Karpov Date: Tue, 12 Aug 2014 11:15:19 -0400 Subject: [PATCH 023/305] Fix windows compile issues due to import order --- sasl/sasl.c | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-) diff --git a/sasl/sasl.c b/sasl/sasl.c index 87c17c69a..cd8622260 100644 --- a/sasl/sasl.c +++ b/sasl/sasl.c @@ -1,7 +1,7 @@ -#include #include #include #include +#include static int mgo_sasl_simple(void *context, int id, const char **result, unsigned int *len) { From 8a27a67fc34d7a96f5037a616cf4ac56b9234268 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Thu, 14 Aug 2014 23:11:08 -0300 Subject: [PATCH 024/305] Tweak SetMaxScan documentation. --- session.go | 10 ++++------ session_test.go | 2 +- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/session.go b/session.go index c2c8d4d08..2d3d895b9 100644 --- a/session.go +++ b/session.go @@ -2264,13 +2264,11 @@ func (q *Query) Hint(indexKey ...string) *Query { return q } -// SetMaxScan constrains the query to only scan the specified number of -// documents when fulfilling the query. -// -// Relevant documentation: -// -// http://docs.mongodb.org/manual/reference/operator/meta/maxScan +// SetMaxScan constrains the query to stop after scanning the specified +// number of documents. // +// This modifier is generally used to prevent potentially long running +// queries from disrupting performance by scanning through too much data. func (q *Query) SetMaxScan(n int) *Query { q.m.Lock() q.op.options.MaxScan = n diff --git a/session_test.go b/session_test.go index 753c47329..c61bcb92f 100644 --- a/session_test.go +++ b/session_test.go @@ -1042,7 +1042,7 @@ func (s *S) TestQueryExplain(c *C) { c.Assert(n, Equals, 2) } -func (s *S) TestMaxScanned(c *C) { +func (s *S) TestQueryMaxScan(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() From c9fd3712fbf3e92924c974dce16da2d322508fe2 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Thu, 14 Aug 2014 23:31:54 -0300 Subject: [PATCH 025/305] Added trivial README.md pointing to labix.org. 
--- README.md | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 README.md diff --git a/README.md b/README.md new file mode 100644 index 000000000..f4e452c04 --- /dev/null +++ b/README.md @@ -0,0 +1,4 @@ +The MongoDB driver for Go +------------------------- + +Please go to [http://labix.org/mgo](http://labix.org/mgo) for all project details. From 5b7de8850840cb3a032f67f929315ca1d7acbbb2 Mon Sep 17 00:00:00 2001 From: Valeri Karpov Date: Mon, 18 Aug 2014 16:14:24 -0400 Subject: [PATCH 026/305] MGO-35 add support for ServiceHostname for GSSAPI, test for ServiceName --- auth.go | 8 +++++- auth_test.go | 70 ++++++++++++++++++++++++++++++++++++++++++++++++++++ session.go | 21 ++++++++++++---- 3 files changed, 93 insertions(+), 6 deletions(-) diff --git a/auth.go b/auth.go index 84c44afe3..4ba5bb064 100644 --- a/auth.go +++ b/auth.go @@ -246,7 +246,13 @@ func (socket *mongoSocket) loginPlain(cred Credential) error { } func (socket *mongoSocket) loginSASL(cred Credential) error { - sasl, err := saslNew(cred, socket.Server().Addr) + var sasl saslStepper; + var err error; + if len(cred.ServiceHostname) > 0 { + sasl, err = saslNew(cred, cred.ServiceHostname) + } else { + sasl, err = saslNew(cred, socket.Server().Addr) + } if err != nil { return err } diff --git a/auth_test.go b/auth_test.go index 07b41f073..867a1af05 100644 --- a/auth_test.go +++ b/auth_test.go @@ -936,3 +936,73 @@ func (s *S) TestAuthKerberosURL(c *C) { c.Assert(err, IsNil) c.Assert(len(names) > 0, Equals, true) } + +func (s *S) TestAuthKerberosServiceName(c *C) { + if !*kerberosFlag { + c.Skip("no -kerberos") + } + + wrongServiceName := "wrong" + rightServiceName := "mongodb" + + cred := &mgo.Credential{ + Username: kerberosUser, + Mechanism: "GSSAPI", + Service: wrongServiceName, + } + + c.Logf("Connecting to %s...", kerberosHost) + session, err := mgo.Dial(kerberosHost) + c.Assert(err, IsNil) + defer session.Close() + + c.Logf("Authenticating with incorrect service name...") + err = 
session.Login(cred) + c.Assert(err, ErrorMatches, + ".*Server wrong/mmscustmongo.10gen.me@10GEN.ME not found.*") + + cred.Service = rightServiceName + c.Logf("Authenticating with correct service name...") + err = session.Login(cred) + c.Assert(err, IsNil) + c.Logf("Authenticated!") + + names, err := session.DatabaseNames() + c.Assert(err, IsNil) + c.Assert(len(names) > 0, Equals, true) +} + +func (s *S) TestAuthKerberosServiceHostname(c *C) { + if !*kerberosFlag { + c.Skip("no -kerberos") + } + + wrongServiceHostname := "eggs.bacon.tk" + rightServiceHostname := "mmscustmongo.10gen.me" + + cred := &mgo.Credential{ + Username: kerberosUser, + Mechanism: "GSSAPI", + ServiceHostname: wrongServiceHostname, + } + + c.Logf("Connecting to %s...", kerberosHost) + session, err := mgo.Dial(kerberosHost) + c.Assert(err, IsNil) + defer session.Close() + + c.Logf("Authenticating with incorrect service hostname...") + err = session.Login(cred) + c.Assert(err, ErrorMatches, + ".*Server krbtgt/BACON.TK@10GEN.ME not found.*") + + cred.ServiceHostname = rightServiceHostname + c.Logf("Authenticating with correct service hostname...") + err = session.Login(cred) + c.Assert(err, IsNil) + c.Logf("Authenticated!") + + names, err := session.DatabaseNames() + c.Assert(err, IsNil) + c.Assert(len(names) > 0, Equals, true) +} diff --git a/session.go b/session.go index dffeca798..090eeeaa2 100644 --- a/session.go +++ b/session.go @@ -303,6 +303,11 @@ type DialInfo struct { // mechanism. Defaults to "mongodb". Service string + // ServiceHostname defines which hostname to use when authenticating + // with the GSSAPI mechanism. If not specified, defaults to the MongoDB + // server's address. + ServiceHostname string + // Mechanism defines the protocol for credential negotiation. // Defaults to "MONGODB-CR". 
Mechanism string @@ -371,11 +376,12 @@ func DialWithInfo(info *DialInfo) (*Session, error) { source = "$external" } session.dialCred = &Credential{ - Username: info.Username, - Password: info.Password, - Mechanism: info.Mechanism, - Service: info.Service, - Source: source, + Username: info.Username, + Password: info.Password, + Mechanism: info.Mechanism, + Service: info.Service, + ServiceHostname: info.ServiceHostname, + Source: source, } session.creds = []Credential{*session.dialCred} } @@ -596,6 +602,11 @@ type Credential struct { // mechanism. Defaults to "mongodb". Service string + // ServiceHostname defines which hostname to use when authenticating + // with the GSSAPI mechanism. If not specified, defaults to the MongoDB + // server's address. + ServiceHostname string + // Mechanism defines the protocol for credential negotiation. // Defaults to "MONGODB-CR". Mechanism string From a3e64e6284b22f33d6ddcaa2c5ecd1c986402601 Mon Sep 17 00:00:00 2001 From: Valeri Karpov Date: Mon, 18 Aug 2014 18:01:06 -0400 Subject: [PATCH 027/305] MGO-35 address Gustavo's comments --- auth.go | 6 ++-- auth_test.go | 100 +++++++++++++++++++++++++-------------------------- session.go | 20 +++++------ 3 files changed, 62 insertions(+), 64 deletions(-) diff --git a/auth.go b/auth.go index 4ba5bb064..35cc6ee92 100644 --- a/auth.go +++ b/auth.go @@ -246,9 +246,9 @@ func (socket *mongoSocket) loginPlain(cred Credential) error { } func (socket *mongoSocket) loginSASL(cred Credential) error { - var sasl saslStepper; - var err error; - if len(cred.ServiceHostname) > 0 { + var sasl saslStepper + var err error + if len(cred.ServiceHost) > 0 { sasl, err = saslNew(cred, cred.ServiceHostname) } else { sasl, err = saslNew(cred, socket.Server().Addr) diff --git a/auth_test.go b/auth_test.go index 867a1af05..2d963671f 100644 --- a/auth_test.go +++ b/auth_test.go @@ -939,70 +939,68 @@ func (s *S) TestAuthKerberosURL(c *C) { func (s *S) TestAuthKerberosServiceName(c *C) { if !*kerberosFlag { - 
c.Skip("no -kerberos") - } + c.Skip("no -kerberos") + } wrongServiceName := "wrong" rightServiceName := "mongodb" - cred := &mgo.Credential{ - Username: kerberosUser, - Mechanism: "GSSAPI", - Service: wrongServiceName, - } + cred := &mgo.Credential{ + Username: kerberosUser, + Mechanism: "GSSAPI", + Service: wrongServiceName, + } c.Logf("Connecting to %s...", kerberosHost) - session, err := mgo.Dial(kerberosHost) - c.Assert(err, IsNil) - defer session.Close() + session, err := mgo.Dial(kerberosHost) + c.Assert(err, IsNil) + defer session.Close() c.Logf("Authenticating with incorrect service name...") - err = session.Login(cred) - c.Assert(err, ErrorMatches, - ".*Server wrong/mmscustmongo.10gen.me@10GEN.ME not found.*") + err = session.Login(cred) + c.Assert(err, ErrorMatches, ".*Server wrong/mmscustmongo.10gen.me@10GEN.ME not found.*") cred.Service = rightServiceName c.Logf("Authenticating with correct service name...") - err = session.Login(cred) - c.Assert(err, IsNil) - c.Logf("Authenticated!") + err = session.Login(cred) + c.Assert(err, IsNil) + c.Logf("Authenticated!") - names, err := session.DatabaseNames() - c.Assert(err, IsNil) - c.Assert(len(names) > 0, Equals, true) + names, err := session.DatabaseNames() + c.Assert(err, IsNil) + c.Assert(len(names) > 0, Equals, true) } -func (s *S) TestAuthKerberosServiceHostname(c *C) { +func (s *S) TestAuthKerberosServiceHost(c *C) { if !*kerberosFlag { - c.Skip("no -kerberos") - } - - wrongServiceHostname := "eggs.bacon.tk" - rightServiceHostname := "mmscustmongo.10gen.me" - - cred := &mgo.Credential{ - Username: kerberosUser, - Mechanism: "GSSAPI", - ServiceHostname: wrongServiceHostname, - } - - c.Logf("Connecting to %s...", kerberosHost) - session, err := mgo.Dial(kerberosHost) - c.Assert(err, IsNil) - defer session.Close() - - c.Logf("Authenticating with incorrect service hostname...") - err = session.Login(cred) - c.Assert(err, ErrorMatches, - ".*Server krbtgt/BACON.TK@10GEN.ME not found.*") - - 
cred.ServiceHostname = rightServiceHostname - c.Logf("Authenticating with correct service hostname...") - err = session.Login(cred) - c.Assert(err, IsNil) - c.Logf("Authenticated!") - - names, err := session.DatabaseNames() - c.Assert(err, IsNil) - c.Assert(len(names) > 0, Equals, true) + c.Skip("no -kerberos") + } + + wrongServiceHost := "eggs.bacon.tk" + rightServiceHost := "mmscustmongo.10gen.me" + + cred := &mgo.Credential{ + Username: kerberosUser, + Mechanism: "GSSAPI", + ServiceHost: wrongServiceHostname, + } + + c.Logf("Connecting to %s...", kerberosHost) + session, err := mgo.Dial(kerberosHost) + c.Assert(err, IsNil) + defer session.Close() + + c.Logf("Authenticating with incorrect service host...") + err = session.Login(cred) + c.Assert(err, ErrorMatches, ".*Server krbtgt/BACON.TK@10GEN.ME not found.*") + + cred.ServiceHost = rightServiceHostname + c.Logf("Authenticating with correct service host...") + err = session.Login(cred) + c.Assert(err, IsNil) + c.Logf("Authenticated!") + + names, err := session.DatabaseNames() + c.Assert(err, IsNil) + c.Assert(len(names) > 0, Equals, true) } diff --git a/session.go b/session.go index 090eeeaa2..5565485de 100644 --- a/session.go +++ b/session.go @@ -303,10 +303,10 @@ type DialInfo struct { // mechanism. Defaults to "mongodb". Service string - // ServiceHostname defines which hostname to use when authenticating + // ServiceHost defines which hostname to use when authenticating // with the GSSAPI mechanism. If not specified, defaults to the MongoDB // server's address. - ServiceHostname string + ServiceHost string // Mechanism defines the protocol for credential negotiation. // Defaults to "MONGODB-CR". 
@@ -376,12 +376,12 @@ func DialWithInfo(info *DialInfo) (*Session, error) { source = "$external" } session.dialCred = &Credential{ - Username: info.Username, - Password: info.Password, - Mechanism: info.Mechanism, - Service: info.Service, - ServiceHostname: info.ServiceHostname, - Source: source, + Username: info.Username, + Password: info.Password, + Mechanism: info.Mechanism, + Service: info.Service, + ServiceHost: info.ServiceHost, + Source: source, } session.creds = []Credential{*session.dialCred} } @@ -602,10 +602,10 @@ type Credential struct { // mechanism. Defaults to "mongodb". Service string - // ServiceHostname defines which hostname to use when authenticating + // ServiceHost defines which hostname to use when authenticating // with the GSSAPI mechanism. If not specified, defaults to the MongoDB // server's address. - ServiceHostname string + ServiceHost string // Mechanism defines the protocol for credential negotiation. // Defaults to "MONGODB-CR". From 71d1c4d612ace29d50502ec3aaf05d3d41350911 Mon Sep 17 00:00:00 2001 From: Valeri Karpov Date: Mon, 18 Aug 2014 18:10:07 -0400 Subject: [PATCH 028/305] MGO-35 fix a couple places where I missed Hostname => Host conversion --- auth.go | 2 +- auth_test.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/auth.go b/auth.go index 35cc6ee92..72bb10175 100644 --- a/auth.go +++ b/auth.go @@ -249,7 +249,7 @@ func (socket *mongoSocket) loginSASL(cred Credential) error { var sasl saslStepper var err error if len(cred.ServiceHost) > 0 { - sasl, err = saslNew(cred, cred.ServiceHostname) + sasl, err = saslNew(cred, cred.ServiceHost) } else { sasl, err = saslNew(cred, socket.Server().Addr) } diff --git a/auth_test.go b/auth_test.go index 2d963671f..33ff4633a 100644 --- a/auth_test.go +++ b/auth_test.go @@ -982,7 +982,7 @@ func (s *S) TestAuthKerberosServiceHost(c *C) { cred := &mgo.Credential{ Username: kerberosUser, Mechanism: "GSSAPI", - ServiceHost: wrongServiceHostname, + ServiceHost: 
wrongServiceHost, } c.Logf("Connecting to %s...", kerberosHost) @@ -994,7 +994,7 @@ func (s *S) TestAuthKerberosServiceHost(c *C) { err = session.Login(cred) c.Assert(err, ErrorMatches, ".*Server krbtgt/BACON.TK@10GEN.ME not found.*") - cred.ServiceHost = rightServiceHostname + cred.ServiceHost = rightServiceHost c.Logf("Authenticating with correct service host...") err = session.Login(cred) c.Assert(err, IsNil) From 220b5da603537806741b9b6c6fdfa1487e38cde6 Mon Sep 17 00:00:00 2001 From: Valeri Karpov Date: Mon, 18 Aug 2014 18:25:44 -0400 Subject: [PATCH 029/305] MGO-35 fix one more spacing issue --- session.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/session.go b/session.go index 5565485de..97faf774f 100644 --- a/session.go +++ b/session.go @@ -304,7 +304,7 @@ type DialInfo struct { Service string // ServiceHost defines which hostname to use when authenticating - // with the GSSAPI mechanism. If not specified, defaults to the MongoDB + // with the GSSAPI mechanism. If not specified, defaults to the MongoDB // server's address. ServiceHost string @@ -603,8 +603,8 @@ type Credential struct { Service string // ServiceHost defines which hostname to use when authenticating - // with the GSSAPI mechanism. If not specified, defaults to the MongoDB - // server's address. + // with the GSSAPI mechanism. If not specified, defaults to the MongoDB + // server's address. ServiceHost string // Mechanism defines the protocol for credential negotiation. 
From 2171024d0df98adbdbf0e11d7b955c60556f36bb Mon Sep 17 00:00:00 2001 From: Valeri Karpov Date: Mon, 25 Aug 2014 10:31:54 -0400 Subject: [PATCH 030/305] Make it possible to run tests on windows (with a little cygwin help) --- suite_test.go | 42 ++++++++++------------- syscall_test.go | 27 +++++++++++++++ syscall_win_test.go | 11 ++++++ testdb/setup.sh | 10 +++++- testdb/supervisord-cygwin.conf | 61 ++++++++++++++++++++++++++++++++++ 5 files changed, 125 insertions(+), 26 deletions(-) create mode 100644 syscall_test.go create mode 100644 syscall_win_test.go create mode 100644 testdb/supervisord-cygwin.conf diff --git a/suite_test.go b/suite_test.go index 3f6b8f7f1..c92a737e5 100644 --- a/suite_test.go +++ b/suite_test.go @@ -32,8 +32,8 @@ import ( "fmt" "net" "os/exec" + "runtime" "strconv" - "syscall" "testing" "time" @@ -105,9 +105,11 @@ func (s *S) TearDownTest(c *C) { if s.stopped { s.StartAll() } - for _, host := range s.frozen { - if host != "" { - s.Thaw(host) + if runtime.GOOS != "windows" { + for _, host := range s.frozen { + if host != "" { + s.Thaw(host) + } } } var stats mgo.Stats @@ -137,6 +139,9 @@ func (s *S) TearDownTest(c *C) { func (s *S) Stop(host string) { // Give a moment for slaves to sync and avoid getting rollback issues. + if runtime.GOOS == "windows" { + panic("Stop() currently unsupported on windows!") + } time.Sleep(2 * time.Second) err := run("cd _testdb && supervisorctl stop " + supvName(host)) if err != nil { @@ -158,26 +163,6 @@ func (s *S) pid(host string) int { return pid } -func (s *S) Freeze(host string) { - err := syscall.Kill(s.pid(host), syscall.SIGSTOP) - if err != nil { - panic(err) - } - s.frozen = append(s.frozen, host) -} - -func (s *S) Thaw(host string) { - err := syscall.Kill(s.pid(host), syscall.SIGCONT) - if err != nil { - panic(err) - } - for i, frozen := range s.frozen { - if frozen == host { - s.frozen[i] = "" - } - } -} - func (s *S) StartAll() { // Restart any stopped nodes. 
run("cd _testdb && supervisorctl start all") @@ -189,7 +174,14 @@ func (s *S) StartAll() { } func run(command string) error { - output, err := exec.Command("/bin/sh", "-c", command).CombinedOutput() + var output []byte + var err error + if runtime.GOOS == "windows" { + output, err = exec.Command("cmd", "/C", command).CombinedOutput() + } else { + output, err = exec.Command("/bin/sh", "-c", command).CombinedOutput() + } + if err != nil { msg := fmt.Sprintf("Failed to execute: %s: %s\n%s", command, err.Error(), string(output)) return errors.New(msg) diff --git a/syscall_test.go b/syscall_test.go new file mode 100644 index 000000000..229f7bb44 --- /dev/null +++ b/syscall_test.go @@ -0,0 +1,27 @@ +// +build linux darwin + +package mgo_test + +import ( + "syscall" +) + +func (s *S) Freeze(host string) { + err := syscall.Kill(s.pid(host), syscall.SIGSTOP) + if err != nil { + panic(err) + } + s.frozen = append(s.frozen, host) +} + +func (s *S) Thaw(host string) { + err := syscall.Kill(s.pid(host), syscall.SIGCONT) + if err != nil { + panic(err) + } + for i, frozen := range s.frozen { + if frozen == host { + s.frozen[i] = "" + } + } +} diff --git a/syscall_win_test.go b/syscall_win_test.go new file mode 100644 index 000000000..26565d939 --- /dev/null +++ b/syscall_win_test.go @@ -0,0 +1,11 @@ +// +build windows + +package mgo_test + +func (s *S) Freeze(host string) { + panic("Freeze() not (currently) supported on Windows!") +} + +func (s *S) Thaw(host string) { + panic("Thaw() not (currently) supported on Windows!") +} diff --git a/testdb/setup.sh b/testdb/setup.sh index 27200c090..e837a007f 100755 --- a/testdb/setup.sh +++ b/testdb/setup.sh @@ -4,7 +4,15 @@ start() { mkdir _testdb cd _testdb mkdir db1 db2 rs1a rs1b rs1c rs2a rs2b rs2c rs3a rs3b rs3c rs4a cfg1 cfg2 cfg3 - ln -s ../testdb/supervisord.conf supervisord.conf + if [[ `uname` == 'CYGWIN_NT-6.1' ]]; then + # Supervisor will only run on windows through cygwin. 
However, to get + # it to start you need to pass the properly-escaped windows-style path + # to _testdb as an environment variable. For instance: + # env DIRECTORY="d:\\\\cygwin\\\\home\\\\Administrator\\\\mgo-gopath\\\\src\\\\gopkg.in\\\\mgo.v2\\\\_testdb\\\\" make startdb + ln -s ../testdb/supervisord-cygwin.conf supervisord.conf + else + ln -s ../testdb/supervisord.conf supervisord.conf + fi echo keyfile > keyfile chmod 600 keyfile echo "Running supervisord..." diff --git a/testdb/supervisord-cygwin.conf b/testdb/supervisord-cygwin.conf new file mode 100644 index 000000000..14be540cb --- /dev/null +++ b/testdb/supervisord-cygwin.conf @@ -0,0 +1,61 @@ +[supervisord] +logfile = %(here)s/supervisord.log +pidfile = %(here)s/supervisord.pid +#nodaemon = true + +[inet_http_server] +port = 127.0.0.1:9001 + +[supervisorctl] +serverurl = http://127.0.0.1:9001 + +[rpcinterface:supervisor] +supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface + +[program:db1] +command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --dbpath %(ENV_DIRECTORY)sdb1 --bind_ip=127.0.0.1 --port 40001 + +[program:db2] +command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --dbpath %(ENV_DIRECTORY)sdb2 --bind_ip=127.0.0.1 --port 40002 --auth + +[program:rs1a] +command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs1 --dbpath %(ENV_DIRECTORY)srs1a --bind_ip=127.0.0.1 --port 40011 +[program:rs1b] +command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs1 --dbpath %(ENV_DIRECTORY)srs1b --bind_ip=127.0.0.1 --port 40012 +[program:rs1c] +command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs1 --dbpath %(ENV_DIRECTORY)srs1c --bind_ip=127.0.0.1 --port 40013 + +[program:rs2a] 
+command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs2 --dbpath %(ENV_DIRECTORY)srs2a --bind_ip=127.0.0.1 --port 40021 +[program:rs2b] +command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs2 --dbpath %(ENV_DIRECTORY)srs2b --bind_ip=127.0.0.1 --port 40022 +[program:rs2c] +command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs2 --dbpath %(ENV_DIRECTORY)srs2c --bind_ip=127.0.0.1 --port 40023 + +[program:rs3a] +command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs3 --dbpath %(ENV_DIRECTORY)srs3a --bind_ip=127.0.0.1 --port 40031 --auth --keyFile=%(ENV_DIRECTORY)skeyfile +[program:rs3b] +command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs3 --dbpath %(ENV_DIRECTORY)srs3b --bind_ip=127.0.0.1 --port 40032 --auth --keyFile=%(ENV_DIRECTORY)skeyfile +[program:rs3c] +command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs3 --dbpath %(ENV_DIRECTORY)srs3c --bind_ip=127.0.0.1 --port 40033 --auth --keyFile=%(ENV_DIRECTORY)skeyfile + +[program:rs4a] +command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs4 --dbpath %(ENV_DIRECTORY)srs4a --bind_ip=127.0.0.1 --port 40041 + +[program:cfg1] +command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --configsvr --dbpath %(ENV_DIRECTORY)scfg1 --bind_ip=127.0.0.1 --port 40101 + +[program:cfg2] +command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --configsvr --dbpath %(ENV_DIRECTORY)scfg2 --bind_ip=127.0.0.1 --port 40102 + +[program:cfg3] +command = mongod --nohttpinterface --noprealloc 
--nojournal --smallfiles --nssize=1 --oplogSize=1 --configsvr --dbpath %(ENV_DIRECTORY)scfg3 --bind_ip=127.0.0.1 --port 40103 --auth --keyFile=%(ENV_DIRECTORY)skeyfile + +[program:s1] +command = mongos --configdb 127.0.0.1:40101 --bind_ip=127.0.0.1 --port 40201 --chunkSize 1 + +[program:s2] +command = mongos --configdb 127.0.0.1:40102 --bind_ip=127.0.0.1 --port 40202 --chunkSize 1 + +[program:s3] +command = mongos --configdb 127.0.0.1:40103 --bind_ip=127.0.0.1 --port 40203 --chunkSize 1 --keyFile=%(ENV_DIRECTORY)skeyfile From 958e1206053fe8fab1e194b2e0da7d81eb77751e Mon Sep 17 00:00:00 2001 From: Kyle Erf Date: Wed, 3 Sep 2014 15:05:58 -0400 Subject: [PATCH 031/305] simple benchmarks --- bson/bson_test.go | 32 ++++++++++++++++++++++++++++++++ session_test.go | 43 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 75 insertions(+) diff --git a/bson/bson_test.go b/bson/bson_test.go index 3d9799850..143496356 100644 --- a/bson/bson_test.go +++ b/bson/bson_test.go @@ -1441,6 +1441,13 @@ type BenchT struct { A, B, C, D, E, F string } +type BenchRawT struct { + A string + B int + C bson.M + D []float64 +} + func BenchmarkUnmarhsalStruct(b *testing.B) { v := BenchT{A: "A", D: "D", E: "E"} data, err := bson.Marshal(&v) @@ -1470,3 +1477,28 @@ func BenchmarkUnmarhsalMap(b *testing.B) { panic(err) } } + +func BenchmarkUnmarshalRaw(b *testing.B) { + var err error + m := BenchRawT{ + A: "test_string", + B: 123, + C: bson.M{ + "subdoc_int": 12312, + "subdoc_doc": bson.M{"1": 1}, + }, + D: []float64{0.0, 1.3333, -99.9997, 3.1415}, + } + data, err := bson.Marshal(&m) + if err != nil { + panic(err) + } + raw := bson.Raw{} + b.ResetTimer() + for i := 0; i < b.N; i++ { + err = bson.Unmarshal(data, &raw) + } + if err != nil { + panic(err) + } +} diff --git a/session_test.go b/session_test.go index 788035bd5..16a6bbdbd 100644 --- a/session_test.go +++ b/session_test.go @@ -35,6 +35,7 @@ import ( "sort" "strconv" "strings" + "testing" "time" . 
"gopkg.in/check.v1" @@ -3254,3 +3255,45 @@ func (s *S) TestSetCursorTimeout(c *C) { c.Assert(result.N, Equals, 42) c.Assert(iter.Next(&result), Equals, false) } + +// -------------------------------------------------------------------------- +// Some benchmarks that require a running database. + +func BenchmarkFindIterRaw(b *testing.B) { + err := run("mongo --nodb testdb/dropall.js") + if err != nil { + panic(err) + } + session, err := mgo.Dial("localhost:40001") + if err != nil { + panic(err) + } + defer session.Close() + coll := session.DB("mydb").C("mycoll") + + // Insert 10,000 test documents + for i := 0; i < 10000; i++ { + doc := bson.M{ + "_id": i, + "f2": "a short string", + "f3": bson.M{"1": "one", "2": float64(2)}, + "f4": []string{"a", "b", "c", "d", "e", "f", "g"}, + } + err := coll.Insert(doc) + if err != nil { + panic(err) + } + } + raw := bson.Raw{} + + b.ResetTimer() + for i := 0; i < b.N; i++ { + iter := coll.Find(nil).Iter() + for iter.Next(&raw) { + } + if err := iter.Err(); err != nil { + panic(err) + } + } + +} From cc4bdac6880058a56a68cb42a38d56ba09145e29 Mon Sep 17 00:00:00 2001 From: Kyle Erf Date: Wed, 3 Sep 2014 17:58:47 -0400 Subject: [PATCH 032/305] port benchmarks to gocheck --- bson/bson_test.go | 18 +++++++++--------- session_test.go | 7 +++---- 2 files changed, 12 insertions(+), 13 deletions(-) diff --git a/bson/bson_test.go b/bson/bson_test.go index 143496356..0edcb099b 100644 --- a/bson/bson_test.go +++ b/bson/bson_test.go @@ -1448,14 +1448,14 @@ type BenchRawT struct { D []float64 } -func BenchmarkUnmarhsalStruct(b *testing.B) { +func (s *S) BenchmarkUnmarhsalStruct(c *C) { v := BenchT{A: "A", D: "D", E: "E"} data, err := bson.Marshal(&v) if err != nil { panic(err) } - b.ResetTimer() - for i := 0; i < b.N; i++ { + c.ResetTimer() + for i := 0; i < c.N; i++ { err = bson.Unmarshal(data, &v) } if err != nil { @@ -1463,14 +1463,14 @@ func BenchmarkUnmarhsalStruct(b *testing.B) { } } -func BenchmarkUnmarhsalMap(b *testing.B) { +func (s 
*S) BenchmarkUnmarhsalMap(c *C) { m := bson.M{"a": "a", "d": "d", "e": "e"} data, err := bson.Marshal(&m) if err != nil { panic(err) } - b.ResetTimer() - for i := 0; i < b.N; i++ { + c.ResetTimer() + for i := 0; i < c.N; i++ { err = bson.Unmarshal(data, &m) } if err != nil { @@ -1478,7 +1478,7 @@ func BenchmarkUnmarhsalMap(b *testing.B) { } } -func BenchmarkUnmarshalRaw(b *testing.B) { +func (s *S) BenchmarkUnmarshalRaw(c *C) { var err error m := BenchRawT{ A: "test_string", @@ -1494,8 +1494,8 @@ func BenchmarkUnmarshalRaw(b *testing.B) { panic(err) } raw := bson.Raw{} - b.ResetTimer() - for i := 0; i < b.N; i++ { + c.ResetTimer() + for i := 0; i < c.N; i++ { err = bson.Unmarshal(data, &raw) } if err != nil { diff --git a/session_test.go b/session_test.go index 16a6bbdbd..cfead13e7 100644 --- a/session_test.go +++ b/session_test.go @@ -35,7 +35,6 @@ import ( "sort" "strconv" "strings" - "testing" "time" . "gopkg.in/check.v1" @@ -3259,7 +3258,7 @@ func (s *S) TestSetCursorTimeout(c *C) { // -------------------------------------------------------------------------- // Some benchmarks that require a running database. -func BenchmarkFindIterRaw(b *testing.B) { +func (s *S) BenchmarkFindIterRaw(c *C) { err := run("mongo --nodb testdb/dropall.js") if err != nil { panic(err) @@ -3286,8 +3285,8 @@ func BenchmarkFindIterRaw(b *testing.B) { } raw := bson.Raw{} - b.ResetTimer() - for i := 0; i < b.N; i++ { + c.ResetTimer() + for i := 0; i < c.N; i++ { iter := coll.Find(nil).Iter() for iter.Next(&raw) { } From cba7874b9047ef27b4c9cda5b5da5ed2b2d842ff Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Wed, 3 Sep 2014 22:37:53 -0300 Subject: [PATCH 033/305] Tweak the Raw Iter benchmark to use c.N. 
--- session_test.go | 44 +++++++++++++++++--------------------------- 1 file changed, 17 insertions(+), 27 deletions(-) diff --git a/session_test.go b/session_test.go index 670056b21..c26d7b35c 100644 --- a/session_test.go +++ b/session_test.go @@ -3278,40 +3278,30 @@ func (s *S) TestSetCursorTimeout(c *C) { // Some benchmarks that require a running database. func (s *S) BenchmarkFindIterRaw(c *C) { - err := run("mongo --nodb testdb/dropall.js") - if err != nil { - panic(err) - } session, err := mgo.Dial("localhost:40001") - if err != nil { - panic(err) - } + c.Assert(err, IsNil) defer session.Close() + coll := session.DB("mydb").C("mycoll") + doc := bson.D{ + {"f2", "a short string"}, + {"f3", bson.D{{"1", "one"}, {"2", 2.0}}}, + {"f4", []string{"a", "b", "c", "d", "e", "f", "g"}}, + } - // Insert 10,000 test documents - for i := 0; i < 10000; i++ { - doc := bson.M{ - "_id": i, - "f2": "a short string", - "f3": bson.M{"1": "one", "2": float64(2)}, - "f4": []string{"a", "b", "c", "d", "e", "f", "g"}, - } + for i := 0; i < c.N; i++ { err := coll.Insert(doc) - if err != nil { - panic(err) - } + c.Assert(err, IsNil) } - raw := bson.Raw{} + var raw bson.Raw + iter := coll.Find(nil).Iter() c.ResetTimer() - for i := 0; i < c.N; i++ { - iter := coll.Find(nil).Iter() - for iter.Next(&raw) { - } - if err := iter.Err(); err != nil { - panic(err) - } + for iter.Next(&raw) { + // nothing + } + c.StopTimer() + if err := iter.Err(); err != nil { + panic(err) } - } From 056e1bf69921dcaf6106beb0b44a0bf46a9d865d Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Wed, 3 Sep 2014 23:00:38 -0300 Subject: [PATCH 034/305] Couple more tweaks in the Raw Iter benchmark. 
--- session_test.go | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/session_test.go b/session_test.go index c26d7b35c..00eaacda1 100644 --- a/session_test.go +++ b/session_test.go @@ -3289,19 +3289,22 @@ func (s *S) BenchmarkFindIterRaw(c *C) { {"f4", []string{"a", "b", "c", "d", "e", "f", "g"}}, } - for i := 0; i < c.N; i++ { + for i := 0; i < c.N+1; i++ { err := coll.Insert(doc) c.Assert(err, IsNil) } + session.SetBatch(c.N) + var raw bson.Raw iter := coll.Find(nil).Iter() + iter.Next(&raw) c.ResetTimer() + i := 0 for iter.Next(&raw) { - // nothing + i++ } c.StopTimer() - if err := iter.Err(); err != nil { - panic(err) - } + c.Assert(iter.Err(), IsNil) + c.Assert(i, Equals, c.N) } From 4de63ad8944db65d3e38ffa0229d99aec58630a8 Mon Sep 17 00:00:00 2001 From: Valeri Karpov Date: Thu, 4 Sep 2014 11:44:36 -0400 Subject: [PATCH 035/305] Add POC for windows Kerberos with SSPI --- sasl/kerberos_sspi.c | 244 +++++++++++++++++++++++++++++++++++++++++++ sasl/kerberos_sspi.h | 106 +++++++++++++++++++ sasl/sasl.go | 3 + sasl/sasl_sspi.c | 122 ++++++++++++++++++++++ sasl/sasl_sspi.go | 140 +++++++++++++++++++++++++ sasl/sasl_sspi.h | 6 ++ 6 files changed, 621 insertions(+) create mode 100644 sasl/kerberos_sspi.c create mode 100644 sasl/kerberos_sspi.h create mode 100644 sasl/sasl_sspi.c create mode 100644 sasl/sasl_sspi.go create mode 100644 sasl/sasl_sspi.h diff --git a/sasl/kerberos_sspi.c b/sasl/kerberos_sspi.c new file mode 100644 index 000000000..57c19b4f3 --- /dev/null +++ b/sasl/kerberos_sspi.c @@ -0,0 +1,244 @@ +#include "kerberos_sspi.h" +#include +#include + +static HINSTANCE _sspi_security_dll = NULL; +static HINSTANCE _sspi_secur32_dll = NULL; + +/** + * Encrypt A Message + */ +SECURITY_STATUS SEC_ENTRY _sspi_EncryptMessage(PCtxtHandle phContext, unsigned long fQOP, PSecBufferDesc pMessage, unsigned long MessageSeqNo) { + // Create function pointer instance + encryptMessage_fn pfn_encryptMessage = NULL; + + // Return error if library 
not loaded + if(_sspi_security_dll == NULL) return -1; + + // Map function to library method + pfn_encryptMessage = (encryptMessage_fn)GetProcAddress(_sspi_security_dll, "EncryptMessage"); + // Check if the we managed to map function pointer + if(!pfn_encryptMessage) { + printf("GetProcAddress failed.\n"); + return -2; + } + + // Call the function + return (*pfn_encryptMessage)(phContext, fQOP, pMessage, MessageSeqNo); +} + +/** + * Acquire Credentials + */ +SECURITY_STATUS SEC_ENTRY _sspi_AcquireCredentialsHandle( + LPSTR pszPrincipal, LPSTR pszPackage, unsigned long fCredentialUse, + void * pvLogonId, void * pAuthData, SEC_GET_KEY_FN pGetKeyFn, void * pvGetKeyArgument, + PCredHandle phCredential, PTimeStamp ptsExpiry +) { + SECURITY_STATUS status; + // Create function pointer instance + acquireCredentialsHandle_fn pfn_acquireCredentialsHandle = NULL; + + // Return error if library not loaded + if(_sspi_security_dll == NULL) return -1; + + // Map function + #ifdef _UNICODE + pfn_acquireCredentialsHandle = (acquireCredentialsHandle_fn)GetProcAddress(_sspi_security_dll, "AcquireCredentialsHandleW"); + #else + pfn_acquireCredentialsHandle = (acquireCredentialsHandle_fn)GetProcAddress(_sspi_security_dll, "AcquireCredentialsHandleA"); + #endif + + // Check if the we managed to map function pointer + if(!pfn_acquireCredentialsHandle) { + printf("GetProcAddress failed.\n"); + return 42; + } + + // Status + status = (*pfn_acquireCredentialsHandle)(pszPrincipal, pszPackage, fCredentialUse, + pvLogonId, pAuthData, pGetKeyFn, pvGetKeyArgument, phCredential, ptsExpiry + ); + + // Call the function + return status; +} + +/** + * Delete Security Context + */ +SECURITY_STATUS SEC_ENTRY _sspi_DeleteSecurityContext(PCtxtHandle phContext) { + // Create function pointer instance + deleteSecurityContext_fn pfn_deleteSecurityContext = NULL; + + // Return error if library not loaded + if(_sspi_security_dll == NULL) return -1; + // Map function + pfn_deleteSecurityContext = 
(deleteSecurityContext_fn)GetProcAddress(_sspi_security_dll, "DeleteSecurityContext"); + + // Check if the we managed to map function pointer + if(!pfn_deleteSecurityContext) { + printf("GetProcAddress failed.\n"); + return -2; + } + + // Call the function + return (*pfn_deleteSecurityContext)(phContext); +} + +/** + * Decrypt Message + */ +SECURITY_STATUS SEC_ENTRY _sspi_DecryptMessage(PCtxtHandle phContext, PSecBufferDesc pMessage, unsigned long MessageSeqNo, unsigned long pfQOP) { + // Create function pointer instance + decryptMessage_fn pfn_decryptMessage = NULL; + + // Return error if library not loaded + if(_sspi_security_dll == NULL) return -1; + // Map function + pfn_decryptMessage = (decryptMessage_fn)GetProcAddress(_sspi_security_dll, "DecryptMessage"); + + // Check if the we managed to map function pointer + if(!pfn_decryptMessage) { + printf("GetProcAddress failed.\n"); + return -2; + } + + // Call the function + return (*pfn_decryptMessage)(phContext, pMessage, MessageSeqNo, pfQOP); +} + +/** + * Initialize Security Context + */ +SECURITY_STATUS SEC_ENTRY _sspi_initializeSecurityContext( + PCredHandle phCredential, PCtxtHandle phContext, + LPSTR pszTargetName, unsigned long fContextReq, + unsigned long Reserved1, unsigned long TargetDataRep, + PSecBufferDesc pInput, unsigned long Reserved2, + PCtxtHandle phNewContext, PSecBufferDesc pOutput, + unsigned long * pfContextAttr, PTimeStamp ptsExpiry +) { + SECURITY_STATUS status; + // Create function pointer instance + initializeSecurityContext_fn pfn_initializeSecurityContext = NULL; + + // Return error if library not loaded + if(_sspi_security_dll == NULL) return -1; + + // Map function + #ifdef _UNICODE + pfn_initializeSecurityContext = (initializeSecurityContext_fn)GetProcAddress(_sspi_security_dll, "InitializeSecurityContextW"); + #else + pfn_initializeSecurityContext = (initializeSecurityContext_fn)GetProcAddress(_sspi_security_dll, "InitializeSecurityContextA"); + #endif + + // Check if the we 
managed to map function pointer + if(!pfn_initializeSecurityContext) { + printf("GetProcAddress failed.\n"); + return -2; + } + + // Execute intialize context + status = (*pfn_initializeSecurityContext)( + phCredential, phContext, pszTargetName, fContextReq, + Reserved1, TargetDataRep, pInput, Reserved2, + phNewContext, pOutput, pfContextAttr, ptsExpiry + ); + + // Call the function + return status; +} +/** + * Query Context Attributes + */ +SECURITY_STATUS SEC_ENTRY _sspi_QueryContextAttributes( + PCtxtHandle phContext, unsigned long ulAttribute, void * pBuffer +) { + // Create function pointer instance + queryContextAttributes_fn pfn_queryContextAttributes = NULL; + + // Return error if library not loaded + if(_sspi_security_dll == NULL) return 22; + + #ifdef _UNICODE + pfn_queryContextAttributes = (queryContextAttributes_fn)GetProcAddress(_sspi_security_dll, "QueryContextAttributesW"); + #else + pfn_queryContextAttributes = (queryContextAttributes_fn)GetProcAddress(_sspi_security_dll, "QueryContextAttributesA"); + #endif + + // Check if the we managed to map function pointer + if(!pfn_queryContextAttributes) { + printf("GetProcAddress failed.\n"); + return 42; + } + + // Call the function + return (*pfn_queryContextAttributes)( + phContext, ulAttribute, pBuffer + ); +} + +/** + * InitSecurityInterface + */ +PSecurityFunctionTable _ssip_InitSecurityInterface() { + INIT_SECURITY_INTERFACE InitSecurityInterface; + PSecurityFunctionTable pSecurityInterface = NULL; + + // Return error if library not loaded + if(_sspi_security_dll == NULL) return NULL; + + #ifdef _UNICODE + // Get the address of the InitSecurityInterface function. + InitSecurityInterface = (INIT_SECURITY_INTERFACE) GetProcAddress ( + _sspi_secur32_dll, + TEXT("InitSecurityInterfaceW")); + #else + // Get the address of the InitSecurityInterface function. 
+ InitSecurityInterface = (INIT_SECURITY_INTERFACE) GetProcAddress ( + _sspi_secur32_dll, + TEXT("InitSecurityInterfaceA")); + #endif + + if(!InitSecurityInterface) { + printf (TEXT("Failed in getting the function address, Error: %x"), GetLastError ()); + return NULL; + } + + // Use InitSecurityInterface to get the function table. + pSecurityInterface = (*InitSecurityInterface)(); + + if(!pSecurityInterface) { + printf (TEXT("Failed in getting the function table, Error: %x"), GetLastError ()); + return NULL; + } + + return pSecurityInterface; +} + +/** + * Load security.dll dynamically + */ +int load_library() { + DWORD err; + // Load the library + _sspi_security_dll = LoadLibrary("security.dll"); + + // Check if the library loaded + if(_sspi_security_dll == NULL) { + err = GetLastError(); + return err; + } + + // Load the library + _sspi_secur32_dll = LoadLibrary("secur32.dll"); + + // Check if the library loaded + if(_sspi_secur32_dll == NULL) { + err = GetLastError(); + return err; + } + + return 0; +} diff --git a/sasl/kerberos_sspi.h b/sasl/kerberos_sspi.h new file mode 100644 index 000000000..a3008dc53 --- /dev/null +++ b/sasl/kerberos_sspi.h @@ -0,0 +1,106 @@ +#ifndef SSPI_C_H +#define SSPI_C_H + +#define SECURITY_WIN32 1 + +#include +#include + +/** + * Encrypt A Message + */ +SECURITY_STATUS SEC_ENTRY _sspi_EncryptMessage(PCtxtHandle phContext, unsigned long fQOP, PSecBufferDesc pMessage, unsigned long MessageSeqNo); + +typedef DWORD (WINAPI *encryptMessage_fn)(PCtxtHandle phContext, ULONG fQOP, PSecBufferDesc pMessage, ULONG MessageSeqNo); + +/** + * Acquire Credentials + */ +SECURITY_STATUS SEC_ENTRY _sspi_AcquireCredentialsHandle( + LPSTR pszPrincipal, // Name of principal + LPSTR pszPackage, // Name of package + unsigned long fCredentialUse, // Flags indicating use + void * pvLogonId, // Pointer to logon ID + void * pAuthData, // Package specific data + SEC_GET_KEY_FN pGetKeyFn, // Pointer to GetKey() func + void * pvGetKeyArgument, // Value to pass to 
GetKey() + PCredHandle phCredential, // (out) Cred Handle + PTimeStamp ptsExpiry // (out) Lifetime (optional) +); + +typedef DWORD (WINAPI *acquireCredentialsHandle_fn)( + LPSTR pszPrincipal, LPSTR pszPackage, unsigned long fCredentialUse, + void * pvLogonId, void * pAuthData, SEC_GET_KEY_FN pGetKeyFn, void * pvGetKeyArgument, + PCredHandle phCredential, PTimeStamp ptsExpiry + ); + +/** + * Delete Security Context + */ +SECURITY_STATUS SEC_ENTRY _sspi_DeleteSecurityContext( + PCtxtHandle phContext // Context to delete +); + +typedef DWORD (WINAPI *deleteSecurityContext_fn)(PCtxtHandle phContext); + +/** + * Decrypt Message + */ +SECURITY_STATUS SEC_ENTRY _sspi_DecryptMessage( + PCtxtHandle phContext, + PSecBufferDesc pMessage, + unsigned long MessageSeqNo, + unsigned long pfQOP +); + +typedef DWORD (WINAPI *decryptMessage_fn)( + PCtxtHandle phContext, PSecBufferDesc pMessage, unsigned long MessageSeqNo, unsigned long pfQOP); + +/** + * Initialize Security Context + */ +SECURITY_STATUS SEC_ENTRY _sspi_initializeSecurityContext( + PCredHandle phCredential, // Cred to base context + PCtxtHandle phContext, // Existing context (OPT) + LPSTR pszTargetName, // Name of target + unsigned long fContextReq, // Context Requirements + unsigned long Reserved1, // Reserved, MBZ + unsigned long TargetDataRep, // Data rep of target + PSecBufferDesc pInput, // Input Buffers + unsigned long Reserved2, // Reserved, MBZ + PCtxtHandle phNewContext, // (out) New Context handle + PSecBufferDesc pOutput, // (inout) Output Buffers + unsigned long * pfContextAttr, // (out) Context attrs + PTimeStamp ptsExpiry // (out) Life span (OPT) +); + +typedef DWORD (WINAPI *initializeSecurityContext_fn)( + PCredHandle phCredential, PCtxtHandle phContext, LPSTR pszTargetName, unsigned long fContextReq, + unsigned long Reserved1, unsigned long TargetDataRep, PSecBufferDesc pInput, unsigned long Reserved2, + PCtxtHandle phNewContext, PSecBufferDesc pOutput, unsigned long * pfContextAttr, PTimeStamp 
ptsExpiry); + +/** + * Query Context Attributes + */ +SECURITY_STATUS SEC_ENTRY _sspi_QueryContextAttributes( + PCtxtHandle phContext, // Context to query + unsigned long ulAttribute, // Attribute to query + void * pBuffer // Buffer for attributes +); + +typedef DWORD (WINAPI *queryContextAttributes_fn)( + PCtxtHandle phContext, unsigned long ulAttribute, void * pBuffer); + +/** + * InitSecurityInterface + */ +PSecurityFunctionTable _ssip_InitSecurityInterface(); + +typedef DWORD (WINAPI *initSecurityInterface_fn) (); + +/** + * Load security.dll dynamically + */ +int load_library(); + +#endif \ No newline at end of file diff --git a/sasl/sasl.go b/sasl/sasl.go index e4a170463..b606afbc5 100644 --- a/sasl/sasl.go +++ b/sasl/sasl.go @@ -2,6 +2,9 @@ // // This package is not meant to be used by itself. // + +// +build linux darwin + package sasl // #cgo LDFLAGS: -lsasl2 diff --git a/sasl/sasl_sspi.c b/sasl/sasl_sspi.c new file mode 100644 index 000000000..ef90cc43f --- /dev/null +++ b/sasl/sasl_sspi.c @@ -0,0 +1,122 @@ +#include "sasl_sspi.h" + +static const LPSTR SSPI_PACKAGE_NAME = "kerberos"; + +SECURITY_STATUS SEC_ENTRY sspi_acquire_credentials_handle(CredHandle* cred_handle, char* username, char* password, char* domain) { + SEC_WINNT_AUTH_IDENTITY auth_identity; + SECURITY_INTEGER ignored; + + auth_identity.Flags = SEC_WINNT_AUTH_IDENTITY_ANSI; + auth_identity.User = (LPSTR) username; + auth_identity.UserLength = strlen(username); + auth_identity.Password = (LPSTR) password; + auth_identity.PasswordLength = strlen(password); + auth_identity.Domain = (LPSTR) domain; + auth_identity.DomainLength = strlen(domain); + return _sspi_AcquireCredentialsHandle( + NULL, + SSPI_PACKAGE_NAME, + SECPKG_CRED_OUTBOUND, + NULL, + &auth_identity, + NULL, + NULL, + cred_handle, + &ignored); +} + +int sspi_step(CredHandle* cred_handle, int has_context, CtxtHandle* context, PVOID* buffer, ULONG* buffer_length, char* target) { + SecBufferDesc inbuf; + SecBuffer in_bufs[1]; + 
SecBufferDesc outbuf; + SecBuffer out_bufs[1]; + + if (has_context > 0) { + // If we already have a context, we now have data to send. + // Put this data in an inbuf. + inbuf.ulVersion = SECBUFFER_VERSION; + inbuf.cBuffers = 1; + inbuf.pBuffers = in_bufs; + in_bufs[0].pvBuffer = *buffer; + in_bufs[0].cbBuffer = *buffer_length; + in_bufs[0].BufferType = SECBUFFER_TOKEN; + } + + outbuf.ulVersion = SECBUFFER_VERSION; + outbuf.cBuffers = 1; + outbuf.pBuffers = out_bufs; + out_bufs[0].pvBuffer = NULL; + out_bufs[0].cbBuffer = 0; + out_bufs[0].BufferType = SECBUFFER_TOKEN; + + ULONG context_attr = 0; + + int ret = _sspi_initializeSecurityContext( + cred_handle, + has_context > 0 ? context : NULL, + (LPSTR) target, + ISC_REQ_ALLOCATE_MEMORY | ISC_REQ_MUTUAL_AUTH, + 0, + SECURITY_NETWORK_DREP, + has_context > 0 ? &inbuf : NULL, + 0, + context, + &outbuf, + &context_attr, + NULL); + + *buffer = malloc(out_bufs[0].cbBuffer); + *buffer_length = out_bufs[0].cbBuffer; + memcpy(*buffer, out_bufs[0].pvBuffer, *buffer_length); + + return ret; +} + +int sspi_send_client_authz_id(CtxtHandle* context, PVOID* buffer, ULONG* buffer_length, char* user_plus_realm) { + SecPkgContext_Sizes sizes; + SECURITY_STATUS status = _sspi_QueryContextAttributes(context, SECPKG_ATTR_SIZES, &sizes); + + if (status != SEC_E_OK) { + return status; + } + + int msgSize = 4 + 25; + char* msg = malloc((sizes.cbSecurityTrailer + msgSize + sizes.cbBlockSize) * sizeof(char)); + msg[sizes.cbSecurityTrailer + 0] = 1; + msg[sizes.cbSecurityTrailer + 1] = 0; + msg[sizes.cbSecurityTrailer + 2] = 0; + msg[sizes.cbSecurityTrailer + 3] = 0; + memcpy(&msg[sizes.cbSecurityTrailer + 4], user_plus_realm, 25); + + SecBuffer wrapBufs[3]; + SecBufferDesc wrapBufDesc; + wrapBufDesc.cBuffers = 3; + wrapBufDesc.pBuffers = wrapBufs; + wrapBufDesc.ulVersion = SECBUFFER_VERSION; + + wrapBufs[0].cbBuffer = sizes.cbSecurityTrailer; + wrapBufs[0].BufferType = SECBUFFER_TOKEN; + wrapBufs[0].pvBuffer = msg; + + wrapBufs[1].cbBuffer = 
msgSize; + wrapBufs[1].BufferType = SECBUFFER_DATA; + wrapBufs[1].pvBuffer = msg + sizes.cbSecurityTrailer; + + wrapBufs[2].cbBuffer = sizes.cbBlockSize; + wrapBufs[2].BufferType = SECBUFFER_PADDING; + wrapBufs[2].pvBuffer = msg + sizes.cbSecurityTrailer + msgSize; + + status = _sspi_EncryptMessage(context, SECQOP_WRAP_NO_ENCRYPT, &wrapBufDesc, 0); + if (status != SEC_E_OK) { + return status; + } + + *buffer_length = wrapBufs[0].cbBuffer + wrapBufs[1].cbBuffer + wrapBufs[2].cbBuffer; + *buffer = malloc(*buffer_length); + + memcpy(*buffer, wrapBufs[0].pvBuffer, wrapBufs[0].cbBuffer); + memcpy(*buffer + wrapBufs[0].cbBuffer, wrapBufs[1].pvBuffer, wrapBufs[1].cbBuffer); + memcpy(*buffer + wrapBufs[0].cbBuffer + wrapBufs[1].cbBuffer, wrapBufs[2].pvBuffer, wrapBufs[2].cbBuffer); + + return SEC_E_OK; +} diff --git a/sasl/sasl_sspi.go b/sasl/sasl_sspi.go new file mode 100644 index 000000000..39a54e7fa --- /dev/null +++ b/sasl/sasl_sspi.go @@ -0,0 +1,140 @@ +// +build windows + +package sasl + +// +// #include "sasl_sspi.h" +// +import "C" + +import ( + "fmt" + "strings" + "sync" + "unsafe" +) + +type saslStepper interface { + Step(serverData []byte) (clientData []byte, done bool, err error) + Close() +} + +type saslSession struct { + // Credentials + mech string + service string + host string + userPlusRealm string + + // Internal state + authComplete bool + errored bool + step int + + // C internal state + credHandle C.CredHandle + context C.CtxtHandle + hasContext C.int + + stringsToFree []*C.char + buffersToFree []C.PVOID +} + +var initError error +var initOnce sync.Once + +func initSSPI() { + rc := C.load_library() + if rc != 0 { + initError = fmt.Errorf("Error loading libraries: %v", rc) + } +} + +func New(username, password, mechanism, service, host string) (saslStepper, error) { + initOnce.Do(initSSPI) + ss := &saslSession{mech: mechanism, hasContext: 0, userPlusRealm: username} + if service == "" { + service = "mongodb" + } + if i := strings.Index(host, ":"); i >= 
0 { + host = host[:i] + } + ss.service = service + ss.host = host + + usernameComponents := strings.Split(username, "@") + if len(usernameComponents) < 2 { + return nil, fmt.Errorf("Username '%v' doesn't contain a realm!", username) + } + user := usernameComponents[0] + domain := usernameComponents[1] + + var status C.SECURITY_STATUS + if len(password) > 0 { + status = C.sspi_acquire_credentials_handle(&ss.credHandle, ss.cstr(user), ss.cstr(password), ss.cstr(domain)) + } else { + status = C.sspi_acquire_credentials_handle(&ss.credHandle, ss.cstr(user), nil, ss.cstr(domain)) + } + + if status != C.SEC_E_OK { + ss.errored = true + return nil, fmt.Errorf("Couldn't create new SSPI client, error code %v", status) + } + + return ss, nil +} + +func (ss *saslSession) cstr(s string) *C.char { + cstr := C.CString(s) + ss.stringsToFree = append(ss.stringsToFree, cstr) + return cstr +} + +func (ss *saslSession) Close() { + for _, cstr := range ss.stringsToFree { + C.free(unsafe.Pointer(cstr)) + } + + // Make sure we've cleaned up all the buffers we malloced when we're sure we don't need em anymore + if ss.authComplete || ss.errored { + for _, cbuf := range ss.buffersToFree { + C.free(unsafe.Pointer(cbuf)) + } + } +} + +func (ss *saslSession) Step(serverData []byte) (clientData []byte, done bool, err error) { + ss.step++ + if ss.step > 10 { + return nil, false, fmt.Errorf("too many SSPI steps without authentication") + } + var buffer C.PVOID + var bufferLength C.ULONG + if len(serverData) > 0 { + buffer = (C.PVOID)(unsafe.Pointer(&serverData[0])) + bufferLength = C.ULONG(len(serverData)) + } + var status C.int + if ss.authComplete { + status = C.sspi_send_client_authz_id(&ss.context, &buffer, &bufferLength, ss.cstr(ss.userPlusRealm)) + ss.buffersToFree = append(ss.buffersToFree, buffer) + } else { + target := fmt.Sprintf("%s/%s", ss.service, ss.host) + status = C.sspi_step(&ss.credHandle, ss.hasContext, &ss.context, &buffer, &bufferLength, ss.cstr(target)) + ss.buffersToFree = 
append(ss.buffersToFree, buffer) + } + + if status != C.SEC_E_OK && status != C.SEC_I_CONTINUE_NEEDED { + ss.errored = true + return nil, false, fmt.Errorf("Error doing step %v, error code %v", ss.step, status) + } + + clientData = C.GoBytes(unsafe.Pointer(buffer), C.int(bufferLength)) + if status == C.SEC_E_OK { + ss.authComplete = true + return clientData, true, nil + } else { + ss.hasContext = 1 + return clientData, false, nil + } +} diff --git a/sasl/sasl_sspi.h b/sasl/sasl_sspi.h new file mode 100644 index 000000000..d33e7f814 --- /dev/null +++ b/sasl/sasl_sspi.h @@ -0,0 +1,6 @@ +#include +#include "kerberos_sspi.h" + +SECURITY_STATUS SEC_ENTRY sspi_acquire_credentials_handle(CredHandle* cred_handle, char* username, char* password, char* domain); +int sspi_step(CredHandle* cred_handle, int has_context, CtxtHandle* context, PVOID* buffer, ULONG* buffer_length, char* target); +int sspi_send_client_authz_id(CtxtHandle* context, PVOID* buffer, ULONG* buffer_length, char* user_plus_realm); From 9bd76be098b9298f29c42c547d0581bb90f4a41d Mon Sep 17 00:00:00 2001 From: Valeri Karpov Date: Thu, 4 Sep 2014 11:50:08 -0400 Subject: [PATCH 036/305] gofmt + couple basic comments --- sasl/sasl_sspi.go | 32 ++++++++++++++++++-------------- 1 file changed, 18 insertions(+), 14 deletions(-) diff --git a/sasl/sasl_sspi.go b/sasl/sasl_sspi.go index 39a54e7fa..822e1e291 100644 --- a/sasl/sasl_sspi.go +++ b/sasl/sasl_sspi.go @@ -21,21 +21,22 @@ type saslStepper interface { type saslSession struct { // Credentials - mech string - service string - host string + mech string + service string + host string userPlusRealm string // Internal state authComplete bool - errored bool - step int + errored bool + step int // C internal state credHandle C.CredHandle - context C.CtxtHandle + context C.CtxtHandle hasContext C.int + // Keep track of pointers we need to explicitly free stringsToFree []*C.char buffersToFree []C.PVOID } @@ -70,6 +71,7 @@ func New(username, password, mechanism, service, 
host string) (saslStepper, erro domain := usernameComponents[1] var status C.SECURITY_STATUS + // Step 0: call AcquireCredentialsHandle to get a nice SSPI CredHandle if len(password) > 0 { status = C.sspi_acquire_credentials_handle(&ss.credHandle, ss.cstr(user), ss.cstr(password), ss.cstr(domain)) } else { @@ -78,7 +80,7 @@ func New(username, password, mechanism, service, host string) (saslStepper, erro if status != C.SEC_E_OK { ss.errored = true - return nil, fmt.Errorf("Couldn't create new SSPI client, error code %v", status) + return nil, fmt.Errorf("Couldn't create new SSPI client, error code %v", status) } return ss, nil @@ -106,8 +108,8 @@ func (ss *saslSession) Close() { func (ss *saslSession) Step(serverData []byte) (clientData []byte, done bool, err error) { ss.step++ if ss.step > 10 { - return nil, false, fmt.Errorf("too many SSPI steps without authentication") - } + return nil, false, fmt.Errorf("too many SSPI steps without authentication") + } var buffer C.PVOID var bufferLength C.ULONG if len(serverData) > 0 { @@ -116,9 +118,11 @@ func (ss *saslSession) Step(serverData []byte) (clientData []byte, done bool, er } var status C.int if ss.authComplete { + // Step 3: last bit of magic to use the correct server credentials status = C.sspi_send_client_authz_id(&ss.context, &buffer, &bufferLength, ss.cstr(ss.userPlusRealm)) ss.buffersToFree = append(ss.buffersToFree, buffer) } else { + // Step 1 + Step 2: set up security context with the server and TGT target := fmt.Sprintf("%s/%s", ss.service, ss.host) status = C.sspi_step(&ss.credHandle, ss.hasContext, &ss.context, &buffer, &bufferLength, ss.cstr(target)) ss.buffersToFree = append(ss.buffersToFree, buffer) @@ -126,15 +130,15 @@ func (ss *saslSession) Step(serverData []byte) (clientData []byte, done bool, er if status != C.SEC_E_OK && status != C.SEC_I_CONTINUE_NEEDED { ss.errored = true - return nil, false, fmt.Errorf("Error doing step %v, error code %v", ss.step, status) - } + return nil, false, 
fmt.Errorf("Error doing step %v, error code %v", ss.step, status) + } clientData = C.GoBytes(unsafe.Pointer(buffer), C.int(bufferLength)) if status == C.SEC_E_OK { ss.authComplete = true - return clientData, true, nil + return clientData, true, nil } else { - ss.hasContext = 1 - return clientData, false, nil + ss.hasContext = 1 + return clientData, false, nil } } From 124110ff3f099042a63fb4f2dbba5cc5281857bc Mon Sep 17 00:00:00 2001 From: Valeri Karpov Date: Fri, 5 Sep 2014 14:19:52 -0400 Subject: [PATCH 037/305] Remove changes to supervisor conf --- testdb/setup.sh | 10 +----- testdb/supervisord-cygwin.conf | 61 ---------------------------------- 2 files changed, 1 insertion(+), 70 deletions(-) delete mode 100644 testdb/supervisord-cygwin.conf diff --git a/testdb/setup.sh b/testdb/setup.sh index e837a007f..27200c090 100755 --- a/testdb/setup.sh +++ b/testdb/setup.sh @@ -4,15 +4,7 @@ start() { mkdir _testdb cd _testdb mkdir db1 db2 rs1a rs1b rs1c rs2a rs2b rs2c rs3a rs3b rs3c rs4a cfg1 cfg2 cfg3 - if [[ `uname` == 'CYGWIN_NT-6.1' ]]; then - # Supervisor will only run on windows through cygwin. However, to get - # it to start you need to pass the properly-escaped windows-style path - # to _testdb as an environment variable. For instance: - # env DIRECTORY="d:\\\\cygwin\\\\home\\\\Administrator\\\\mgo-gopath\\\\src\\\\gopkg.in\\\\mgo.v2\\\\_testdb\\\\" make startdb - ln -s ../testdb/supervisord-cygwin.conf supervisord.conf - else - ln -s ../testdb/supervisord.conf supervisord.conf - fi + ln -s ../testdb/supervisord.conf supervisord.conf echo keyfile > keyfile chmod 600 keyfile echo "Running supervisord..." 
diff --git a/testdb/supervisord-cygwin.conf b/testdb/supervisord-cygwin.conf deleted file mode 100644 index 14be540cb..000000000 --- a/testdb/supervisord-cygwin.conf +++ /dev/null @@ -1,61 +0,0 @@ -[supervisord] -logfile = %(here)s/supervisord.log -pidfile = %(here)s/supervisord.pid -#nodaemon = true - -[inet_http_server] -port = 127.0.0.1:9001 - -[supervisorctl] -serverurl = http://127.0.0.1:9001 - -[rpcinterface:supervisor] -supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface - -[program:db1] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --dbpath %(ENV_DIRECTORY)sdb1 --bind_ip=127.0.0.1 --port 40001 - -[program:db2] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --dbpath %(ENV_DIRECTORY)sdb2 --bind_ip=127.0.0.1 --port 40002 --auth - -[program:rs1a] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs1 --dbpath %(ENV_DIRECTORY)srs1a --bind_ip=127.0.0.1 --port 40011 -[program:rs1b] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs1 --dbpath %(ENV_DIRECTORY)srs1b --bind_ip=127.0.0.1 --port 40012 -[program:rs1c] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs1 --dbpath %(ENV_DIRECTORY)srs1c --bind_ip=127.0.0.1 --port 40013 - -[program:rs2a] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs2 --dbpath %(ENV_DIRECTORY)srs2a --bind_ip=127.0.0.1 --port 40021 -[program:rs2b] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs2 --dbpath %(ENV_DIRECTORY)srs2b --bind_ip=127.0.0.1 --port 40022 -[program:rs2c] -command = mongod --nohttpinterface --noprealloc --nojournal 
--smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs2 --dbpath %(ENV_DIRECTORY)srs2c --bind_ip=127.0.0.1 --port 40023 - -[program:rs3a] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs3 --dbpath %(ENV_DIRECTORY)srs3a --bind_ip=127.0.0.1 --port 40031 --auth --keyFile=%(ENV_DIRECTORY)skeyfile -[program:rs3b] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs3 --dbpath %(ENV_DIRECTORY)srs3b --bind_ip=127.0.0.1 --port 40032 --auth --keyFile=%(ENV_DIRECTORY)skeyfile -[program:rs3c] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs3 --dbpath %(ENV_DIRECTORY)srs3c --bind_ip=127.0.0.1 --port 40033 --auth --keyFile=%(ENV_DIRECTORY)skeyfile - -[program:rs4a] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs4 --dbpath %(ENV_DIRECTORY)srs4a --bind_ip=127.0.0.1 --port 40041 - -[program:cfg1] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --configsvr --dbpath %(ENV_DIRECTORY)scfg1 --bind_ip=127.0.0.1 --port 40101 - -[program:cfg2] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --configsvr --dbpath %(ENV_DIRECTORY)scfg2 --bind_ip=127.0.0.1 --port 40102 - -[program:cfg3] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --configsvr --dbpath %(ENV_DIRECTORY)scfg3 --bind_ip=127.0.0.1 --port 40103 --auth --keyFile=%(ENV_DIRECTORY)skeyfile - -[program:s1] -command = mongos --configdb 127.0.0.1:40101 --bind_ip=127.0.0.1 --port 40201 --chunkSize 1 - -[program:s2] -command = mongos --configdb 127.0.0.1:40102 --bind_ip=127.0.0.1 --port 40202 --chunkSize 1 - -[program:s3] -command = mongos --configdb 127.0.0.1:40103 --bind_ip=127.0.0.1 --port 
40203 --chunkSize 1 --keyFile=%(ENV_DIRECTORY)skeyfile From ac370edbf6fc0a11a7b1435cf43d1465b521082b Mon Sep 17 00:00:00 2001 From: Valeri Karpov Date: Fri, 5 Sep 2014 14:28:21 -0400 Subject: [PATCH 038/305] Abstract out syscalls as per Gustavo's suggestion --- suite_test.go | 20 ++++++++++++++++++++ syscall_test.go | 20 ++++---------------- syscall_win_test.go | 8 ++++---- 3 files changed, 28 insertions(+), 20 deletions(-) diff --git a/suite_test.go b/suite_test.go index c92a737e5..94414ddbd 100644 --- a/suite_test.go +++ b/suite_test.go @@ -163,6 +163,26 @@ func (s *S) pid(host string) int { return pid } +func (s *S) Freeze(host string) { + err := s.Stop(host) + if err != nil { + panic(err) + } + s.frozen = append(s.frozen, host) +} + +func (s *S) Thaw(host string) { + err := s.Continue(host) + if err != nil { + panic(err) + } + for i, frozen := range s.frozen { + if frozen == host { + s.frozen[i] = "" + } + } +} + func (s *S) StartAll() { // Restart any stopped nodes. run("cd _testdb && supervisorctl start all") diff --git a/syscall_test.go b/syscall_test.go index 229f7bb44..1899d6c1a 100644 --- a/syscall_test.go +++ b/syscall_test.go @@ -6,22 +6,10 @@ import ( "syscall" ) -func (s *S) Freeze(host string) { - err := syscall.Kill(s.pid(host), syscall.SIGSTOP) - if err != nil { - panic(err) - } - s.frozen = append(s.frozen, host) +func (s *S) Stop(host string) error { + return syscall.Kill(s.pid(host), syscall.SIGSTOP) } -func (s *S) Thaw(host string) { - err := syscall.Kill(s.pid(host), syscall.SIGCONT) - if err != nil { - panic(err) - } - for i, frozen := range s.frozen { - if frozen == host { - s.frozen[i] = "" - } - } +func (s *S) Continue(host string) error { + return syscall.Kill(s.pid(host), syscall.SIGCONT) } diff --git a/syscall_win_test.go b/syscall_win_test.go index 26565d939..9b383d5f5 100644 --- a/syscall_win_test.go +++ b/syscall_win_test.go @@ -2,10 +2,10 @@ package mgo_test -func (s *S) Freeze(host string) { - panic("Freeze() not (currently) 
supported on Windows!") +func (s *S) Stop(host string) { + panic("Stop() not (currently) supported on Windows!") } -func (s *S) Thaw(host string) { - panic("Thaw() not (currently) supported on Windows!") +func (s *S) Continue(host string) { + panic("Continue() not (currently) supported on Windows!") } From 91b7eab11b56acbe8721f95f8a827224a56082b3 Mon Sep 17 00:00:00 2001 From: Valeri Karpov Date: Fri, 5 Sep 2014 16:08:19 -0400 Subject: [PATCH 039/305] Address Gustavo's comments --- sasl/kerberos_sspi.c | 208 ++++++++++++++----------------------------- sasl/kerberos_sspi.h | 43 ++------- sasl/sasl_sspi.c | 12 ++- sasl/sasl_sspi.go | 2 +- sasl/sasl_sspi.h | 3 + 5 files changed, 86 insertions(+), 182 deletions(-) diff --git a/sasl/kerberos_sspi.c b/sasl/kerberos_sspi.c index 57c19b4f3..c4c271958 100644 --- a/sasl/kerberos_sspi.c +++ b/sasl/kerberos_sspi.c @@ -1,6 +1,6 @@ +#ifdef _WIN32 #include "kerberos_sspi.h" #include -#include static HINSTANCE _sspi_security_dll = NULL; static HINSTANCE _sspi_secur32_dll = NULL; @@ -8,18 +8,19 @@ static HINSTANCE _sspi_secur32_dll = NULL; /** * Encrypt A Message */ -SECURITY_STATUS SEC_ENTRY _sspi_EncryptMessage(PCtxtHandle phContext, unsigned long fQOP, PSecBufferDesc pMessage, unsigned long MessageSeqNo) { +SECURITY_STATUS SEC_ENTRY _sspi_encrypt_message(PCtxtHandle phContext, unsigned long fQOP, PSecBufferDesc pMessage, unsigned long MessageSeqNo) { // Create function pointer instance encryptMessage_fn pfn_encryptMessage = NULL; // Return error if library not loaded - if(_sspi_security_dll == NULL) return -1; + if (_sspi_security_dll == NULL) { + return -1; + } // Map function to library method - pfn_encryptMessage = (encryptMessage_fn)GetProcAddress(_sspi_security_dll, "EncryptMessage"); + pfn_encryptMessage = (encryptMessage_fn) GetProcAddress(_sspi_security_dll, "EncryptMessage"); // Check if the we managed to map function pointer - if(!pfn_encryptMessage) { - printf("GetProcAddress failed.\n"); + if (!pfn_encryptMessage) { 
return -2; } @@ -30,203 +31,124 @@ SECURITY_STATUS SEC_ENTRY _sspi_EncryptMessage(PCtxtHandle phContext, unsigned l /** * Acquire Credentials */ -SECURITY_STATUS SEC_ENTRY _sspi_AcquireCredentialsHandle( - LPSTR pszPrincipal, LPSTR pszPackage, unsigned long fCredentialUse, - void * pvLogonId, void * pAuthData, SEC_GET_KEY_FN pGetKeyFn, void * pvGetKeyArgument, - PCredHandle phCredential, PTimeStamp ptsExpiry -) { - SECURITY_STATUS status; +SECURITY_STATUS SEC_ENTRY _sspi_acquire_credentials_handle(LPSTR pszPrincipal, LPSTR pszPackage, unsigned long fCredentialUse, + void* pvLogonId, void* pAuthData, SEC_GET_KEY_FN pGetKeyFn, void* pvGetKeyArgument, PCredHandle phCredential, + PTimeStamp ptsExpiry) { // Create function pointer instance acquireCredentialsHandle_fn pfn_acquireCredentialsHandle = NULL; // Return error if library not loaded - if(_sspi_security_dll == NULL) return -1; + if (_sspi_security_dll == NULL) { + return -1; + } // Map function #ifdef _UNICODE - pfn_acquireCredentialsHandle = (acquireCredentialsHandle_fn)GetProcAddress(_sspi_security_dll, "AcquireCredentialsHandleW"); + pfn_acquireCredentialsHandle = (acquireCredentialsHandle_fn) GetProcAddress(_sspi_security_dll, "AcquireCredentialsHandleW"); #else - pfn_acquireCredentialsHandle = (acquireCredentialsHandle_fn)GetProcAddress(_sspi_security_dll, "AcquireCredentialsHandleA"); + pfn_acquireCredentialsHandle = (acquireCredentialsHandle_fn) GetProcAddress(_sspi_security_dll, "AcquireCredentialsHandleA"); #endif // Check if the we managed to map function pointer - if(!pfn_acquireCredentialsHandle) { - printf("GetProcAddress failed.\n"); - return 42; - } - - // Status - status = (*pfn_acquireCredentialsHandle)(pszPrincipal, pszPackage, fCredentialUse, - pvLogonId, pAuthData, pGetKeyFn, pvGetKeyArgument, phCredential, ptsExpiry - ); - - // Call the function - return status; -} - -/** - * Delete Security Context - */ -SECURITY_STATUS SEC_ENTRY _sspi_DeleteSecurityContext(PCtxtHandle phContext) { - // 
Create function pointer instance - deleteSecurityContext_fn pfn_deleteSecurityContext = NULL; - - // Return error if library not loaded - if(_sspi_security_dll == NULL) return -1; - // Map function - pfn_deleteSecurityContext = (deleteSecurityContext_fn)GetProcAddress(_sspi_security_dll, "DeleteSecurityContext"); - - // Check if the we managed to map function pointer - if(!pfn_deleteSecurityContext) { - printf("GetProcAddress failed.\n"); - return -2; - } - - // Call the function - return (*pfn_deleteSecurityContext)(phContext); -} - -/** - * Decrypt Message - */ -SECURITY_STATUS SEC_ENTRY _sspi_DecryptMessage(PCtxtHandle phContext, PSecBufferDesc pMessage, unsigned long MessageSeqNo, unsigned long pfQOP) { - // Create function pointer instance - decryptMessage_fn pfn_decryptMessage = NULL; - - // Return error if library not loaded - if(_sspi_security_dll == NULL) return -1; - // Map function - pfn_decryptMessage = (decryptMessage_fn)GetProcAddress(_sspi_security_dll, "DecryptMessage"); - - // Check if the we managed to map function pointer - if(!pfn_decryptMessage) { - printf("GetProcAddress failed.\n"); + if (!pfn_acquireCredentialsHandle) { return -2; } - // Call the function - return (*pfn_decryptMessage)(phContext, pMessage, MessageSeqNo, pfQOP); + // Status + return (*pfn_acquireCredentialsHandle)( + pszPrincipal, + pszPackage, + fCredentialUse, + pvLogonId, + pAuthData, + pGetKeyFn, + pvGetKeyArgument, + phCredential, + ptsExpiry); } /** * Initialize Security Context */ -SECURITY_STATUS SEC_ENTRY _sspi_initializeSecurityContext( - PCredHandle phCredential, PCtxtHandle phContext, - LPSTR pszTargetName, unsigned long fContextReq, - unsigned long Reserved1, unsigned long TargetDataRep, - PSecBufferDesc pInput, unsigned long Reserved2, - PCtxtHandle phNewContext, PSecBufferDesc pOutput, - unsigned long * pfContextAttr, PTimeStamp ptsExpiry -) { - SECURITY_STATUS status; +SECURITY_STATUS SEC_ENTRY _sspi_initialize_security_context(PCredHandle phCredential, 
PCtxtHandle phContext, LPSTR pszTargetName, + unsigned long fContextReq, unsigned long Reserved1, unsigned long TargetDataRep, PSecBufferDesc pInput, unsigned long Reserved2, + PCtxtHandle phNewContext, PSecBufferDesc pOutput, unsigned long * pfContextAttr, PTimeStamp ptsExpiry) { // Create function pointer instance initializeSecurityContext_fn pfn_initializeSecurityContext = NULL; // Return error if library not loaded - if(_sspi_security_dll == NULL) return -1; + if (_sspi_security_dll == NULL) { + return -1; + } // Map function #ifdef _UNICODE - pfn_initializeSecurityContext = (initializeSecurityContext_fn)GetProcAddress(_sspi_security_dll, "InitializeSecurityContextW"); + pfn_initializeSecurityContext = (initializeSecurityContext_fn) GetProcAddress(_sspi_security_dll, "InitializeSecurityContextW"); #else - pfn_initializeSecurityContext = (initializeSecurityContext_fn)GetProcAddress(_sspi_security_dll, "InitializeSecurityContextA"); + pfn_initializeSecurityContext = (initializeSecurityContext_fn) GetProcAddress(_sspi_security_dll, "InitializeSecurityContextA"); #endif // Check if the we managed to map function pointer - if(!pfn_initializeSecurityContext) { - printf("GetProcAddress failed.\n"); + if (!pfn_initializeSecurityContext) { return -2; } // Execute intialize context - status = (*pfn_initializeSecurityContext)( - phCredential, phContext, pszTargetName, fContextReq, - Reserved1, TargetDataRep, pInput, Reserved2, - phNewContext, pOutput, pfContextAttr, ptsExpiry - ); - - // Call the function - return status; + return (*pfn_initializeSecurityContext)( + phCredential, + phContext, + pszTargetName, + fContextReq, + Reserved1, + TargetDataRep, + pInput, + Reserved2, + phNewContext, + pOutput, + pfContextAttr, + ptsExpiry); } + /** * Query Context Attributes */ -SECURITY_STATUS SEC_ENTRY _sspi_QueryContextAttributes( - PCtxtHandle phContext, unsigned long ulAttribute, void * pBuffer -) { +SECURITY_STATUS SEC_ENTRY _sspi_query_context_attributes(PCtxtHandle 
phContext, unsigned long ulAttribute, void * pBuffer) { // Create function pointer instance queryContextAttributes_fn pfn_queryContextAttributes = NULL; // Return error if library not loaded - if(_sspi_security_dll == NULL) return 22; + if (_sspi_security_dll == NULL) { + return -1; + } #ifdef _UNICODE - pfn_queryContextAttributes = (queryContextAttributes_fn)GetProcAddress(_sspi_security_dll, "QueryContextAttributesW"); + pfn_queryContextAttributes = (queryContextAttributes_fn) GetProcAddress(_sspi_security_dll, "QueryContextAttributesW"); #else - pfn_queryContextAttributes = (queryContextAttributes_fn)GetProcAddress(_sspi_security_dll, "QueryContextAttributesA"); + pfn_queryContextAttributes = (queryContextAttributes_fn) GetProcAddress(_sspi_security_dll, "QueryContextAttributesA"); #endif // Check if the we managed to map function pointer - if(!pfn_queryContextAttributes) { - printf("GetProcAddress failed.\n"); - return 42; + if (!pfn_queryContextAttributes) { + return -2; } // Call the function return (*pfn_queryContextAttributes)( - phContext, ulAttribute, pBuffer - ); -} - -/** - * InitSecurityInterface - */ -PSecurityFunctionTable _ssip_InitSecurityInterface() { - INIT_SECURITY_INTERFACE InitSecurityInterface; - PSecurityFunctionTable pSecurityInterface = NULL; - - // Return error if library not loaded - if(_sspi_security_dll == NULL) return NULL; - - #ifdef _UNICODE - // Get the address of the InitSecurityInterface function. - InitSecurityInterface = (INIT_SECURITY_INTERFACE) GetProcAddress ( - _sspi_secur32_dll, - TEXT("InitSecurityInterfaceW")); - #else - // Get the address of the InitSecurityInterface function. - InitSecurityInterface = (INIT_SECURITY_INTERFACE) GetProcAddress ( - _sspi_secur32_dll, - TEXT("InitSecurityInterfaceA")); - #endif - - if(!InitSecurityInterface) { - printf (TEXT("Failed in getting the function address, Error: %x"), GetLastError ()); - return NULL; - } - - // Use InitSecurityInterface to get the function table. 
- pSecurityInterface = (*InitSecurityInterface)(); - - if(!pSecurityInterface) { - printf (TEXT("Failed in getting the function table, Error: %x"), GetLastError ()); - return NULL; - } - - return pSecurityInterface; + phContext, + ulAttribute, + pBuffer); } /** * Load security.dll dynamically */ -int load_library() { +int _load_library() { DWORD err; // Load the library _sspi_security_dll = LoadLibrary("security.dll"); // Check if the library loaded - if(_sspi_security_dll == NULL) { + if (_sspi_security_dll == NULL) { err = GetLastError(); return err; } @@ -235,10 +157,12 @@ int load_library() { _sspi_secur32_dll = LoadLibrary("secur32.dll"); // Check if the library loaded - if(_sspi_secur32_dll == NULL) { + if (_sspi_secur32_dll == NULL) { err = GetLastError(); return err; } return 0; } + +#endif diff --git a/sasl/kerberos_sspi.h b/sasl/kerberos_sspi.h index a3008dc53..d2a6a30a4 100644 --- a/sasl/kerberos_sspi.h +++ b/sasl/kerberos_sspi.h @@ -1,3 +1,4 @@ +#ifdef _WIN32 #ifndef SSPI_C_H #define SSPI_C_H @@ -9,14 +10,14 @@ /** * Encrypt A Message */ -SECURITY_STATUS SEC_ENTRY _sspi_EncryptMessage(PCtxtHandle phContext, unsigned long fQOP, PSecBufferDesc pMessage, unsigned long MessageSeqNo); +SECURITY_STATUS SEC_ENTRY _sspi_encrypt_message(PCtxtHandle phContext, unsigned long fQOP, PSecBufferDesc pMessage, unsigned long MessageSeqNo); typedef DWORD (WINAPI *encryptMessage_fn)(PCtxtHandle phContext, ULONG fQOP, PSecBufferDesc pMessage, ULONG MessageSeqNo); /** * Acquire Credentials */ -SECURITY_STATUS SEC_ENTRY _sspi_AcquireCredentialsHandle( +SECURITY_STATUS SEC_ENTRY _sspi_acquire_credentials_handle( LPSTR pszPrincipal, // Name of principal LPSTR pszPackage, // Name of package unsigned long fCredentialUse, // Flags indicating use @@ -34,32 +35,10 @@ typedef DWORD (WINAPI *acquireCredentialsHandle_fn)( PCredHandle phCredential, PTimeStamp ptsExpiry ); -/** - * Delete Security Context - */ -SECURITY_STATUS SEC_ENTRY _sspi_DeleteSecurityContext( - PCtxtHandle 
phContext // Context to delete -); - -typedef DWORD (WINAPI *deleteSecurityContext_fn)(PCtxtHandle phContext); - -/** - * Decrypt Message - */ -SECURITY_STATUS SEC_ENTRY _sspi_DecryptMessage( - PCtxtHandle phContext, - PSecBufferDesc pMessage, - unsigned long MessageSeqNo, - unsigned long pfQOP -); - -typedef DWORD (WINAPI *decryptMessage_fn)( - PCtxtHandle phContext, PSecBufferDesc pMessage, unsigned long MessageSeqNo, unsigned long pfQOP); - /** * Initialize Security Context */ -SECURITY_STATUS SEC_ENTRY _sspi_initializeSecurityContext( +SECURITY_STATUS SEC_ENTRY _sspi_initialize_security_context( PCredHandle phCredential, // Cred to base context PCtxtHandle phContext, // Existing context (OPT) LPSTR pszTargetName, // Name of target @@ -82,7 +61,7 @@ typedef DWORD (WINAPI *initializeSecurityContext_fn)( /** * Query Context Attributes */ -SECURITY_STATUS SEC_ENTRY _sspi_QueryContextAttributes( +SECURITY_STATUS SEC_ENTRY _sspi_query_context_attributes( PCtxtHandle phContext, // Context to query unsigned long ulAttribute, // Attribute to query void * pBuffer // Buffer for attributes @@ -91,16 +70,10 @@ SECURITY_STATUS SEC_ENTRY _sspi_QueryContextAttributes( typedef DWORD (WINAPI *queryContextAttributes_fn)( PCtxtHandle phContext, unsigned long ulAttribute, void * pBuffer); -/** - * InitSecurityInterface - */ -PSecurityFunctionTable _ssip_InitSecurityInterface(); - -typedef DWORD (WINAPI *initSecurityInterface_fn) (); - /** * Load security.dll dynamically */ -int load_library(); +int _load_library(); -#endif \ No newline at end of file +#endif +#endif diff --git a/sasl/sasl_sspi.c b/sasl/sasl_sspi.c index ef90cc43f..bb4239b6c 100644 --- a/sasl/sasl_sspi.c +++ b/sasl/sasl_sspi.c @@ -1,3 +1,5 @@ +#ifdef _WIN32 + #include "sasl_sspi.h" static const LPSTR SSPI_PACKAGE_NAME = "kerberos"; @@ -13,7 +15,7 @@ SECURITY_STATUS SEC_ENTRY sspi_acquire_credentials_handle(CredHandle* cred_handl auth_identity.PasswordLength = strlen(password); auth_identity.Domain = (LPSTR) domain; 
auth_identity.DomainLength = strlen(domain); - return _sspi_AcquireCredentialsHandle( + return _sspi_acquire_credentials_handle( NULL, SSPI_PACKAGE_NAME, SECPKG_CRED_OUTBOUND, @@ -51,7 +53,7 @@ int sspi_step(CredHandle* cred_handle, int has_context, CtxtHandle* context, PVO ULONG context_attr = 0; - int ret = _sspi_initializeSecurityContext( + int ret = _sspi_initialize_security_context( cred_handle, has_context > 0 ? context : NULL, (LPSTR) target, @@ -74,7 +76,7 @@ int sspi_step(CredHandle* cred_handle, int has_context, CtxtHandle* context, PVO int sspi_send_client_authz_id(CtxtHandle* context, PVOID* buffer, ULONG* buffer_length, char* user_plus_realm) { SecPkgContext_Sizes sizes; - SECURITY_STATUS status = _sspi_QueryContextAttributes(context, SECPKG_ATTR_SIZES, &sizes); + SECURITY_STATUS status = _sspi_query_context_attributes(context, SECPKG_ATTR_SIZES, &sizes); if (status != SEC_E_OK) { return status; @@ -106,7 +108,7 @@ int sspi_send_client_authz_id(CtxtHandle* context, PVOID* buffer, ULONG* buffer_ wrapBufs[2].BufferType = SECBUFFER_PADDING; wrapBufs[2].pvBuffer = msg + sizes.cbSecurityTrailer + msgSize; - status = _sspi_EncryptMessage(context, SECQOP_WRAP_NO_ENCRYPT, &wrapBufDesc, 0); + status = _sspi_encrypt_message(context, SECQOP_WRAP_NO_ENCRYPT, &wrapBufDesc, 0); if (status != SEC_E_OK) { return status; } @@ -120,3 +122,5 @@ int sspi_send_client_authz_id(CtxtHandle* context, PVOID* buffer, ULONG* buffer_ return SEC_E_OK; } + +#endif diff --git a/sasl/sasl_sspi.go b/sasl/sasl_sspi.go index 822e1e291..30d478eb9 100644 --- a/sasl/sasl_sspi.go +++ b/sasl/sasl_sspi.go @@ -45,7 +45,7 @@ var initError error var initOnce sync.Once func initSSPI() { - rc := C.load_library() + rc := C._load_library() if rc != 0 { initError = fmt.Errorf("Error loading libraries: %v", rc) } diff --git a/sasl/sasl_sspi.h b/sasl/sasl_sspi.h index d33e7f814..4ef6dc20f 100644 --- a/sasl/sasl_sspi.h +++ b/sasl/sasl_sspi.h @@ -1,6 +1,9 @@ +#ifdef _WIN32 #include #include 
"kerberos_sspi.h" SECURITY_STATUS SEC_ENTRY sspi_acquire_credentials_handle(CredHandle* cred_handle, char* username, char* password, char* domain); int sspi_step(CredHandle* cred_handle, int has_context, CtxtHandle* context, PVOID* buffer, ULONG* buffer_length, char* target); int sspi_send_client_authz_id(CtxtHandle* context, PVOID* buffer, ULONG* buffer_length, char* user_plus_realm); + +#endif From 00004f72d5d800d7a1f49645019fec1383a7b34d Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Thu, 11 Sep 2014 02:06:54 -0300 Subject: [PATCH 040/305] Bypass decoding when unmarshalling bson.Raw. --- bson/bson.go | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/bson/bson.go b/bson/bson.go index 3ebfd8438..aad190749 100644 --- a/bson/bson.go +++ b/bson/bson.go @@ -117,7 +117,7 @@ type M map[string]interface{} // using a map is generally more comfortable. See bson.M and bson.RawD. type D []DocElem -// See the D type. +// DocElem is an element of the bson.D document representation. type DocElem struct { Name string Value interface{} @@ -484,7 +484,14 @@ func Unmarshal(in []byte, out interface{}) (err error) { defer handleErr(&err) v := reflect.ValueOf(out) switch v.Kind() { - case reflect.Map, reflect.Ptr: + case reflect.Ptr: + if raw, ok := out.(*Raw); ok { + raw.Kind = 3 + raw.Data = in + return nil + } + fallthrough + case reflect.Map: d := newDecoder(in) d.readDocTo(v) case reflect.Struct: From 0b099f71ab1fdd45671f2b125c360c6524bcbbac Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Thu, 11 Sep 2014 02:34:26 -0300 Subject: [PATCH 041/305] Do the bson.Raw decoding bypass before defer. This cuts down the timing further to 22.5ns/op, or 1/300th of the original. 
--- bson/bson.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/bson/bson.go b/bson/bson.go index aad190749..d1f673086 100644 --- a/bson/bson.go +++ b/bson/bson.go @@ -481,15 +481,15 @@ func Marshal(in interface{}) (out []byte, err error) { // // Pointer values are initialized when necessary. func Unmarshal(in []byte, out interface{}) (err error) { + if raw, ok := out.(*Raw); ok { + raw.Kind = 3 + raw.Data = in + return nil + } defer handleErr(&err) v := reflect.ValueOf(out) switch v.Kind() { case reflect.Ptr: - if raw, ok := out.(*Raw); ok { - raw.Kind = 3 - raw.Data = in - return nil - } fallthrough case reflect.Map: d := newDecoder(in) From 33cb11b3aaeab2b21a5ac72978f9a99e1b8ff005 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Thu, 11 Sep 2014 17:54:04 -0300 Subject: [PATCH 042/305] Change default GridFS chunk size to 255k. This prevents blocks of 512k from being allocated to hold the 256k data field plus metadata. Fixes MGO-28. --- gridfs.go | 2 +- gridfs_test.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/gridfs.go b/gridfs.go index 647305ad6..6eea5f8c5 100644 --- a/gridfs.go +++ b/gridfs.go @@ -154,7 +154,7 @@ func (gfs *GridFS) Create(name string) (file *GridFile, err error) { file = gfs.newFile() file.mode = gfsWriting file.wsum = md5.New() - file.doc = gfsFile{Id: bson.NewObjectId(), ChunkSize: 256 * 1024, Filename: name} + file.doc = gfsFile{Id: bson.NewObjectId(), ChunkSize: 255 * 1024, Filename: name} return } diff --git a/gridfs_test.go b/gridfs_test.go index 4cd5b7d02..7a9533449 100644 --- a/gridfs_test.go +++ b/gridfs_test.go @@ -76,7 +76,7 @@ func (s *S) TestGridFSCreate(c *C) { expected := M{ "_id": "", "length": 9, - "chunkSize": 262144, + "chunkSize": 255 * 1024, "uploadDate": "", "md5": "1e50210a0202497fb79bc38b6ade6c34", } @@ -173,7 +173,7 @@ func (s *S) TestGridFSFileDetails(c *C) { expected := M{ "_id": "myid", "length": 9, - "chunkSize": 262144, + "chunkSize": 255 * 1024, 
"uploadDate": "", "md5": "1e50210a0202497fb79bc38b6ade6c34", "filename": "myfile2.txt", From 74139fe9b326201a9a5bb795068ce06fc0caca66 Mon Sep 17 00:00:00 2001 From: Valeri Karpov Date: Wed, 17 Sep 2014 16:13:38 -0400 Subject: [PATCH 043/305] Address Gustavo's comments re: windows syscalls --- suite_test.go | 4 ++-- syscall_test.go | 8 ++++---- syscall_win_test.go | 8 ++++---- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/suite_test.go b/suite_test.go index 94414ddbd..589ef0eec 100644 --- a/suite_test.go +++ b/suite_test.go @@ -164,7 +164,7 @@ func (s *S) pid(host string) int { } func (s *S) Freeze(host string) { - err := s.Stop(host) + err := stop(s.pid(host)) if err != nil { panic(err) } @@ -172,7 +172,7 @@ func (s *S) Freeze(host string) { } func (s *S) Thaw(host string) { - err := s.Continue(host) + err := cont(s.pid(host)) if err != nil { panic(err) } diff --git a/syscall_test.go b/syscall_test.go index 1899d6c1a..d6ebebf21 100644 --- a/syscall_test.go +++ b/syscall_test.go @@ -6,10 +6,10 @@ import ( "syscall" ) -func (s *S) Stop(host string) error { - return syscall.Kill(s.pid(host), syscall.SIGSTOP) +func stop(pid int) (err error) { + return syscall.Kill(pid, syscall.SIGSTOP) } -func (s *S) Continue(host string) error { - return syscall.Kill(s.pid(host), syscall.SIGCONT) +func cont(pid int) (err error) { + return syscall.Kill(pid, syscall.SIGCONT) } diff --git a/syscall_win_test.go b/syscall_win_test.go index 9b383d5f5..3c0678ff4 100644 --- a/syscall_win_test.go +++ b/syscall_win_test.go @@ -2,10 +2,10 @@ package mgo_test -func (s *S) Stop(host string) { - panic("Stop() not (currently) supported on Windows!") +func stop(pid int) (err error) { + panic("stop not currently implemented on windows!") } -func (s *S) Continue(host string) { - panic("Continue() not (currently) supported on Windows!") +func cont(pid int) (err error) { + panic("cont not currently implemented on windows!") } From 4dead2a72b94b2a61d68b516cb71ad5e0a38f44d Mon Sep 17 
00:00:00 2001 From: Valeri Karpov Date: Wed, 17 Sep 2014 16:49:07 -0400 Subject: [PATCH 044/305] Address more of Gustavo's comments --- suite_test.go | 19 +++++++++++-------- syscall_win_test.go | 11 ----------- syscall_windows_test.go | 13 +++++++++++++ 3 files changed, 24 insertions(+), 19 deletions(-) delete mode 100644 syscall_win_test.go create mode 100644 syscall_windows_test.go diff --git a/suite_test.go b/suite_test.go index 589ef0eec..f48456057 100644 --- a/suite_test.go +++ b/suite_test.go @@ -105,11 +105,10 @@ func (s *S) TearDownTest(c *C) { if s.stopped { s.StartAll() } - if runtime.GOOS != "windows" { - for _, host := range s.frozen { - if host != "" { - s.Thaw(host) - } + panicOnWindows() + for _, host := range s.frozen { + if host != "" { + s.Thaw(host) } } var stats mgo.Stats @@ -139,9 +138,7 @@ func (s *S) TearDownTest(c *C) { func (s *S) Stop(host string) { // Give a moment for slaves to sync and avoid getting rollback issues. - if runtime.GOOS == "windows" { - panic("Stop() currently unsupported on windows!") - } + panicOnWindows() time.Sleep(2 * time.Second) err := run("cd _testdb && supervisorctl stop " + supvName(host)) if err != nil { @@ -250,3 +247,9 @@ func hostPort(host string) string { } return port } + +func panicOnWindows() { + if runtime.GOOS == "windows" { + panic("the test suite is not yet fully supported on Windows") + } +} diff --git a/syscall_win_test.go b/syscall_win_test.go deleted file mode 100644 index 3c0678ff4..000000000 --- a/syscall_win_test.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build windows - -package mgo_test - -func stop(pid int) (err error) { - panic("stop not currently implemented on windows!") -} - -func cont(pid int) (err error) { - panic("cont not currently implemented on windows!") -} diff --git a/syscall_windows_test.go b/syscall_windows_test.go new file mode 100644 index 000000000..2d40d8151 --- /dev/null +++ b/syscall_windows_test.go @@ -0,0 +1,13 @@ +package mgo_test + +func stop(pid int) (err error) { + // 
Should always panic + panicOnWindows() + return nil +} + +func cont(pid int) (err error) { + // Should always panic + panicOnWindows() + return nil +} From 186e2f4a9334e28fe0470c80c2171099edaf0639 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Wed, 17 Sep 2014 23:34:03 -0300 Subject: [PATCH 045/305] Drop panic making the test suite not run on windows. --- suite_test.go | 1 - syscall_test.go | 2 +- syscall_windows_test.go | 6 ++---- 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/suite_test.go b/suite_test.go index f48456057..334407e31 100644 --- a/suite_test.go +++ b/suite_test.go @@ -105,7 +105,6 @@ func (s *S) TearDownTest(c *C) { if s.stopped { s.StartAll() } - panicOnWindows() for _, host := range s.frozen { if host != "" { s.Thaw(host) diff --git a/syscall_test.go b/syscall_test.go index d6ebebf21..b8bbd7b34 100644 --- a/syscall_test.go +++ b/syscall_test.go @@ -1,4 +1,4 @@ -// +build linux darwin +// +build !windows package mgo_test diff --git a/syscall_windows_test.go b/syscall_windows_test.go index 2d40d8151..f2deaca86 100644 --- a/syscall_windows_test.go +++ b/syscall_windows_test.go @@ -1,13 +1,11 @@ package mgo_test func stop(pid int) (err error) { - // Should always panic - panicOnWindows() + panicOnWindows() // Always does. return nil } func cont(pid int) (err error) { - // Should always panic - panicOnWindows() + panicOnWindows() // Always does. 
return nil } From 5c137f56eb5545db0ffe2f2643e13bf1fa2c6ab8 Mon Sep 17 00:00:00 2001 From: Valeri Karpov Date: Thu, 18 Sep 2014 10:46:41 -0400 Subject: [PATCH 046/305] Some better naming conventions --- sasl/kerberos_sspi.c | 15 ++++++++++----- sasl/kerberos_sspi.h | 15 ++++++++++----- sasl/{sasl_sspi.c => sasl_windows.c} | 10 +++++----- sasl/{sasl_sspi.go => sasl_windows.go} | 6 ++---- sasl/{sasl_sspi.h => sasl_windows.h} | 0 5 files changed, 27 insertions(+), 19 deletions(-) rename sasl/{sasl_sspi.c => sasl_windows.c} (91%) rename sasl/{sasl_sspi.go => sasl_windows.go} (98%) rename sasl/{sasl_sspi.h => sasl_windows.h} (100%) diff --git a/sasl/kerberos_sspi.c b/sasl/kerberos_sspi.c index c4c271958..a6bee0544 100644 --- a/sasl/kerberos_sspi.c +++ b/sasl/kerberos_sspi.c @@ -1,3 +1,8 @@ +/** This code is adapted from the NodeJS kerberos library: + * https://github.com/christkv/kerberos/tree/master/lib/win32/kerberos_sspi.c + * Under the terms of the Apache License, Version 2.0: + * http://www.apache.org/licenses/LICENSE-2.0 */ + #ifdef _WIN32 #include "kerberos_sspi.h" #include @@ -8,7 +13,7 @@ static HINSTANCE _sspi_secur32_dll = NULL; /** * Encrypt A Message */ -SECURITY_STATUS SEC_ENTRY _sspi_encrypt_message(PCtxtHandle phContext, unsigned long fQOP, PSecBufferDesc pMessage, unsigned long MessageSeqNo) { +SECURITY_STATUS SEC_ENTRY call_sspi_encrypt_message(PCtxtHandle phContext, unsigned long fQOP, PSecBufferDesc pMessage, unsigned long MessageSeqNo) { // Create function pointer instance encryptMessage_fn pfn_encryptMessage = NULL; @@ -31,7 +36,7 @@ SECURITY_STATUS SEC_ENTRY _sspi_encrypt_message(PCtxtHandle phContext, unsigned /** * Acquire Credentials */ -SECURITY_STATUS SEC_ENTRY _sspi_acquire_credentials_handle(LPSTR pszPrincipal, LPSTR pszPackage, unsigned long fCredentialUse, +SECURITY_STATUS SEC_ENTRY call_sspi_acquire_credentials_handle(LPSTR pszPrincipal, LPSTR pszPackage, unsigned long fCredentialUse, void* pvLogonId, void* pAuthData, SEC_GET_KEY_FN 
pGetKeyFn, void* pvGetKeyArgument, PCredHandle phCredential, PTimeStamp ptsExpiry) { // Create function pointer instance @@ -70,7 +75,7 @@ SECURITY_STATUS SEC_ENTRY _sspi_acquire_credentials_handle(LPSTR pszPrincipal, L /** * Initialize Security Context */ -SECURITY_STATUS SEC_ENTRY _sspi_initialize_security_context(PCredHandle phCredential, PCtxtHandle phContext, LPSTR pszTargetName, +SECURITY_STATUS SEC_ENTRY call_sspi_initialize_security_context(PCredHandle phCredential, PCtxtHandle phContext, LPSTR pszTargetName, unsigned long fContextReq, unsigned long Reserved1, unsigned long TargetDataRep, PSecBufferDesc pInput, unsigned long Reserved2, PCtxtHandle phNewContext, PSecBufferDesc pOutput, unsigned long * pfContextAttr, PTimeStamp ptsExpiry) { // Create function pointer instance @@ -112,7 +117,7 @@ SECURITY_STATUS SEC_ENTRY _sspi_initialize_security_context(PCredHandle phCreden /** * Query Context Attributes */ -SECURITY_STATUS SEC_ENTRY _sspi_query_context_attributes(PCtxtHandle phContext, unsigned long ulAttribute, void * pBuffer) { +SECURITY_STATUS SEC_ENTRY call_sspi_query_context_attributes(PCtxtHandle phContext, unsigned long ulAttribute, void * pBuffer) { // Create function pointer instance queryContextAttributes_fn pfn_queryContextAttributes = NULL; @@ -142,7 +147,7 @@ SECURITY_STATUS SEC_ENTRY _sspi_query_context_attributes(PCtxtHandle phContext, /** * Load security.dll dynamically */ -int _load_library() { +int load_library() { DWORD err; // Load the library _sspi_security_dll = LoadLibrary("security.dll"); diff --git a/sasl/kerberos_sspi.h b/sasl/kerberos_sspi.h index d2a6a30a4..a1f7b04a2 100644 --- a/sasl/kerberos_sspi.h +++ b/sasl/kerberos_sspi.h @@ -1,3 +1,8 @@ +/** This code is adapted from the NodeJS kerberos library: + * https://github.com/christkv/kerberos/tree/master/lib/win32/kerberos_sspi.h + * Under the terms of the Apache License, Version 2.0: + * http://www.apache.org/licenses/LICENSE-2.0 */ + #ifdef _WIN32 #ifndef SSPI_C_H #define 
SSPI_C_H @@ -10,14 +15,14 @@ /** * Encrypt A Message */ -SECURITY_STATUS SEC_ENTRY _sspi_encrypt_message(PCtxtHandle phContext, unsigned long fQOP, PSecBufferDesc pMessage, unsigned long MessageSeqNo); +SECURITY_STATUS SEC_ENTRY call_sspi_encrypt_message(PCtxtHandle phContext, unsigned long fQOP, PSecBufferDesc pMessage, unsigned long MessageSeqNo); typedef DWORD (WINAPI *encryptMessage_fn)(PCtxtHandle phContext, ULONG fQOP, PSecBufferDesc pMessage, ULONG MessageSeqNo); /** * Acquire Credentials */ -SECURITY_STATUS SEC_ENTRY _sspi_acquire_credentials_handle( +SECURITY_STATUS SEC_ENTRY call_sspi_acquire_credentials_handle( LPSTR pszPrincipal, // Name of principal LPSTR pszPackage, // Name of package unsigned long fCredentialUse, // Flags indicating use @@ -38,7 +43,7 @@ typedef DWORD (WINAPI *acquireCredentialsHandle_fn)( /** * Initialize Security Context */ -SECURITY_STATUS SEC_ENTRY _sspi_initialize_security_context( +SECURITY_STATUS SEC_ENTRY call_sspi_initialize_security_context( PCredHandle phCredential, // Cred to base context PCtxtHandle phContext, // Existing context (OPT) LPSTR pszTargetName, // Name of target @@ -61,7 +66,7 @@ typedef DWORD (WINAPI *initializeSecurityContext_fn)( /** * Query Context Attributes */ -SECURITY_STATUS SEC_ENTRY _sspi_query_context_attributes( +SECURITY_STATUS SEC_ENTRY call_sspi_query_context_attributes( PCtxtHandle phContext, // Context to query unsigned long ulAttribute, // Attribute to query void * pBuffer // Buffer for attributes @@ -73,7 +78,7 @@ typedef DWORD (WINAPI *queryContextAttributes_fn)( /** * Load security.dll dynamically */ -int _load_library(); +int load_library(); #endif #endif diff --git a/sasl/sasl_sspi.c b/sasl/sasl_windows.c similarity index 91% rename from sasl/sasl_sspi.c rename to sasl/sasl_windows.c index bb4239b6c..70c287b97 100644 --- a/sasl/sasl_sspi.c +++ b/sasl/sasl_windows.c @@ -1,6 +1,6 @@ #ifdef _WIN32 -#include "sasl_sspi.h" +#include "sasl_windows.h" static const LPSTR SSPI_PACKAGE_NAME = 
"kerberos"; @@ -15,7 +15,7 @@ SECURITY_STATUS SEC_ENTRY sspi_acquire_credentials_handle(CredHandle* cred_handl auth_identity.PasswordLength = strlen(password); auth_identity.Domain = (LPSTR) domain; auth_identity.DomainLength = strlen(domain); - return _sspi_acquire_credentials_handle( + return call_sspi_acquire_credentials_handle( NULL, SSPI_PACKAGE_NAME, SECPKG_CRED_OUTBOUND, @@ -53,7 +53,7 @@ int sspi_step(CredHandle* cred_handle, int has_context, CtxtHandle* context, PVO ULONG context_attr = 0; - int ret = _sspi_initialize_security_context( + int ret = call_sspi_initialize_security_context( cred_handle, has_context > 0 ? context : NULL, (LPSTR) target, @@ -76,7 +76,7 @@ int sspi_step(CredHandle* cred_handle, int has_context, CtxtHandle* context, PVO int sspi_send_client_authz_id(CtxtHandle* context, PVOID* buffer, ULONG* buffer_length, char* user_plus_realm) { SecPkgContext_Sizes sizes; - SECURITY_STATUS status = _sspi_query_context_attributes(context, SECPKG_ATTR_SIZES, &sizes); + SECURITY_STATUS status = call_sspi_query_context_attributes(context, SECPKG_ATTR_SIZES, &sizes); if (status != SEC_E_OK) { return status; @@ -108,7 +108,7 @@ int sspi_send_client_authz_id(CtxtHandle* context, PVOID* buffer, ULONG* buffer_ wrapBufs[2].BufferType = SECBUFFER_PADDING; wrapBufs[2].pvBuffer = msg + sizes.cbSecurityTrailer + msgSize; - status = _sspi_encrypt_message(context, SECQOP_WRAP_NO_ENCRYPT, &wrapBufDesc, 0); + status = call_sspi_encrypt_message(context, SECQOP_WRAP_NO_ENCRYPT, &wrapBufDesc, 0); if (status != SEC_E_OK) { return status; } diff --git a/sasl/sasl_sspi.go b/sasl/sasl_windows.go similarity index 98% rename from sasl/sasl_sspi.go rename to sasl/sasl_windows.go index 30d478eb9..f45398306 100644 --- a/sasl/sasl_sspi.go +++ b/sasl/sasl_windows.go @@ -1,9 +1,7 @@ -// +build windows - package sasl // -// #include "sasl_sspi.h" +// #include "sasl_windows.h" // import "C" @@ -45,7 +43,7 @@ var initError error var initOnce sync.Once func initSSPI() { - rc := 
C._load_library() + rc := C.load_library() if rc != 0 { initError = fmt.Errorf("Error loading libraries: %v", rc) } diff --git a/sasl/sasl_sspi.h b/sasl/sasl_windows.h similarity index 100% rename from sasl/sasl_sspi.h rename to sasl/sasl_windows.h From 39705fb87f09f53d2195e88b7786a9eb5a92b2a1 Mon Sep 17 00:00:00 2001 From: Valeri Karpov Date: Thu, 18 Sep 2014 11:49:14 -0400 Subject: [PATCH 047/305] Address Gustavo's latest round of comments --- sasl/sasl_windows.c | 4 ---- sasl/sasl_windows.go | 7 ++----- sasl/sasl_windows.h | 3 --- 3 files changed, 2 insertions(+), 12 deletions(-) diff --git a/sasl/sasl_windows.c b/sasl/sasl_windows.c index 70c287b97..181f493ae 100644 --- a/sasl/sasl_windows.c +++ b/sasl/sasl_windows.c @@ -1,5 +1,3 @@ -#ifdef _WIN32 - #include "sasl_windows.h" static const LPSTR SSPI_PACKAGE_NAME = "kerberos"; @@ -122,5 +120,3 @@ int sspi_send_client_authz_id(CtxtHandle* context, PVOID* buffer, ULONG* buffer_ return SEC_E_OK; } - -#endif diff --git a/sasl/sasl_windows.go b/sasl/sasl_windows.go index f45398306..6adba4044 100644 --- a/sasl/sasl_windows.go +++ b/sasl/sasl_windows.go @@ -95,11 +95,8 @@ func (ss *saslSession) Close() { C.free(unsafe.Pointer(cstr)) } - // Make sure we've cleaned up all the buffers we malloced when we're sure we don't need em anymore - if ss.authComplete || ss.errored { - for _, cbuf := range ss.buffersToFree { - C.free(unsafe.Pointer(cbuf)) - } + for _, cbuf := range ss.buffersToFree { + C.free(unsafe.Pointer(cbuf)) } } diff --git a/sasl/sasl_windows.h b/sasl/sasl_windows.h index 4ef6dc20f..d33e7f814 100644 --- a/sasl/sasl_windows.h +++ b/sasl/sasl_windows.h @@ -1,9 +1,6 @@ -#ifdef _WIN32 #include #include "kerberos_sspi.h" SECURITY_STATUS SEC_ENTRY sspi_acquire_credentials_handle(CredHandle* cred_handle, char* username, char* password, char* domain); int sspi_step(CredHandle* cred_handle, int has_context, CtxtHandle* context, PVOID* buffer, ULONG* buffer_length, char* target); int sspi_send_client_authz_id(CtxtHandle* 
context, PVOID* buffer, ULONG* buffer_length, char* user_plus_realm); - -#endif From 13e715b61b25e7b30c6ed9d855464b673362c67c Mon Sep 17 00:00:00 2001 From: Valeri Karpov Date: Thu, 18 Sep 2014 14:39:16 -0400 Subject: [PATCH 048/305] Change build tag in sasl.go --- sasl/sasl.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sasl/sasl.go b/sasl/sasl.go index b606afbc5..8375dddf8 100644 --- a/sasl/sasl.go +++ b/sasl/sasl.go @@ -3,7 +3,7 @@ // This package is not meant to be used by itself. // -// +build linux darwin +// +build !windows package sasl From 0ccc1500385ea3750ca8f9fc94464abd5a7d7774 Mon Sep 17 00:00:00 2001 From: Valeri Karpov Date: Thu, 18 Sep 2014 16:19:29 -0400 Subject: [PATCH 049/305] Test cases for SSPI --- auth_test.go | 70 +++++++++++++++++++++++++++++++------------- remote_test.go | 47 +++++++++++++++++++++++++++++ sasl/sasl_windows.go | 24 +++++++++++---- 3 files changed, 114 insertions(+), 27 deletions(-) create mode 100644 remote_test.go diff --git a/auth_test.go b/auth_test.go index 33ff4633a..9a985a26e 100644 --- a/auth_test.go +++ b/auth_test.go @@ -30,6 +30,8 @@ import ( "flag" "fmt" "net/url" + "os" + "runtime" "sync" "time" @@ -893,11 +895,13 @@ func (s *S) TestAuthPlainURL(c *C) { var ( kerberosFlag = flag.Bool("kerberos", false, "Test Kerberos authentication (depends on custom environment)") - kerberosHost = "mmscustmongo.10gen.me" - kerberosUser = "mmsagent/mmscustagent.10gen.me@10GEN.ME" + kerberosHost = "ldaptest.10gen.cc" + kerberosUser = "drivers@LDAPTEST.10GEN.CC" + + winKerberosPasswordEnv = "MGO_KERBEROS_PASSWORD" ) -func (s *S) TestAuthKerberosCred(c *C) { +func (remoteSuite *RemoteSuite) TestAuthKerberosCred(c *C) { if !*kerberosFlag { c.Skip("no -kerberos") } @@ -905,39 +909,44 @@ func (s *S) TestAuthKerberosCred(c *C) { Username: kerberosUser, Mechanism: "GSSAPI", } + windowsAppendPasswordToCredential(cred) c.Logf("Connecting to %s...", kerberosHost) session, err := mgo.Dial(kerberosHost) c.Assert(err, 
IsNil) defer session.Close() c.Logf("Connected! Testing the need for authentication...") - names, err := session.DatabaseNames() - c.Assert(err, ErrorMatches, "unauthorized") + n, err := session.DB("kerberos").C("test").Find(M{}).Count() + c.Assert(err, ErrorMatches, ".*authorized.*") c.Logf("Authenticating...") err = session.Login(cred) c.Assert(err, IsNil) c.Logf("Authenticated!") - names, err = session.DatabaseNames() + n, err = session.DB("kerberos").C("test").Find(M{}).Count() c.Assert(err, IsNil) - c.Assert(len(names) > 0, Equals, true) + c.Assert(n, Equals, 1) } -func (s *S) TestAuthKerberosURL(c *C) { +func (remoteSuite *RemoteSuite) TestAuthKerberosURL(c *C) { if !*kerberosFlag { c.Skip("no -kerberos") } c.Logf("Connecting to %s...", kerberosHost) - session, err := mgo.Dial(url.QueryEscape(kerberosUser) + "@" + kerberosHost + "?authMechanism=GSSAPI") + connectUri := url.QueryEscape(kerberosUser) + "@" + kerberosHost + "?authMechanism=GSSAPI" + if runtime.GOOS == "windows" { + connectUri = url.QueryEscape(kerberosUser) + ":" + url.QueryEscape(getWindowsKerberosPassword()) + "@" + kerberosHost + "?authMechanism=GSSAPI" + } + session, err := mgo.Dial(connectUri) c.Assert(err, IsNil) defer session.Close() - names, err := session.DatabaseNames() + n, err := session.DB("kerberos").C("test").Find(M{}).Count() c.Assert(err, IsNil) - c.Assert(len(names) > 0, Equals, true) + c.Assert(n, Equals, 1) } -func (s *S) TestAuthKerberosServiceName(c *C) { +func (remoteSuite *RemoteSuite) TestAuthKerberosServiceName(c *C) { if !*kerberosFlag { c.Skip("no -kerberos") } @@ -948,8 +957,9 @@ func (s *S) TestAuthKerberosServiceName(c *C) { cred := &mgo.Credential{ Username: kerberosUser, Mechanism: "GSSAPI", - Service: wrongServiceName, + Service: wrongServiceName, } + windowsAppendPasswordToCredential(cred) c.Logf("Connecting to %s...", kerberosHost) session, err := mgo.Dial(kerberosHost) @@ -958,7 +968,7 @@ func (s *S) TestAuthKerberosServiceName(c *C) { c.Logf("Authenticating 
with incorrect service name...") err = session.Login(cred) - c.Assert(err, ErrorMatches, ".*Server wrong/mmscustmongo.10gen.me@10GEN.ME not found.*") + c.Assert(err, ErrorMatches, ".*wrong/ldaptest.10gen.cc@LDAPTEST.10GEN.CC not found.*") cred.Service = rightServiceName c.Logf("Authenticating with correct service name...") @@ -966,24 +976,25 @@ func (s *S) TestAuthKerberosServiceName(c *C) { c.Assert(err, IsNil) c.Logf("Authenticated!") - names, err := session.DatabaseNames() + n, err := session.DB("kerberos").C("test").Find(M{}).Count() c.Assert(err, IsNil) - c.Assert(len(names) > 0, Equals, true) + c.Assert(n, Equals, 1) } -func (s *S) TestAuthKerberosServiceHost(c *C) { +func (remoteSuite *RemoteSuite) TestAuthKerberosServiceHost(c *C) { if !*kerberosFlag { c.Skip("no -kerberos") } wrongServiceHost := "eggs.bacon.tk" - rightServiceHost := "mmscustmongo.10gen.me" + rightServiceHost := kerberosHost cred := &mgo.Credential{ Username: kerberosUser, Mechanism: "GSSAPI", ServiceHost: wrongServiceHost, } + windowsAppendPasswordToCredential(cred) c.Logf("Connecting to %s...", kerberosHost) session, err := mgo.Dial(kerberosHost) @@ -992,7 +1003,7 @@ func (s *S) TestAuthKerberosServiceHost(c *C) { c.Logf("Authenticating with incorrect service host...") err = session.Login(cred) - c.Assert(err, ErrorMatches, ".*Server krbtgt/BACON.TK@10GEN.ME not found.*") + c.Assert(err, ErrorMatches, ".*@LDAPTEST.10GEN.CC not found.*") cred.ServiceHost = rightServiceHost c.Logf("Authenticating with correct service host...") @@ -1000,7 +1011,24 @@ func (s *S) TestAuthKerberosServiceHost(c *C) { c.Assert(err, IsNil) c.Logf("Authenticated!") - names, err := session.DatabaseNames() + n, err := session.DB("kerberos").C("test").Find(M{}).Count() c.Assert(err, IsNil) - c.Assert(len(names) > 0, Equals, true) + c.Assert(n, Equals, 1) +} + +// No kinit on SSPI-style Kerberos, so we need to provide a password. 
In order +// to avoid inlining password, require it to be set as an environment variable, +// for instance: `SET MGO_KERBEROS_PASSWORD=this_isnt_the_password` +func getWindowsKerberosPassword() string { + pw := os.Getenv(winKerberosPasswordEnv) + if pw == "" { + panic(fmt.Sprintf("Need to set %v environment variable to run Kerberos tests on Windows", winKerberosPasswordEnv)) + } + return pw +} + +func windowsAppendPasswordToCredential(cred *mgo.Credential) { + if runtime.GOOS == "windows" { + cred.Password = getWindowsKerberosPassword() + } } diff --git a/remote_test.go b/remote_test.go new file mode 100644 index 000000000..164adb752 --- /dev/null +++ b/remote_test.go @@ -0,0 +1,47 @@ +// mgo - MongoDB driver for Go +// +// Copyright (c) 2010-2012 - Gustavo Niemeyer +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package mgo_test + +import ( + . "gopkg.in/check.v1" + "gopkg.in/mgo.v2" +) + +type RemoteSuite struct { +} + +var _ = Suite(&RemoteSuite{}) + +func (remoteSuite *RemoteSuite) SetUpSuite(c *C) { + mgo.SetDebug(true) + mgo.SetStats(true) +} + +func (remoteSuite *RemoteSuite) SetUpTest(c *C) { + mgo.SetLogger((*cLogger)(c)) + mgo.ResetStats() +} diff --git a/sasl/sasl_windows.go b/sasl/sasl_windows.go index 6adba4044..bd403f7a0 100644 --- a/sasl/sasl_windows.go +++ b/sasl/sasl_windows.go @@ -23,6 +23,8 @@ type saslSession struct { service string host string userPlusRealm string + target string + domain string // Internal state authComplete bool @@ -66,14 +68,16 @@ func New(username, password, mechanism, service, host string) (saslStepper, erro return nil, fmt.Errorf("Username '%v' doesn't contain a realm!", username) } user := usernameComponents[0] - domain := usernameComponents[1] + ss.domain = usernameComponents[1] + ss.target = fmt.Sprintf("%s/%s", ss.service, ss.host) var status C.SECURITY_STATUS // Step 0: call AcquireCredentialsHandle to get a nice SSPI CredHandle if len(password) > 0 { - status = C.sspi_acquire_credentials_handle(&ss.credHandle, ss.cstr(user), ss.cstr(password), ss.cstr(domain)) + status = C.sspi_acquire_credentials_handle(&ss.credHandle, ss.cstr(user), ss.cstr(password), ss.cstr(ss.domain)) } else { - status = C.sspi_acquire_credentials_handle(&ss.credHandle, ss.cstr(user), nil, ss.cstr(domain)) + panic("Need 
password!") + status = C.sspi_acquire_credentials_handle(&ss.credHandle, ss.cstr(user), nil, ss.cstr(ss.domain)) } if status != C.SEC_E_OK { @@ -118,14 +122,13 @@ func (ss *saslSession) Step(serverData []byte) (clientData []byte, done bool, er ss.buffersToFree = append(ss.buffersToFree, buffer) } else { // Step 1 + Step 2: set up security context with the server and TGT - target := fmt.Sprintf("%s/%s", ss.service, ss.host) - status = C.sspi_step(&ss.credHandle, ss.hasContext, &ss.context, &buffer, &bufferLength, ss.cstr(target)) + status = C.sspi_step(&ss.credHandle, ss.hasContext, &ss.context, &buffer, &bufferLength, ss.cstr(ss.target)) ss.buffersToFree = append(ss.buffersToFree, buffer) } if status != C.SEC_E_OK && status != C.SEC_I_CONTINUE_NEEDED { ss.errored = true - return nil, false, fmt.Errorf("Error doing step %v, error code %v", ss.step, status) + return nil, false, ss.handleSSPIErrorCode(status) } clientData = C.GoBytes(unsafe.Pointer(buffer), C.int(bufferLength)) @@ -137,3 +140,12 @@ func (ss *saslSession) Step(serverData []byte) (clientData []byte, done bool, er return clientData, false, nil } } + +func (ss *saslSession) handleSSPIErrorCode(code C.int) error { + switch { + case code == C.SEC_E_TARGET_UNKNOWN: + return fmt.Errorf("Target %v@%v not found", ss.target, ss.domain) + } + + return fmt.Errorf("Unknown error doing step %v, error code %v", ss.step, code) +} From 9f5f592a161729e328d5a4c36b74a345ff3b97dc Mon Sep 17 00:00:00 2001 From: Valeri Karpov Date: Mon, 22 Sep 2014 15:21:54 -0400 Subject: [PATCH 050/305] Remove debug panic statement --- sasl/sasl_windows.go | 1 - 1 file changed, 1 deletion(-) diff --git a/sasl/sasl_windows.go b/sasl/sasl_windows.go index bd403f7a0..ccaf02d20 100644 --- a/sasl/sasl_windows.go +++ b/sasl/sasl_windows.go @@ -76,7 +76,6 @@ func New(username, password, mechanism, service, host string) (saslStepper, erro if len(password) > 0 { status = C.sspi_acquire_credentials_handle(&ss.credHandle, ss.cstr(user), 
ss.cstr(password), ss.cstr(ss.domain)) } else { - panic("Need password!") status = C.sspi_acquire_credentials_handle(&ss.credHandle, ss.cstr(user), nil, ss.cstr(ss.domain)) } From d37f05456dc22e083cfadafb5d9678a3636f808a Mon Sep 17 00:00:00 2001 From: Valeri Karpov Date: Tue, 23 Sep 2014 10:58:50 -0400 Subject: [PATCH 051/305] Rename remoteSuite -> kerberosSuite --- auth_test.go | 25 +++++++++++++++++++++---- remote_test.go | 47 ----------------------------------------------- 2 files changed, 21 insertions(+), 51 deletions(-) delete mode 100644 remote_test.go diff --git a/auth_test.go b/auth_test.go index 9a985a26e..a632185d9 100644 --- a/auth_test.go +++ b/auth_test.go @@ -901,7 +901,24 @@ var ( winKerberosPasswordEnv = "MGO_KERBEROS_PASSWORD" ) -func (remoteSuite *RemoteSuite) TestAuthKerberosCred(c *C) { +// Kerberos has its own suite because it talks to a remote server and thus +// doesn't need the usual Setup() and Teardown() +type KerberosSuite struct { +} + +var _ = Suite(&KerberosSuite{}) + +func (kerberosSuite *KerberosSuite) SetUpSuite(c *C) { + mgo.SetDebug(true) + mgo.SetStats(true) +} + +func (kerberosSuite *KerberosSuite) SetUpTest(c *C) { + mgo.SetLogger((*cLogger)(c)) + mgo.ResetStats() +} + +func (kerberosSuite *KerberosSuite) TestAuthKerberosCred(c *C) { if !*kerberosFlag { c.Skip("no -kerberos") } @@ -929,7 +946,7 @@ func (remoteSuite *RemoteSuite) TestAuthKerberosCred(c *C) { c.Assert(n, Equals, 1) } -func (remoteSuite *RemoteSuite) TestAuthKerberosURL(c *C) { +func (kerberosSuite *KerberosSuite) TestAuthKerberosURL(c *C) { if !*kerberosFlag { c.Skip("no -kerberos") } @@ -946,7 +963,7 @@ func (remoteSuite *RemoteSuite) TestAuthKerberosURL(c *C) { c.Assert(n, Equals, 1) } -func (remoteSuite *RemoteSuite) TestAuthKerberosServiceName(c *C) { +func (kerberosSuite *KerberosSuite) TestAuthKerberosServiceName(c *C) { if !*kerberosFlag { c.Skip("no -kerberos") } @@ -981,7 +998,7 @@ func (remoteSuite *RemoteSuite) TestAuthKerberosServiceName(c *C) { 
c.Assert(n, Equals, 1) } -func (remoteSuite *RemoteSuite) TestAuthKerberosServiceHost(c *C) { +func (kerberosSuite *KerberosSuite) TestAuthKerberosServiceHost(c *C) { if !*kerberosFlag { c.Skip("no -kerberos") } diff --git a/remote_test.go b/remote_test.go deleted file mode 100644 index 164adb752..000000000 --- a/remote_test.go +++ /dev/null @@ -1,47 +0,0 @@ -// mgo - MongoDB driver for Go -// -// Copyright (c) 2010-2012 - Gustavo Niemeyer -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package mgo_test - -import ( - . 
"gopkg.in/check.v1" - "gopkg.in/mgo.v2" -) - -type RemoteSuite struct { -} - -var _ = Suite(&RemoteSuite{}) - -func (remoteSuite *RemoteSuite) SetUpSuite(c *C) { - mgo.SetDebug(true) - mgo.SetStats(true) -} - -func (remoteSuite *RemoteSuite) SetUpTest(c *C) { - mgo.SetLogger((*cLogger)(c)) - mgo.ResetStats() -} From 10ffa59258a3c5e271ca694388f0bcc4a655a5c7 Mon Sep 17 00:00:00 2001 From: Valeri Karpov Date: Tue, 23 Sep 2014 13:18:40 -0400 Subject: [PATCH 052/305] Make test pattern a bit more flexible so checks out on both windows and linux --- auth_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/auth_test.go b/auth_test.go index a632185d9..93d8aa5dd 100644 --- a/auth_test.go +++ b/auth_test.go @@ -985,7 +985,7 @@ func (kerberosSuite *KerberosSuite) TestAuthKerberosServiceName(c *C) { c.Logf("Authenticating with incorrect service name...") err = session.Login(cred) - c.Assert(err, ErrorMatches, ".*wrong/ldaptest.10gen.cc@LDAPTEST.10GEN.CC not found.*") + c.Assert(err, ErrorMatches, ".*@LDAPTEST.10GEN.CC not found.*") cred.Service = rightServiceName c.Logf("Authenticating with correct service name...") From 76bfcd064543767642fed89b22b95528b80b6bae Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Sun, 28 Sep 2014 21:13:31 -0300 Subject: [PATCH 053/305] Blind Windows SASL tweaks (such a bad idea). 
--- sasl/kerberos_sspi.c | 173 ---------------------------------- sasl/kerberos_sspi.h | 84 ----------------- sasl/sasl.c | 4 +- sasl/sasl_windows.c | 219 +++++++++++++++++++++---------------------- sasl/sasl_windows.go | 9 +- sasl/sasl_windows.h | 1 + sasl/sspi_windows.c | 96 +++++++++++++++++++ sasl/sspi_windows.h | 70 ++++++++++++++ 8 files changed, 276 insertions(+), 380 deletions(-) delete mode 100644 sasl/kerberos_sspi.c delete mode 100644 sasl/kerberos_sspi.h create mode 100644 sasl/sspi_windows.c create mode 100644 sasl/sspi_windows.h diff --git a/sasl/kerberos_sspi.c b/sasl/kerberos_sspi.c deleted file mode 100644 index a6bee0544..000000000 --- a/sasl/kerberos_sspi.c +++ /dev/null @@ -1,173 +0,0 @@ -/** This code is adapted from the NodeJS kerberos library: - * https://github.com/christkv/kerberos/tree/master/lib/win32/kerberos_sspi.c - * Under the terms of the Apache License, Version 2.0: - * http://www.apache.org/licenses/LICENSE-2.0 */ - -#ifdef _WIN32 -#include "kerberos_sspi.h" -#include - -static HINSTANCE _sspi_security_dll = NULL; -static HINSTANCE _sspi_secur32_dll = NULL; - -/** - * Encrypt A Message - */ -SECURITY_STATUS SEC_ENTRY call_sspi_encrypt_message(PCtxtHandle phContext, unsigned long fQOP, PSecBufferDesc pMessage, unsigned long MessageSeqNo) { - // Create function pointer instance - encryptMessage_fn pfn_encryptMessage = NULL; - - // Return error if library not loaded - if (_sspi_security_dll == NULL) { - return -1; - } - - // Map function to library method - pfn_encryptMessage = (encryptMessage_fn) GetProcAddress(_sspi_security_dll, "EncryptMessage"); - // Check if the we managed to map function pointer - if (!pfn_encryptMessage) { - return -2; - } - - // Call the function - return (*pfn_encryptMessage)(phContext, fQOP, pMessage, MessageSeqNo); -} - -/** - * Acquire Credentials - */ -SECURITY_STATUS SEC_ENTRY call_sspi_acquire_credentials_handle(LPSTR pszPrincipal, LPSTR pszPackage, unsigned long fCredentialUse, - void* pvLogonId, 
void* pAuthData, SEC_GET_KEY_FN pGetKeyFn, void* pvGetKeyArgument, PCredHandle phCredential, - PTimeStamp ptsExpiry) { - // Create function pointer instance - acquireCredentialsHandle_fn pfn_acquireCredentialsHandle = NULL; - - // Return error if library not loaded - if (_sspi_security_dll == NULL) { - return -1; - } - - // Map function - #ifdef _UNICODE - pfn_acquireCredentialsHandle = (acquireCredentialsHandle_fn) GetProcAddress(_sspi_security_dll, "AcquireCredentialsHandleW"); - #else - pfn_acquireCredentialsHandle = (acquireCredentialsHandle_fn) GetProcAddress(_sspi_security_dll, "AcquireCredentialsHandleA"); - #endif - - // Check if the we managed to map function pointer - if (!pfn_acquireCredentialsHandle) { - return -2; - } - - // Status - return (*pfn_acquireCredentialsHandle)( - pszPrincipal, - pszPackage, - fCredentialUse, - pvLogonId, - pAuthData, - pGetKeyFn, - pvGetKeyArgument, - phCredential, - ptsExpiry); -} - -/** - * Initialize Security Context - */ -SECURITY_STATUS SEC_ENTRY call_sspi_initialize_security_context(PCredHandle phCredential, PCtxtHandle phContext, LPSTR pszTargetName, - unsigned long fContextReq, unsigned long Reserved1, unsigned long TargetDataRep, PSecBufferDesc pInput, unsigned long Reserved2, - PCtxtHandle phNewContext, PSecBufferDesc pOutput, unsigned long * pfContextAttr, PTimeStamp ptsExpiry) { - // Create function pointer instance - initializeSecurityContext_fn pfn_initializeSecurityContext = NULL; - - // Return error if library not loaded - if (_sspi_security_dll == NULL) { - return -1; - } - - // Map function - #ifdef _UNICODE - pfn_initializeSecurityContext = (initializeSecurityContext_fn) GetProcAddress(_sspi_security_dll, "InitializeSecurityContextW"); - #else - pfn_initializeSecurityContext = (initializeSecurityContext_fn) GetProcAddress(_sspi_security_dll, "InitializeSecurityContextA"); - #endif - - // Check if the we managed to map function pointer - if (!pfn_initializeSecurityContext) { - return -2; - } - - // Execute 
intialize context - return (*pfn_initializeSecurityContext)( - phCredential, - phContext, - pszTargetName, - fContextReq, - Reserved1, - TargetDataRep, - pInput, - Reserved2, - phNewContext, - pOutput, - pfContextAttr, - ptsExpiry); -} - -/** - * Query Context Attributes - */ -SECURITY_STATUS SEC_ENTRY call_sspi_query_context_attributes(PCtxtHandle phContext, unsigned long ulAttribute, void * pBuffer) { - // Create function pointer instance - queryContextAttributes_fn pfn_queryContextAttributes = NULL; - - // Return error if library not loaded - if (_sspi_security_dll == NULL) { - return -1; - } - - #ifdef _UNICODE - pfn_queryContextAttributes = (queryContextAttributes_fn) GetProcAddress(_sspi_security_dll, "QueryContextAttributesW"); - #else - pfn_queryContextAttributes = (queryContextAttributes_fn) GetProcAddress(_sspi_security_dll, "QueryContextAttributesA"); - #endif - - // Check if the we managed to map function pointer - if (!pfn_queryContextAttributes) { - return -2; - } - - // Call the function - return (*pfn_queryContextAttributes)( - phContext, - ulAttribute, - pBuffer); -} - -/** - * Load security.dll dynamically - */ -int load_library() { - DWORD err; - // Load the library - _sspi_security_dll = LoadLibrary("security.dll"); - - // Check if the library loaded - if (_sspi_security_dll == NULL) { - err = GetLastError(); - return err; - } - - // Load the library - _sspi_secur32_dll = LoadLibrary("secur32.dll"); - - // Check if the library loaded - if (_sspi_secur32_dll == NULL) { - err = GetLastError(); - return err; - } - - return 0; -} - -#endif diff --git a/sasl/kerberos_sspi.h b/sasl/kerberos_sspi.h deleted file mode 100644 index a1f7b04a2..000000000 --- a/sasl/kerberos_sspi.h +++ /dev/null @@ -1,84 +0,0 @@ -/** This code is adapted from the NodeJS kerberos library: - * https://github.com/christkv/kerberos/tree/master/lib/win32/kerberos_sspi.h - * Under the terms of the Apache License, Version 2.0: - * http://www.apache.org/licenses/LICENSE-2.0 */ - 
-#ifdef _WIN32 -#ifndef SSPI_C_H -#define SSPI_C_H - -#define SECURITY_WIN32 1 - -#include -#include - -/** - * Encrypt A Message - */ -SECURITY_STATUS SEC_ENTRY call_sspi_encrypt_message(PCtxtHandle phContext, unsigned long fQOP, PSecBufferDesc pMessage, unsigned long MessageSeqNo); - -typedef DWORD (WINAPI *encryptMessage_fn)(PCtxtHandle phContext, ULONG fQOP, PSecBufferDesc pMessage, ULONG MessageSeqNo); - -/** - * Acquire Credentials - */ -SECURITY_STATUS SEC_ENTRY call_sspi_acquire_credentials_handle( - LPSTR pszPrincipal, // Name of principal - LPSTR pszPackage, // Name of package - unsigned long fCredentialUse, // Flags indicating use - void * pvLogonId, // Pointer to logon ID - void * pAuthData, // Package specific data - SEC_GET_KEY_FN pGetKeyFn, // Pointer to GetKey() func - void * pvGetKeyArgument, // Value to pass to GetKey() - PCredHandle phCredential, // (out) Cred Handle - PTimeStamp ptsExpiry // (out) Lifetime (optional) -); - -typedef DWORD (WINAPI *acquireCredentialsHandle_fn)( - LPSTR pszPrincipal, LPSTR pszPackage, unsigned long fCredentialUse, - void * pvLogonId, void * pAuthData, SEC_GET_KEY_FN pGetKeyFn, void * pvGetKeyArgument, - PCredHandle phCredential, PTimeStamp ptsExpiry - ); - -/** - * Initialize Security Context - */ -SECURITY_STATUS SEC_ENTRY call_sspi_initialize_security_context( - PCredHandle phCredential, // Cred to base context - PCtxtHandle phContext, // Existing context (OPT) - LPSTR pszTargetName, // Name of target - unsigned long fContextReq, // Context Requirements - unsigned long Reserved1, // Reserved, MBZ - unsigned long TargetDataRep, // Data rep of target - PSecBufferDesc pInput, // Input Buffers - unsigned long Reserved2, // Reserved, MBZ - PCtxtHandle phNewContext, // (out) New Context handle - PSecBufferDesc pOutput, // (inout) Output Buffers - unsigned long * pfContextAttr, // (out) Context attrs - PTimeStamp ptsExpiry // (out) Life span (OPT) -); - -typedef DWORD (WINAPI *initializeSecurityContext_fn)( - 
PCredHandle phCredential, PCtxtHandle phContext, LPSTR pszTargetName, unsigned long fContextReq, - unsigned long Reserved1, unsigned long TargetDataRep, PSecBufferDesc pInput, unsigned long Reserved2, - PCtxtHandle phNewContext, PSecBufferDesc pOutput, unsigned long * pfContextAttr, PTimeStamp ptsExpiry); - -/** - * Query Context Attributes - */ -SECURITY_STATUS SEC_ENTRY call_sspi_query_context_attributes( - PCtxtHandle phContext, // Context to query - unsigned long ulAttribute, // Attribute to query - void * pBuffer // Buffer for attributes -); - -typedef DWORD (WINAPI *queryContextAttributes_fn)( - PCtxtHandle phContext, unsigned long ulAttribute, void * pBuffer); - -/** - * Load security.dll dynamically - */ -int load_library(); - -#endif -#endif diff --git a/sasl/sasl.c b/sasl/sasl.c index cd8622260..f518abd88 100644 --- a/sasl/sasl.c +++ b/sasl/sasl.c @@ -10,10 +10,10 @@ static int mgo_sasl_simple(void *context, int id, const char **result, unsigned } switch (id) { case SASL_CB_USER: - *result = (char *) context; + *result = (char *)context; break; case SASL_CB_AUTHNAME: - *result = (char *) context; + *result = (char *)context; break; case SASL_CB_LANGUAGE: *result = NULL; diff --git a/sasl/sasl_windows.c b/sasl/sasl_windows.c index 181f493ae..4fa175c14 100644 --- a/sasl/sasl_windows.c +++ b/sasl/sasl_windows.c @@ -2,121 +2,114 @@ static const LPSTR SSPI_PACKAGE_NAME = "kerberos"; -SECURITY_STATUS SEC_ENTRY sspi_acquire_credentials_handle(CredHandle* cred_handle, char* username, char* password, char* domain) { - SEC_WINNT_AUTH_IDENTITY auth_identity; - SECURITY_INTEGER ignored; - - auth_identity.Flags = SEC_WINNT_AUTH_IDENTITY_ANSI; - auth_identity.User = (LPSTR) username; - auth_identity.UserLength = strlen(username); - auth_identity.Password = (LPSTR) password; - auth_identity.PasswordLength = strlen(password); - auth_identity.Domain = (LPSTR) domain; - auth_identity.DomainLength = strlen(domain); - return call_sspi_acquire_credentials_handle( - NULL, - 
SSPI_PACKAGE_NAME, - SECPKG_CRED_OUTBOUND, - NULL, - &auth_identity, - NULL, - NULL, - cred_handle, - &ignored); +SECURITY_STATUS SEC_ENTRY sspi_acquire_credentials_handle(CredHandle *cred_handle, char *username, char *password, char *domain) +{ + SEC_WINNT_AUTH_IDENTITY auth_identity; + SECURITY_INTEGER ignored; + + auth_identity.Flags = SEC_WINNT_AUTH_IDENTITY_ANSI; + auth_identity.User = (LPSTR) username; + auth_identity.UserLength = strlen(username); + auth_identity.Password = (LPSTR) password; + auth_identity.PasswordLength = strlen(password); + auth_identity.Domain = (LPSTR) domain; + auth_identity.DomainLength = strlen(domain); + return call_sspi_acquire_credentials_handle(NULL, SSPI_PACKAGE_NAME, SECPKG_CRED_OUTBOUND, NULL, &auth_identity, NULL, NULL, cred_handle, &ignored); } -int sspi_step(CredHandle* cred_handle, int has_context, CtxtHandle* context, PVOID* buffer, ULONG* buffer_length, char* target) { - SecBufferDesc inbuf; - SecBuffer in_bufs[1]; - SecBufferDesc outbuf; - SecBuffer out_bufs[1]; - - if (has_context > 0) { - // If we already have a context, we now have data to send. - // Put this data in an inbuf. - inbuf.ulVersion = SECBUFFER_VERSION; - inbuf.cBuffers = 1; - inbuf.pBuffers = in_bufs; - in_bufs[0].pvBuffer = *buffer; - in_bufs[0].cbBuffer = *buffer_length; - in_bufs[0].BufferType = SECBUFFER_TOKEN; - } - - outbuf.ulVersion = SECBUFFER_VERSION; - outbuf.cBuffers = 1; - outbuf.pBuffers = out_bufs; - out_bufs[0].pvBuffer = NULL; - out_bufs[0].cbBuffer = 0; - out_bufs[0].BufferType = SECBUFFER_TOKEN; - - ULONG context_attr = 0; - - int ret = call_sspi_initialize_security_context( - cred_handle, - has_context > 0 ? context : NULL, - (LPSTR) target, - ISC_REQ_ALLOCATE_MEMORY | ISC_REQ_MUTUAL_AUTH, - 0, - SECURITY_NETWORK_DREP, - has_context > 0 ? 
&inbuf : NULL, - 0, - context, - &outbuf, - &context_attr, - NULL); - - *buffer = malloc(out_bufs[0].cbBuffer); - *buffer_length = out_bufs[0].cbBuffer; - memcpy(*buffer, out_bufs[0].pvBuffer, *buffer_length); - - return ret; +int sspi_step(CredHandle *cred_handle, int has_context, CtxtHandle *context, PVOID *buffer, ULONG *buffer_length, char *target) +{ + SecBufferDesc inbuf; + SecBuffer in_bufs[1]; + SecBufferDesc outbuf; + SecBuffer out_bufs[1]; + + if (has_context > 0) { + // If we already have a context, we now have data to send. + // Put this data in an inbuf. + inbuf.ulVersion = SECBUFFER_VERSION; + inbuf.cBuffers = 1; + inbuf.pBuffers = in_bufs; + in_bufs[0].pvBuffer = *buffer; + in_bufs[0].cbBuffer = *buffer_length; + in_bufs[0].BufferType = SECBUFFER_TOKEN; + } + + outbuf.ulVersion = SECBUFFER_VERSION; + outbuf.cBuffers = 1; + outbuf.pBuffers = out_bufs; + out_bufs[0].pvBuffer = NULL; + out_bufs[0].cbBuffer = 0; + out_bufs[0].BufferType = SECBUFFER_TOKEN; + + ULONG context_attr = 0; + + int ret = call_sspi_initialize_security_context(cred_handle, + has_context > 0 ? context : NULL, + (LPSTR) target, + ISC_REQ_ALLOCATE_MEMORY | ISC_REQ_MUTUAL_AUTH, + 0, + SECURITY_NETWORK_DREP, + has_context > 0 ? 
&inbuf : NULL, + 0, + context, + &outbuf, + &context_attr, + NULL); + + *buffer = malloc(out_bufs[0].cbBuffer); + *buffer_length = out_bufs[0].cbBuffer; + memcpy(*buffer, out_bufs[0].pvBuffer, *buffer_length); + + return ret; } -int sspi_send_client_authz_id(CtxtHandle* context, PVOID* buffer, ULONG* buffer_length, char* user_plus_realm) { - SecPkgContext_Sizes sizes; - SECURITY_STATUS status = call_sspi_query_context_attributes(context, SECPKG_ATTR_SIZES, &sizes); - - if (status != SEC_E_OK) { - return status; - } - - int msgSize = 4 + 25; - char* msg = malloc((sizes.cbSecurityTrailer + msgSize + sizes.cbBlockSize) * sizeof(char)); - msg[sizes.cbSecurityTrailer + 0] = 1; - msg[sizes.cbSecurityTrailer + 1] = 0; - msg[sizes.cbSecurityTrailer + 2] = 0; - msg[sizes.cbSecurityTrailer + 3] = 0; - memcpy(&msg[sizes.cbSecurityTrailer + 4], user_plus_realm, 25); - - SecBuffer wrapBufs[3]; - SecBufferDesc wrapBufDesc; - wrapBufDesc.cBuffers = 3; - wrapBufDesc.pBuffers = wrapBufs; - wrapBufDesc.ulVersion = SECBUFFER_VERSION; - - wrapBufs[0].cbBuffer = sizes.cbSecurityTrailer; - wrapBufs[0].BufferType = SECBUFFER_TOKEN; - wrapBufs[0].pvBuffer = msg; - - wrapBufs[1].cbBuffer = msgSize; - wrapBufs[1].BufferType = SECBUFFER_DATA; - wrapBufs[1].pvBuffer = msg + sizes.cbSecurityTrailer; - - wrapBufs[2].cbBuffer = sizes.cbBlockSize; - wrapBufs[2].BufferType = SECBUFFER_PADDING; - wrapBufs[2].pvBuffer = msg + sizes.cbSecurityTrailer + msgSize; - - status = call_sspi_encrypt_message(context, SECQOP_WRAP_NO_ENCRYPT, &wrapBufDesc, 0); - if (status != SEC_E_OK) { - return status; - } - - *buffer_length = wrapBufs[0].cbBuffer + wrapBufs[1].cbBuffer + wrapBufs[2].cbBuffer; - *buffer = malloc(*buffer_length); - - memcpy(*buffer, wrapBufs[0].pvBuffer, wrapBufs[0].cbBuffer); - memcpy(*buffer + wrapBufs[0].cbBuffer, wrapBufs[1].pvBuffer, wrapBufs[1].cbBuffer); - memcpy(*buffer + wrapBufs[0].cbBuffer + wrapBufs[1].cbBuffer, wrapBufs[2].pvBuffer, wrapBufs[2].cbBuffer); - - return SEC_E_OK; +int 
sspi_send_client_authz_id(CtxtHandle *context, PVOID *buffer, ULONG *buffer_length, char *user_plus_realm) +{ + SecPkgContext_Sizes sizes; + SECURITY_STATUS status = call_sspi_query_context_attributes(context, SECPKG_ATTR_SIZES, &sizes); + + if (status != SEC_E_OK) { + return status; + } + + int msgSize = 4 + 25; + char *msg = malloc((sizes.cbSecurityTrailer + msgSize + sizes.cbBlockSize) * sizeof(char)); + msg[sizes.cbSecurityTrailer + 0] = 1; + msg[sizes.cbSecurityTrailer + 1] = 0; + msg[sizes.cbSecurityTrailer + 2] = 0; + msg[sizes.cbSecurityTrailer + 3] = 0; + memcpy(&msg[sizes.cbSecurityTrailer + 4], user_plus_realm, 25); + + SecBuffer wrapBufs[3]; + SecBufferDesc wrapBufDesc; + wrapBufDesc.cBuffers = 3; + wrapBufDesc.pBuffers = wrapBufs; + wrapBufDesc.ulVersion = SECBUFFER_VERSION; + + wrapBufs[0].cbBuffer = sizes.cbSecurityTrailer; + wrapBufs[0].BufferType = SECBUFFER_TOKEN; + wrapBufs[0].pvBuffer = msg; + + wrapBufs[1].cbBuffer = msgSize; + wrapBufs[1].BufferType = SECBUFFER_DATA; + wrapBufs[1].pvBuffer = msg + sizes.cbSecurityTrailer; + + wrapBufs[2].cbBuffer = sizes.cbBlockSize; + wrapBufs[2].BufferType = SECBUFFER_PADDING; + wrapBufs[2].pvBuffer = msg + sizes.cbSecurityTrailer + msgSize; + + status = call_sspi_encrypt_message(context, SECQOP_WRAP_NO_ENCRYPT, &wrapBufDesc, 0); + if (status != SEC_E_OK) { + return status; + } + + *buffer_length = wrapBufs[0].cbBuffer + wrapBufs[1].cbBuffer + wrapBufs[2].cbBuffer; + *buffer = malloc(*buffer_length); + + memcpy(*buffer, wrapBufs[0].pvBuffer, wrapBufs[0].cbBuffer); + memcpy(*buffer + wrapBufs[0].cbBuffer, wrapBufs[1].pvBuffer, wrapBufs[1].cbBuffer); + memcpy(*buffer + wrapBufs[0].cbBuffer + wrapBufs[1].cbBuffer, wrapBufs[2].pvBuffer, wrapBufs[2].cbBuffer); + + return SEC_E_OK; } diff --git a/sasl/sasl_windows.go b/sasl/sasl_windows.go index ccaf02d20..82e904cd9 100644 --- a/sasl/sasl_windows.go +++ b/sasl/sasl_windows.go @@ -1,8 +1,6 @@ package sasl -// // #include "sasl_windows.h" -// import "C" import ( @@ 
-45,7 +43,7 @@ var initError error var initOnce sync.Once func initSSPI() { - rc := C.load_library() + rc := C.load_secur32_dll() if rc != 0 { initError = fmt.Errorf("Error loading libraries: %v", rc) } @@ -78,12 +76,10 @@ func New(username, password, mechanism, service, host string) (saslStepper, erro } else { status = C.sspi_acquire_credentials_handle(&ss.credHandle, ss.cstr(user), nil, ss.cstr(ss.domain)) } - if status != C.SEC_E_OK { ss.errored = true return nil, fmt.Errorf("Couldn't create new SSPI client, error code %v", status) } - return ss, nil } @@ -97,7 +93,6 @@ func (ss *saslSession) Close() { for _, cstr := range ss.stringsToFree { C.free(unsafe.Pointer(cstr)) } - for _, cbuf := range ss.buffersToFree { C.free(unsafe.Pointer(cbuf)) } @@ -124,7 +119,6 @@ func (ss *saslSession) Step(serverData []byte) (clientData []byte, done bool, er status = C.sspi_step(&ss.credHandle, ss.hasContext, &ss.context, &buffer, &bufferLength, ss.cstr(ss.target)) ss.buffersToFree = append(ss.buffersToFree, buffer) } - if status != C.SEC_E_OK && status != C.SEC_I_CONTINUE_NEEDED { ss.errored = true return nil, false, ss.handleSSPIErrorCode(status) @@ -145,6 +139,5 @@ func (ss *saslSession) handleSSPIErrorCode(code C.int) error { case code == C.SEC_E_TARGET_UNKNOWN: return fmt.Errorf("Target %v@%v not found", ss.target, ss.domain) } - return fmt.Errorf("Unknown error doing step %v, error code %v", ss.step, code) } diff --git a/sasl/sasl_windows.h b/sasl/sasl_windows.h index d33e7f814..a5b9c9f46 100644 --- a/sasl/sasl_windows.h +++ b/sasl/sasl_windows.h @@ -1,4 +1,5 @@ #include + #include "kerberos_sspi.h" SECURITY_STATUS SEC_ENTRY sspi_acquire_credentials_handle(CredHandle* cred_handle, char* username, char* password, char* domain); diff --git a/sasl/sspi_windows.c b/sasl/sspi_windows.c new file mode 100644 index 000000000..63f9a6f86 --- /dev/null +++ b/sasl/sspi_windows.c @@ -0,0 +1,96 @@ +// Code adapted from the NodeJS kerberos library: +// +// 
https://github.com/christkv/kerberos/tree/master/lib/win32/kerberos_sspi.c +// +// Under the terms of the Apache License, Version 2.0: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +#include + +#include "sspi_windows.h" + +static HINSTANCE sspi_secur32_dll = NULL; + +int load_secur32_dll() +{ + sspi_secur32_dll = LoadLibrary("secur32.dll"); + if (sspi_secur32_dll == NULL) { + return GetLastError(); + } + return 0; +} + +SECURITY_STATUS SEC_ENTRY call_sspi_encrypt_message(PCtxtHandle phContext, unsigned long fQOP, PSecBufferDesc pMessage, unsigned long MessageSeqNo) +{ + if (sspi_secur32_dll == NULL) { + return -1; + } + encryptMessage_fn pfn_encryptMessage = (encryptMessage_fn) GetProcAddress(sspi_secur32_dll, "EncryptMessage"); + if (!pfn_encryptMessage) { + return -2; + } + return (*pfn_encryptMessage)(phContext, fQOP, pMessage, MessageSeqNo); +} + +SECURITY_STATUS SEC_ENTRY call_sspi_acquire_credentials_handle( + LPSTR pszPrincipal, LPSTR pszPackage, unsigned long fCredentialUse, + void *pvLogonId, void *pAuthData, SEC_GET_KEY_FN pGetKeyFn, void *pvGetKeyArgument, + PCredHandle phCredential, PTimeStamp ptsExpiry) +{ + if (sspi_secur32_dll == NULL) { + return -1; + } + acquireCredentialsHandle_fn pfn_acquireCredentialsHandle; +#ifdef _UNICODE + pfn_acquireCredentialsHandle = (acquireCredentialsHandle_fn) GetProcAddress(sspi_secur32_dll, "AcquireCredentialsHandleW"); +#else + pfn_acquireCredentialsHandle = (acquireCredentialsHandle_fn) GetProcAddress(sspi_secur32_dll, "AcquireCredentialsHandleA"); +#endif + if (!pfn_acquireCredentialsHandle) { + return -2; + } + return (*pfn_acquireCredentialsHandle)( + pszPrincipal, pszPackage, fCredentialUse, pvLogonId, pAuthData, + pGetKeyFn, pvGetKeyArgument, phCredential, ptsExpiry); +} + +SECURITY_STATUS SEC_ENTRY call_sspi_initialize_security_context( + PCredHandle phCredential, PCtxtHandle phContext, LPSTR pszTargetName, + unsigned long fContextReq, unsigned long Reserved1, unsigned long TargetDataRep, + 
PSecBufferDesc pInput, unsigned long Reserved2, PCtxtHandle phNewContext, + PSecBufferDesc pOutput, unsigned long *pfContextAttr, PTimeStamp ptsExpiry) +{ + if (sspi_secur32_dll == NULL) { + return -1; + } + initializeSecurityContext_fn pfn_initializeSecurityContext; +#ifdef _UNICODE + pfn_initializeSecurityContext = (initializeSecurityContext_fn) GetProcAddress(sspi_secur32_dll, "InitializeSecurityContextW"); +#else + pfn_initializeSecurityContext = (initializeSecurityContext_fn) GetProcAddress(sspi_secur32_dll, "InitializeSecurityContextA"); +#endif + if (!pfn_initializeSecurityContext) { + return -2; + } + return (*pfn_initializeSecurityContext)( + phCredential, phContext, pszTargetName, fContextReq, Reserved1, TargetDataRep, + pInput, Reserved2, phNewContext, pOutput, pfContextAttr, ptsExpiry); +} + +SECURITY_STATUS SEC_ENTRY call_sspi_query_context_attributes(PCtxtHandle phContext, unsigned long ulAttribute, void *pBuffer) +{ + if (sspi_secur32_dll == NULL) { + return -1; + } + queryContextAttributes_fn pfn_queryContextAttributes; +#ifdef _UNICODE + pfn_queryContextAttributes = (queryContextAttributes_fn) GetProcAddress(sspi_secur32_dll, "QueryContextAttributesW"); +#else + pfn_queryContextAttributes = (queryContextAttributes_fn) GetProcAddress(sspi_secur32_dll, "QueryContextAttributesA"); +#endif + if (!pfn_queryContextAttributes) { + return -2; + } + return (*pfn_queryContextAttributes)(phContext, ulAttribute, pBuffer); +} diff --git a/sasl/sspi_windows.h b/sasl/sspi_windows.h new file mode 100644 index 000000000..d28327031 --- /dev/null +++ b/sasl/sspi_windows.h @@ -0,0 +1,70 @@ +// Code adapted from the NodeJS kerberos library: +// +// https://github.com/christkv/kerberos/tree/master/lib/win32/kerberos_sspi.h +// +// Under the terms of the Apache License, Version 2.0: +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +#ifndef SSPI_WINDOWS_H +#define SSPI_WINDOWS_H + +#define SECURITY_WIN32 1 + +#include +#include + +int load_secur32_dll(); + 
+SECURITY_STATUS SEC_ENTRY call_sspi_encrypt_message(PCtxtHandle phContext, unsigned long fQOP, PSecBufferDesc pMessage, unsigned long MessageSeqNo); + +typedef DWORD (WINAPI *encryptMessage_fn)(PCtxtHandle phContext, ULONG fQOP, PSecBufferDesc pMessage, ULONG MessageSeqNo); + +SECURITY_STATUS SEC_ENTRY call_sspi_acquire_credentials_handle( + LPSTR pszPrincipal, // Name of principal + LPSTR pszPackage, // Name of package + unsigned long fCredentialUse, // Flags indicating use + void *pvLogonId, // Pointer to logon ID + void *pAuthData, // Package specific data + SEC_GET_KEY_FN pGetKeyFn, // Pointer to GetKey() func + void *pvGetKeyArgument, // Value to pass to GetKey() + PCredHandle phCredential, // (out) Cred Handle + PTimeStamp ptsExpiry // (out) Lifetime (optional) +); + +typedef DWORD (WINAPI *acquireCredentialsHandle_fn)( + LPSTR pszPrincipal, LPSTR pszPackage, unsigned long fCredentialUse, + void *pvLogonId, void *pAuthData, SEC_GET_KEY_FN pGetKeyFn, void *pvGetKeyArgument, + PCredHandle phCredential, PTimeStamp ptsExpiry +); + +SECURITY_STATUS SEC_ENTRY call_sspi_initialize_security_context( + PCredHandle phCredential, // Cred to base context + PCtxtHandle phContext, // Existing context (OPT) + LPSTR pszTargetName, // Name of target + unsigned long fContextReq, // Context Requirements + unsigned long Reserved1, // Reserved, MBZ + unsigned long TargetDataRep, // Data rep of target + PSecBufferDesc pInput, // Input Buffers + unsigned long Reserved2, // Reserved, MBZ + PCtxtHandle phNewContext, // (out) New Context handle + PSecBufferDesc pOutput, // (inout) Output Buffers + unsigned long *pfContextAttr, // (out) Context attrs + PTimeStamp ptsExpiry // (out) Life span (OPT) +); + +typedef DWORD (WINAPI *initializeSecurityContext_fn)( + PCredHandle phCredential, PCtxtHandle phContext, LPSTR pszTargetName, unsigned long fContextReq, + unsigned long Reserved1, unsigned long TargetDataRep, PSecBufferDesc pInput, unsigned long Reserved2, + PCtxtHandle phNewContext, 
PSecBufferDesc pOutput, unsigned long *pfContextAttr, PTimeStamp ptsExpiry); + +SECURITY_STATUS SEC_ENTRY call_sspi_query_context_attributes( + PCtxtHandle phContext, // Context to query + unsigned long ulAttribute, // Attribute to query + void *pBuffer // Buffer for attributes +); + +typedef DWORD (WINAPI *queryContextAttributes_fn)( + PCtxtHandle phContext, unsigned long ulAttribute, void *pBuffer); + +#endif // SSPI_WINDOWS_H From 207a8e14e61d55930bde123f3fc389327a830ae3 Mon Sep 17 00:00:00 2001 From: Valeri Karpov Date: Mon, 29 Sep 2014 11:50:45 -0400 Subject: [PATCH 054/305] Fix SSPI include path --- sasl/sasl_windows.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sasl/sasl_windows.h b/sasl/sasl_windows.h index a5b9c9f46..94321b208 100644 --- a/sasl/sasl_windows.h +++ b/sasl/sasl_windows.h @@ -1,6 +1,6 @@ #include -#include "kerberos_sspi.h" +#include "sspi_windows.h" SECURITY_STATUS SEC_ENTRY sspi_acquire_credentials_handle(CredHandle* cred_handle, char* username, char* password, char* domain); int sspi_step(CredHandle* cred_handle, int has_context, CtxtHandle* context, PVOID* buffer, ULONG* buffer_length, char* target); From 514f4fc05f45332731438180dc973f35eba85fe2 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 29 Sep 2014 16:44:23 -0300 Subject: [PATCH 055/305] Replace count command's nil query with empty doc. Fixes MGO-47, identified by Daniel Gottlieb as having been introduced by the 2.7.7-pre change d71566a5 in the server. 
--- session.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/session.go b/session.go index 8459c8c1d..7e5f31e14 100644 --- a/session.go +++ b/session.go @@ -2976,9 +2976,12 @@ func (q *Query) Count() (n int, err error) { dbname := op.collection[:c] cname := op.collection[c+1:] - + query := op.query + if query == nil { + query = bson.D{} + } result := struct{ N int }{} - err = session.DB(dbname).Run(countCmd{cname, op.query, limit, op.skip}, &result) + err = session.DB(dbname).Run(countCmd{cname, query, limit, op.skip}, &result) return result.N, err } @@ -3164,7 +3167,7 @@ func (q *Query) MapReduce(job *MapReduce, result interface{}) (info *MapReduceIn } if cmd.Out == nil { - cmd.Out = bson.M{"inline": 1} + cmd.Out = bson.D{{"inline", 1}} } var doc mapReduceResult From 8df95abd92507f1c9f3c919b84003d6ed71ad63b Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Tue, 30 Sep 2014 03:15:31 -0300 Subject: [PATCH 056/305] Support SCRAM-SHA-1 authentication. Fixes MGO-37. --- auth.go | 26 +++- auth_test.go | 51 ++++++- internal/scram/scram.go | 266 +++++++++++++++++++++++++++++++++++ internal/scram/scram_test.go | 67 +++++++++ 4 files changed, 405 insertions(+), 5 deletions(-) create mode 100644 internal/scram/scram.go create mode 100644 internal/scram/scram_test.go diff --git a/auth.go b/auth.go index 72bb10175..81b0a132d 100644 --- a/auth.go +++ b/auth.go @@ -28,12 +28,14 @@ package mgo import ( "crypto/md5" + "crypto/sha1" "encoding/hex" "errors" "fmt" "sync" "gopkg.in/mgo.v2/bson" + "gopkg.in/mgo.v2/internal/scram" ) type authCmd struct { @@ -248,7 +250,10 @@ func (socket *mongoSocket) loginPlain(cred Credential) error { func (socket *mongoSocket) loginSASL(cred Credential) error { var sasl saslStepper var err error - if len(cred.ServiceHost) > 0 { + if cred.Mechanism == "SCRAM-SHA-1" { + // SCRAM is handled without external libraries. 
+ sasl = saslNewScram(cred) + } else if len(cred.ServiceHost) > 0 { sasl, err = saslNew(cred, cred.ServiceHost) } else { sasl, err = saslNew(cred, socket.Server().Addr) @@ -324,6 +329,25 @@ func (socket *mongoSocket) loginSASL(cred Credential) error { return nil } +func saslNewScram(cred Credential) *saslScram { + credsum := md5.New() + credsum.Write([]byte(cred.Username + ":mongo:" + cred.Password)) + client := scram.NewClient(sha1.New, cred.Username, hex.EncodeToString(credsum.Sum(nil))) + return &saslScram{cred: cred, client: client} +} + +type saslScram struct { + cred Credential + client *scram.Client +} + +func (s *saslScram) Close() {} + +func (s *saslScram) Step(serverData []byte) (clientData []byte, done bool, err error) { + more := s.client.Step(serverData) + return s.client.Out(), !more, s.client.Err() +} + func (socket *mongoSocket) loginRun(db string, query, result interface{}, f func() error) error { var mutex sync.Mutex var replyErr error diff --git a/auth_test.go b/auth_test.go index 93d8aa5dd..46e0d2b11 100644 --- a/auth_test.go +++ b/auth_test.go @@ -842,6 +842,40 @@ func (s *S) TestAuthDirectWithLogin(c *C) { } } +// TODO SCRAM-SHA-1 will become the default, and this flag will go away. +var scramFlag = flag.String("scram", "", "Host to test SCRAM-SHA-1 authentication against (depends on custom environment)") + +func (s *S) TestAuthScramSha1Cred(c *C) { + if *scramFlag == "" { + c.Skip("no -plain") + } + cred := &mgo.Credential{ + Username: "root", + Password: "rapadura", + Mechanism: "SCRAM-SHA-1", + Source: "admin", + } + c.Logf("Connecting to %s...", *scramFlag) + session, err := mgo.Dial(*scramFlag) + c.Assert(err, IsNil) + defer session.Close() + + mycoll := session.DB("admin").C("mycoll") + + c.Logf("Connected! 
Testing the need for authentication...") + err = mycoll.Find(nil).One(nil) + c.Assert(err, ErrorMatches, "unauthorized|not authorized .*") + + c.Logf("Authenticating...") + err = session.Login(cred) + c.Assert(err, IsNil) + c.Logf("Authenticated!") + + c.Logf("Connected! Testing the need for authentication...") + err = mycoll.Find(nil).One(nil) + c.Assert(err, Equals, mgo.ErrNotFound) +} + var ( plainFlag = flag.String("plain", "", "Host to test PLAIN authentication against (depends on custom environment)") plainUser = "einstein" @@ -901,10 +935,10 @@ var ( winKerberosPasswordEnv = "MGO_KERBEROS_PASSWORD" ) -// Kerberos has its own suite because it talks to a remote server and thus -// doesn't need the usual Setup() and Teardown() -type KerberosSuite struct { -} + +// Kerberos has its own suite because it talks to a remote server +// that is prepared to authenticate against a kerberos deployment. +type KerberosSuite struct{} var _ = Suite(&KerberosSuite{}) @@ -913,11 +947,20 @@ func (kerberosSuite *KerberosSuite) SetUpSuite(c *C) { mgo.SetStats(true) } +func (kerberosSuite *KerberosSuite) TearDownSuite(c *C) { + mgo.SetDebug(false) + mgo.SetStats(false) +} + func (kerberosSuite *KerberosSuite) SetUpTest(c *C) { mgo.SetLogger((*cLogger)(c)) mgo.ResetStats() } +func (kerberosSuite *KerberosSuite) TearDownTest(c *C) { + mgo.SetLogger(nil) +} + func (kerberosSuite *KerberosSuite) TestAuthKerberosCred(c *C) { if !*kerberosFlag { c.Skip("no -kerberos") diff --git a/internal/scram/scram.go b/internal/scram/scram.go new file mode 100644 index 000000000..80cda9135 --- /dev/null +++ b/internal/scram/scram.go @@ -0,0 +1,266 @@ +// mgo - MongoDB driver for Go +// +// Copyright (c) 2014 - Gustavo Niemeyer +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. 
Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package scram implements a SCRAM-{SHA-1,etc} client per RFC5802. +// +// http://tools.ietf.org/html/rfc5802 +// +package scram + +import ( + "bytes" + "crypto/hmac" + "crypto/rand" + "encoding/base64" + "fmt" + "hash" + "strconv" + "strings" +) + +// Client implements a SCRAM-* client (SCRAM-SHA-1, SCRAM-SHA-256, etc).
+// +// A Client may be used within a SASL conversation with logic resembling: +// +// var in []byte +// var client = scram.NewClient(sha1.New, user, pass) +// for client.Step(in) { +// out := client.Out() +// // send out to server +// in := serverOut +// } +// if client.Err() != nil { +// // auth failed +// } +// +type Client struct { + newHash func() hash.Hash + + user string + pass string + step int + out bytes.Buffer + err error + + clientNonce []byte + serverNonce []byte + saltedPass []byte + authMsg bytes.Buffer +} + +// NewClient returns a new SCRAM-* client with the provided hash algorithm. +// +// For SCRAM-SHA-1, for example, use: +// +// client := scram.NewClient(sha1.New, user, pass) +// +func NewClient(newHash func() hash.Hash, user, pass string) *Client { + c := &Client{ + newHash: newHash, + user: user, + pass: pass, + } + c.out.Grow(256) + c.authMsg.Grow(256) + return c +} + +// Out returns the data to be sent to the server in the current step. +func (c *Client) Out() []byte { + if c.out.Len() == 0 { + return nil + } + return c.out.Bytes() +} + +// Err returns the error that occurred, or nil if there were no errors. +func (c *Client) Err() error { + return c.err +} + +// SetNonce sets the client nonce to the provided value. +// If not set, the nonce is generated automatically out of crypto/rand on the first step. +func (c *Client) SetNonce(nonce []byte) { + c.clientNonce = nonce +} + +var escaper = strings.NewReplacer("=", "=3D", ",", "=2C") + +// Step processes the incoming data from the server and makes the +// next round of data for the server available via Client.Out. +// Step returns false if there are no errors and more data is +// still expected.
+func (c *Client) Step(in []byte) bool { + c.out.Reset() + if c.step > 2 || c.err != nil { + return false + } + c.step++ + switch c.step { + case 1: + c.err = c.step1(in) + case 2: + c.err = c.step2(in) + case 3: + c.err = c.step3(in) + } + return c.step > 2 || c.err != nil +} + +func (c *Client) step1(in []byte) error { + if len(c.clientNonce) == 0 { + const nonceLen = 6 + buf := make([]byte, nonceLen + b64.EncodedLen(nonceLen)) + if _, err := rand.Read(buf[:nonceLen]); err != nil { + return fmt.Errorf("cannot read random SCRAM-SHA-1 nonce from operating system: %v", err) + } + c.clientNonce = buf[nonceLen:] + b64.Encode(c.clientNonce, buf[:nonceLen]) + } + c.authMsg.WriteString("n=") + escaper.WriteString(&c.authMsg, c.user) + c.authMsg.WriteString(",r=") + c.authMsg.Write(c.clientNonce) + + c.out.WriteString("n,,") + c.out.Write(c.authMsg.Bytes()) + return nil +} + +var b64 = base64.StdEncoding + +func (c *Client) step2(in []byte) error { + c.authMsg.WriteByte(',') + c.authMsg.Write(in) + + fields := bytes.Split(in, []byte(",")) + if len(fields) != 3 { + return fmt.Errorf("expected 3 fields in first SCRAM-SHA-1 server message, got %d: %q", len(fields), in) + } + if !bytes.HasPrefix(fields[0], []byte("r=")) || len(fields[0]) < 2 { + return fmt.Errorf("server sent an invalid SCRAM-SHA-1 nonce: %q", fields[0]) + } + if !bytes.HasPrefix(fields[1], []byte("s=")) || len(fields[1]) < 6 { + return fmt.Errorf("server sent an invalid SCRAM-SHA-1 salt: %q", fields[1]) + } + if !bytes.HasPrefix(fields[2], []byte("i=")) || len(fields[2]) < 6 { + return fmt.Errorf("server sent an invalid SCRAM-SHA-1 iteration count: %q", fields[2]) + } + + c.serverNonce = fields[0][2:] + if !bytes.HasPrefix(c.serverNonce, c.clientNonce) { + return fmt.Errorf("server SCRAM-SHA-1 nonce is not prefixed by client nonce: got %q, want %q+\"...\"", c.serverNonce, c.clientNonce) + } + + salt := make([]byte, b64.DecodedLen(len(fields[1][2:]))) + n, err := b64.Decode(salt, fields[1][2:]) + if err != 
nil { + return fmt.Errorf("cannot decode SCRAM-SHA-1 salt sent by server: %q", fields[1]) + } + salt = salt[:n] + iterCount, err := strconv.Atoi(string(fields[2][2:])) + if err != nil { + return fmt.Errorf("server sent an invalid SCRAM-SHA-1 iteration count: %q", fields[2]) + } + c.saltPassword(salt, iterCount) + + c.authMsg.WriteString(",c=biws,r=") + c.authMsg.Write(c.serverNonce) + + c.out.WriteString("c=biws,r=") + c.out.Write(c.serverNonce) + c.out.WriteString(",p=") + c.out.Write(c.clientProof()) + return nil +} + +func (c *Client) step3(in []byte) error { + var isv, ise bool + var fields = bytes.Split(in, []byte(",")) + if len(fields) == 1 { + isv = bytes.HasPrefix(fields[0], []byte("v=")) + ise = bytes.HasPrefix(fields[0], []byte("e=")) + } + if ise { + return fmt.Errorf("SCRAM-SHA-1 authentication error: %s", fields[0][2:]) + } else if !isv { + return fmt.Errorf("unsupported SCRAM-SHA-1 final message from server: %q", in) + } + if !bytes.Equal(c.serverSignature(), fields[0][2:]) { + return fmt.Errorf("cannot authenticate SCRAM-SHA-1 server signature: %q", fields[0][2:]) + } + return nil +} + +func (c *Client) saltPassword(salt []byte, iterCount int) { + mac := hmac.New(c.newHash, []byte(c.pass)) + mac.Write(salt) + mac.Write([]byte{0, 0, 0, 1}) + ui := mac.Sum(nil) + hi := make([]byte, len(ui)) + copy(hi, ui) + for i := 1; i < iterCount; i++ { + mac.Reset() + mac.Write(ui) + mac.Sum(ui[:0]) + for j, b := range ui { + hi[j] ^= b + } + } + c.saltedPass = hi +} + +func (c *Client) clientProof() []byte { + mac := hmac.New(c.newHash, c.saltedPass) + mac.Write([]byte("Client Key")) + clientKey := mac.Sum(nil) + hash := c.newHash() + hash.Write(clientKey) + storedKey := hash.Sum(nil) + mac = hmac.New(c.newHash, storedKey) + mac.Write(c.authMsg.Bytes()) + clientProof := mac.Sum(nil) + for i, b := range clientKey { + clientProof[i] ^= b + } + clientProof64 := make([]byte, b64.EncodedLen(len(clientProof))) + b64.Encode(clientProof64, clientProof) + return 
clientProof64 +} + +func (c *Client) serverSignature() []byte { + mac := hmac.New(c.newHash, c.saltedPass) + mac.Write([]byte("Server Key")) + serverKey := mac.Sum(nil) + + mac = hmac.New(c.newHash, serverKey) + mac.Write(c.authMsg.Bytes()) + serverSignature := mac.Sum(nil) + + encoded := make([]byte, b64.EncodedLen(len(serverSignature))) + b64.Encode(encoded, serverSignature) + return encoded +} diff --git a/internal/scram/scram_test.go b/internal/scram/scram_test.go new file mode 100644 index 000000000..9c20fdfc4 --- /dev/null +++ b/internal/scram/scram_test.go @@ -0,0 +1,67 @@ +package scram_test + +import ( + "crypto/sha1" + "testing" + + . "gopkg.in/check.v1" + "gopkg.in/mgo.v2/internal/scram" + "strings" +) + +var _ = Suite(&S{}) + +func Test(t *testing.T) { TestingT(t) } + +type S struct{} + +var tests = [][]string{{ + "U: user pencil", + "N: fyko+d2lbbFgONRv9qkxdawL", + "C: n,,n=user,r=fyko+d2lbbFgONRv9qkxdawL", + "S: r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,s=QSXCR+Q6sek8bf92,i=4096", + "C: c=biws,r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,p=v0X8v3Bz2T0CJGbJQyF0X+HI4Ts=", + "S: v=rmF9pqV8S7suAoZWja4dJRkFsKQ=", +}, { + "U: root fe8c89e308ec08763df36333cbf5d3a2", + "N: OTcxNDk5NjM2MzE5", + "C: n,,n=root,r=OTcxNDk5NjM2MzE5", + "S: r=OTcxNDk5NjM2MzE581Ra3provgG0iDsMkDiIAlrh4532dDLp,s=XRDkVrFC9JuL7/F4tG0acQ==,i=10000", + "C: c=biws,r=OTcxNDk5NjM2MzE581Ra3provgG0iDsMkDiIAlrh4532dDLp,p=6y1jp9R7ETyouTXS9fW9k5UHdBc=", + "S: v=LBnd9dUJRxdqZiEq91NKP3z/bHA=", +}} + +func (s *S) TestExamples(c *C) { + for _, steps := range tests { + if len(steps) < 2 || len(steps[0]) < 3 || !strings.HasPrefix(steps[0], "U: ") { + c.Fatalf("Invalid test: %#v", steps) + } + auth := strings.Fields(steps[0][3:]) + client := scram.NewClient(sha1.New, auth[0], auth[1]) + first, done := true, false + c.Logf("-----") + c.Logf("%s", steps[0]) + for _, step := range steps[1:] { + c.Logf("%s", step) + switch step[:3] { + case "N: ": + client.SetNonce([]byte(step[3:])) + case "C: ": + if 
first { + first = false + done = client.Step(nil) + } + c.Assert(done, Equals, false) + c.Assert(client.Err(), IsNil) + c.Assert(string(client.Out()), Equals, step[3:]) + case "S: ": + first = false + done = client.Step([]byte(step[3:])) + default: + panic("invalid test line: " + step) + } + } + c.Assert(done, Equals, true) + c.Assert(client.Err(), IsNil) + } +} From 7744dd3be3c243e335e1cc520fa13680d66535d9 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Tue, 30 Sep 2014 23:35:50 -0300 Subject: [PATCH 057/305] Do not build sasl/sasl.c on Windows. --- sasl/sasl.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sasl/sasl.c b/sasl/sasl.c index f518abd88..8be0bc459 100644 --- a/sasl/sasl.c +++ b/sasl/sasl.c @@ -1,3 +1,5 @@ +// +build !windows + #include #include #include From 251dc97da589d3483d8810e6b69e1668461457b8 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Tue, 30 Sep 2014 23:37:48 -0300 Subject: [PATCH 058/305] Support MONGODB-CR auth as an alias to MONGO-CR. Name was changed to MONGODB-CR in SERVER-8501. --- auth.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/auth.go b/auth.go index 81b0a132d..a0fe5146b 100644 --- a/auth.go +++ b/auth.go @@ -179,7 +179,7 @@ func (socket *mongoSocket) Login(cred Credential) error { var err error switch cred.Mechanism { - case "", "MONGO-CR": + case "", "MONGODB-CR", "MONGO-CR": // Name changed to MONGODB-CR in SERVER-8501. 
err = socket.loginClassic(cred) case "PLAIN": err = socket.loginPlain(cred) From 28c995153f09e16a618cf395781a62fe71701cff Mon Sep 17 00:00:00 2001 From: mike o'brien Date: Wed, 1 Oct 2014 14:17:25 -0400 Subject: [PATCH 059/305] Support for DBPointer in bson package --- bson/bson.go | 8 ++++++++ bson/bson_test.go | 2 ++ bson/decode.go | 2 ++ bson/encode.go | 10 ++++++++++ 4 files changed, 22 insertions(+) diff --git a/bson/bson.go b/bson/bson.go index 3ebfd8438..9943e374b 100644 --- a/bson/bson.go +++ b/bson/bson.go @@ -386,6 +386,14 @@ type JavaScript struct { Scope interface{} } +// DBPointer is a type that refers to a document in some namespace by wrapping +// a string containing the namespace itself, and the ObjectId in which the _id +// of the document is contained +type DBPointer struct { + Namespace string + Id ObjectId +} + const initialBufferSize = 64 func handleErr(err *error) { diff --git a/bson/bson_test.go b/bson/bson_test.go index 3d9799850..654df5308 100644 --- a/bson/bson_test.go +++ b/bson/bson_test.go @@ -139,6 +139,8 @@ var allItems = []testItemType{ "\x06_\x00"}, {bson.M{"_": bson.ObjectId("0123456789ab")}, "\x07_\x000123456789ab"}, + {bson.M{"_": bson.DBPointer{"testnamespace", bson.ObjectId("0123456789ab")}}, + "\x0C_\x00\x0e\x00\x00\x00testnamespace\x000123456789ab"}, {bson.M{"_": false}, "\x08_\x00\x00"}, {bson.M{"_": true}, diff --git a/bson/decode.go b/bson/decode.go index f1c8b4f7c..b389f60e4 100644 --- a/bson/decode.go +++ b/bson/decode.go @@ -499,6 +499,8 @@ func (d *decoder) readElemTo(out reflect.Value, kind byte) (good bool) { in = nil case 0x0B: // RegEx in = d.readRegEx() + case 0x0C: + in = DBPointer{Namespace: d.readStr(), Id: ObjectId(d.readBytes(12))} case 0x0D: // JavaScript without scope in = JavaScript{Code: d.readStr()} case 0x0E: // Symbol diff --git a/bson/encode.go b/bson/encode.go index 6544748cb..03a15484d 100644 --- a/bson/encode.go +++ b/bson/encode.go @@ -43,6 +43,7 @@ import ( var ( typeBinary = 
reflect.TypeOf(Binary{}) typeObjectId = reflect.TypeOf(ObjectId("")) + typeDBPointer = reflect.TypeOf(DBPointer{"", ObjectId("")}) typeSymbol = reflect.TypeOf(Symbol("")) typeMongoTimestamp = reflect.TypeOf(MongoTimestamp(0)) typeOrderKey = reflect.TypeOf(MinKey) @@ -381,6 +382,15 @@ func (e *encoder) addElem(name string, v reflect.Value, minSize bool) { e.addElemName('\x05', name) e.addBinary(s.Kind, s.Data) + case DBPointer: + e.addElemName('\x0C', name) + e.addStr(s.Namespace) + if len(s.Id) != 12 { + panic("ObjectIDs must be exactly 12 bytes long (got " + + strconv.Itoa(len(s.Id)) + ")") + } + e.addBytes([]byte(s.Id)...) + case RegEx: e.addElemName('\x0B', name) e.addCStr(s.Pattern) From cdb5092645acea255eadfd468cb873af97932acb Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Wed, 1 Oct 2014 23:50:17 -0300 Subject: [PATCH 060/305] Support Indexes and CollectionNames via commands. --- session.go | 73 +++++++++++++++++++++++++++++++++++++++---------- session_test.go | 6 +++- 2 files changed, 64 insertions(+), 15 deletions(-) diff --git a/session.go b/session.go index 7e5f31e14..179d06168 100644 --- a/session.go +++ b/session.go @@ -1179,6 +1179,23 @@ func (c *Collection) DropIndex(key ...string) error { // // See the EnsureIndex method for more details on indexes. func (c *Collection) Indexes() (indexes []Index, err error) { + // Try with a command. + var cmdResult struct { + Indexes []indexSpec + } + err = c.Database.Run(bson.D{{"listIndexes", c.Name}}, &cmdResult) + if err == nil { + for _, spec := range cmdResult.Indexes { + indexes = append(indexes, indexFromSpec(spec)) + } + sort.Sort(indexSlice(indexes)) + return indexes, nil + } + if err != nil && !isNoCmd(err) { + return nil, err + } + + // Command not yet supported. Query the database instead. 
query := c.Database.C("system.indexes").Find(bson.M{"ns": c.FullName}) iter := query.Sort("name").Iter() for { @@ -1186,21 +1203,30 @@ func (c *Collection) Indexes() (indexes []Index, err error) { if !iter.Next(&spec) { break } - index := Index{ - Name: spec.Name, - Key: simpleIndexKey(spec.Key), - Unique: spec.Unique, - DropDups: spec.DropDups, - Background: spec.Background, - Sparse: spec.Sparse, - ExpireAfter: time.Duration(spec.ExpireAfter) * time.Second, - } - indexes = append(indexes, index) + indexes = append(indexes, indexFromSpec(spec)) } err = iter.Close() - return + sort.Sort(indexSlice(indexes)) + return indexes, nil } +func indexFromSpec(spec indexSpec) Index { + return Index{ + Name: spec.Name, + Key: simpleIndexKey(spec.Key), + Unique: spec.Unique, + DropDups: spec.DropDups, + Background: spec.Background, + Sparse: spec.Sparse, + ExpireAfter: time.Duration(spec.ExpireAfter) * time.Second, + } +} + +type indexSlice []Index +func (idxs indexSlice) Len() int { return len(idxs) } +func (idxs indexSlice) Less(i, j int) bool { return idxs[i].Name < idxs[j].Name } +func (idxs indexSlice) Swap(i, j int) { idxs[i], idxs[j] = idxs[j], idxs[i] } + func simpleIndexKey(realKey bson.D) (key []string) { for i := range realKey { field := realKey[i].Name @@ -2472,14 +2498,33 @@ func (s *Session) FindRef(ref *DBRef) *Query { return c.FindId(ref.Id) } -// CollectionNames returns the collection names present in database. +// CollectionNames returns the collection names present in the db database. func (db *Database) CollectionNames() (names []string, err error) { - c := len(db.Name) + 1 + // Try with a command. 
+ var cmdResult struct { + Collections []struct { + Name string + } + } + err = db.Run(bson.D{{"listCollections", 1}}, &cmdResult) + if err == nil { + for _, coll := range cmdResult.Collections { + names = append(names, coll.Name) + } + sort.Strings(names) + return names, err + } + if err != nil && !isNoCmd(err) { + return nil, err + } + + // Command not yet supported. Query the database instead. + nameIndex := len(db.Name) + 1 iter := db.C("system.namespaces").Find(nil).Iter() var result *struct{ Name string } for iter.Next(&result) { if strings.Index(result.Name, "$") < 0 || strings.Index(result.Name, ".oplog.$") >= 0 { - names = append(names, result.Name[c:]) + names = append(names, result.Name[nameIndex:]) } } if err := iter.Close(); err != nil { diff --git a/session_test.go b/session_test.go index 00eaacda1..9e40b4260 100644 --- a/session_test.go +++ b/session_test.go @@ -2436,11 +2436,15 @@ func (s *S) TestEnsureIndex(c *C) { delete(result2, "v") expected2 := M{ "name": "a_1_b_-1", - "key": M{"a": 1, "b": -1}, + "key": M{"a": 1, "b": -1},b "ns": "mydb.mycoll", "unique": true, "dropDups": true, } + if s.versionAtLeast(2, 7) { + // Was deprecated in 2.6, and not being reported by 2.7+. 
+ delete(expected2, "dropDups") + } c.Assert(result2, DeepEquals, expected2) delete(result3, "v") From 0c13dbf2a6fe4cac8b082044040f19978e6d1aa0 Mon Sep 17 00:00:00 2001 From: Las Zenow Date: Tue, 2 Sep 2014 00:22:47 -0500 Subject: [PATCH 061/305] Add text indexes support --- session.go | 102 +++++++++++++++++++++++++++++++++-------------------- 1 file changed, 64 insertions(+), 38 deletions(-) diff --git a/session.go b/session.go index dffeca798..3b941b643 100644 --- a/session.go +++ b/session.go @@ -910,14 +910,17 @@ func (db *Database) RemoveUser(user string) error { } type indexSpec struct { - Name, NS string - Key bson.D - Unique bool ",omitempty" - DropDups bool "dropDups,omitempty" - Background bool ",omitempty" - Sparse bool ",omitempty" - Bits, Min, Max int ",omitempty" - ExpireAfter int "expireAfterSeconds,omitempty" + Name, NS string + Key bson.D + Unique bool ",omitempty" + DropDups bool "dropDups,omitempty" + Background bool ",omitempty" + Sparse bool ",omitempty" + Bits, Min, Max int ",omitempty" + ExpireAfter int "expireAfterSeconds,omitempty" + Weights bson.D ",omitempty" + DefaultLanguage string "default_language,omitempty" + LanguageOverride string "language_override,omitempty" } type Index struct { @@ -932,14 +935,26 @@ type Index struct { Name string // Index name, computed by EnsureIndex Bits, Min, Max int // Properties for spatial indexes + + // Properties for text indexes + DefaultLanguage string + LanguageOverride string +} + +type indexKeyInfo struct { + name string + key bson.D + weights bson.D } -func parseIndexKey(key []string) (name string, realKey bson.D, err error) { +func parseIndexKey(key []string) (*indexKeyInfo, error) { + var keyInfo indexKeyInfo + isText := false var order interface{} for _, field := range key { raw := field - if name != "" { - name += "_" + if keyInfo.name != "" { + keyInfo.name += "_" } var kind string if field != "" { @@ -947,7 +962,7 @@ func parseIndexKey(key []string) (name string, realKey bson.D, err error) 
{ if c := strings.Index(field, ":"); c > 1 && c < len(field)-1 { kind = field[1:c] field = field[c+1:] - name += field + "_" + kind + keyInfo.name += field + "_" + kind } } switch field[0] { @@ -960,32 +975,40 @@ func parseIndexKey(key []string) (name string, realKey bson.D, err error) { // The shell used to render this field as key_ instead of key_2d, // and mgo followed suit. This has been fixed in recent server // releases, and mgo followed as well. - name += field + "_2d" + keyInfo.name += field + "_2d" case '-': order = -1 field = field[1:] - name += field + "_-1" + keyInfo.name += field + "_-1" case '+': field = field[1:] fallthrough default: if kind == "" { order = 1 - name += field + "_1" + keyInfo.name += field + "_1" } else { order = kind } } } if field == "" || kind != "" && order != kind { - return "", nil, fmt.Errorf(`invalid index key: want "[$:][-]", got %q`, raw) + return nil, fmt.Errorf(`invalid index key: want "[$:][-]", got %q`, raw) + } + if kind == "text" { + if !isText { + keyInfo.key = append(keyInfo.key, bson.DocElem{"_fts", "text"}, bson.DocElem{"_ftsx", 1}) + isText = true + } + keyInfo.weights = append(keyInfo.weights, bson.DocElem{field, 1}) + } else { + keyInfo.key = append(keyInfo.key, bson.DocElem{field, order}) } - realKey = append(realKey, bson.DocElem{field, order}) } - if name == "" { - return "", nil, errors.New("invalid index key: no fields provided") + if keyInfo.name == "" { + return nil, errors.New("invalid index key: no fields provided") } - return + return &keyInfo, nil } // EnsureIndexKey ensures an index with the given key exists, creating it @@ -1072,29 +1095,32 @@ func (c *Collection) EnsureIndexKey(key ...string) error { // http://www.mongodb.org/display/DOCS/Multikeys // func (c *Collection) EnsureIndex(index Index) error { - name, realKey, err := parseIndexKey(index.Key) + keyInfo, err := parseIndexKey(index.Key) if err != nil { return err } session := c.Database.Session - cacheKey := c.FullName + "\x00" + name + 
cacheKey := c.FullName + "\x00" + keyInfo.name if session.cluster().HasCachedIndex(cacheKey) { return nil } spec := indexSpec{ - Name: name, - NS: c.FullName, - Key: realKey, - Unique: index.Unique, - DropDups: index.DropDups, - Background: index.Background, - Sparse: index.Sparse, - Bits: index.Bits, - Min: index.Min, - Max: index.Max, - ExpireAfter: int(index.ExpireAfter / time.Second), + Name: keyInfo.name, + NS: c.FullName, + Key: keyInfo.key, + Unique: index.Unique, + DropDups: index.DropDups, + Background: index.Background, + Sparse: index.Sparse, + Bits: index.Bits, + Min: index.Min, + Max: index.Max, + ExpireAfter: int(index.ExpireAfter / time.Second), + Weights: keyInfo.weights, + DefaultLanguage: index.DefaultLanguage, + LanguageOverride: index.LanguageOverride, } session = session.Clone() @@ -1123,13 +1149,13 @@ func (c *Collection) EnsureIndex(index Index) error { // // See the EnsureIndex method for more details on indexes. func (c *Collection) DropIndex(key ...string) error { - name, _, err := parseIndexKey(key) + keyInfo, err := parseIndexKey(key) if err != nil { return err } session := c.Database.Session - cacheKey := c.FullName + "\x00" + name + cacheKey := c.FullName + "\x00" + keyInfo.name session.cluster().CacheIndex(cacheKey, false) session = session.Clone() @@ -1141,7 +1167,7 @@ func (c *Collection) DropIndex(key ...string) error { ErrMsg string Ok bool }{} - err = db.Run(bson.D{{"dropIndexes", c.Name}, {"index", name}}, &result) + err = db.Run(bson.D{{"dropIndexes", c.Name}, {"index", keyInfo.name}}, &result) if err != nil { return err } @@ -2254,8 +2280,8 @@ func (q *Query) Explain(result interface{}) error { // func (q *Query) Hint(indexKey ...string) *Query { q.m.Lock() - _, realKey, err := parseIndexKey(indexKey) - q.op.options.Hint = realKey + keyInfo, err := parseIndexKey(indexKey) + q.op.options.Hint = keyInfo.key q.op.hasOptions = true q.m.Unlock() if err != nil { From 9e6dbb354206a7f4a21f6a3f4f1f91584064a982 Mon Sep 17 00:00:00 2001 
From: Las Zenow Date: Tue, 2 Sep 2014 00:23:38 -0500 Subject: [PATCH 062/305] Add score sort support --- session.go | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/session.go b/session.go index 3b941b643..224ec7f0b 100644 --- a/session.go +++ b/session.go @@ -2197,18 +2197,25 @@ func (q *Query) Select(selector interface{}) *Query { // query1 := collection.Find(nil).Sort("firstname", "lastname") // query2 := collection.Find(nil).Sort("-age") // query3 := collection.Find(nil).Sort("$natural") +// query4 := collection.Find(nil).Select(bson.M{"score": bson.M{"$meta": "textScore"}}).Sort("$textScore:score") // // Relevant documentation: // // http://www.mongodb.org/display/DOCS/Sorting+and+Natural+Order // func (q *Query) Sort(fields ...string) *Query { - // TODO // query4 := collection.Find(nil).Sort("score:{$meta:textScore}") q.m.Lock() var order bson.D for _, field := range fields { n := 1 + var kind string if field != "" { + if field[0] == '$' { + if c := strings.Index(field, ":"); c > 1 && c < len(field)-1 { + kind = field[1:c] + field = field[c+1:] + } + } switch field[0] { case '+': field = field[1:] @@ -2220,7 +2227,11 @@ func (q *Query) Sort(fields ...string) *Query { if field == "" { panic("Sort: empty field name") } - order = append(order, bson.DocElem{field, n}) + if kind == "textScore" { + order = append(order, bson.DocElem{field, bson.M{"$meta": kind}}) + } else { + order = append(order, bson.DocElem{field, n}) + } } q.op.options.OrderBy = order q.op.hasOptions = true From beff0a0cc47dfe0e5fd13ae0915f6c1863987325 Mon Sep 17 00:00:00 2001 From: Las Zenow Date: Thu, 4 Sep 2014 22:17:04 -0500 Subject: [PATCH 063/305] Add test for text index and search --- session_test.go | 52 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/session_test.go b/session_test.go index 788035bd5..9be39a8a2 100644 --- a/session_test.go +++ b/session_test.go @@ -2073,6 +2073,58 @@ func (s *S) 
TestSortWithBadArgs(c *C) { } } +func (s *S) TestSortScoreText(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + err = coll.EnsureIndex(mgo.Index{ + Key: []string{"$text:a", "$text:b"}, + }) + c.Assert(err, IsNil) + + err = coll.Insert(M{ + "a": "none", + "b": "twice: foo foo", + }) + c.Assert(err, IsNil) + err = coll.Insert(M{ + "a": "just once: foo", + "b": "none", + }) + c.Assert(err, IsNil) + err = coll.Insert(M{ + "a": "many: foo foo foo", + "b": "none", + }) + c.Assert(err, IsNil) + err = coll.Insert(M{ + "a": "none", + "b": "none", + "c": "ignore: foo", + }) + c.Assert(err, IsNil) + + query := coll.Find(M{"$text": M{"$search": "foo"}}) + query.Select(M{"score": M{"$meta": "textScore"}}) + query.Sort("$textScore:score") + iter := query.Iter() + + var r struct{ A, B string } + var results []string + for iter.Next(&r) { + results = append(results, r.A, r.B) + } + + c.Assert(results, DeepEquals, []string{ + "many: foo foo foo", "none", + "none", "twice: foo foo", + "just once: foo", "none", + }) +} + func (s *S) TestPrefetching(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) From 292c6755f18370eee295d9070584ef3deb9ba8b1 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 6 Oct 2014 18:22:05 +0200 Subject: [PATCH 064/305] Fix typo. 
--- session_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/session_test.go b/session_test.go index 9e40b4260..874303429 100644 --- a/session_test.go +++ b/session_test.go @@ -2436,7 +2436,7 @@ func (s *S) TestEnsureIndex(c *C) { delete(result2, "v") expected2 := M{ "name": "a_1_b_-1", - "key": M{"a": 1, "b": -1},b + "key": M{"a": 1, "b": -1}, "ns": "mydb.mycoll", "unique": true, "dropDups": true, From b4fff88a43aab5645563810fb1050b343372e58c Mon Sep 17 00:00:00 2001 From: Valeri Karpov Date: Mon, 6 Oct 2014 16:26:19 -0400 Subject: [PATCH 065/305] Quick fix for Kerberos: don't use hardcoded userPlusRealm length --- sasl/sasl_windows.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/sasl/sasl_windows.c b/sasl/sasl_windows.c index 4fa175c14..9e9345669 100644 --- a/sasl/sasl_windows.c +++ b/sasl/sasl_windows.c @@ -73,13 +73,14 @@ int sspi_send_client_authz_id(CtxtHandle *context, PVOID *buffer, ULONG *buffer_ return status; } - int msgSize = 4 + 25; + size_t user_plus_realm_length = strlen(user_plus_realm); + int msgSize = 4 + user_plus_realm_length; char *msg = malloc((sizes.cbSecurityTrailer + msgSize + sizes.cbBlockSize) * sizeof(char)); msg[sizes.cbSecurityTrailer + 0] = 1; msg[sizes.cbSecurityTrailer + 1] = 0; msg[sizes.cbSecurityTrailer + 2] = 0; msg[sizes.cbSecurityTrailer + 3] = 0; - memcpy(&msg[sizes.cbSecurityTrailer + 4], user_plus_realm, 25); + memcpy(&msg[sizes.cbSecurityTrailer + 4], user_plus_realm, user_plus_realm_length); SecBuffer wrapBufs[3]; SecBufferDesc wrapBufDesc; From 227e5a40a3632e8b1ab8c4479280c5d2e13052de Mon Sep 17 00:00:00 2001 From: Valeri Karpov Date: Mon, 6 Oct 2014 18:23:39 -0400 Subject: [PATCH 066/305] Don't leak msg in sspi_send_client_authz_id --- sasl/sasl_windows.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sasl/sasl_windows.c b/sasl/sasl_windows.c index 9e9345669..dd6a88ab6 100644 --- a/sasl/sasl_windows.c +++ b/sasl/sasl_windows.c @@ -102,6 +102,7 @@ int 
sspi_send_client_authz_id(CtxtHandle *context, PVOID *buffer, ULONG *buffer_ status = call_sspi_encrypt_message(context, SECQOP_WRAP_NO_ENCRYPT, &wrapBufDesc, 0); if (status != SEC_E_OK) { + free(msg); return status; } @@ -112,5 +113,6 @@ int sspi_send_client_authz_id(CtxtHandle *context, PVOID *buffer, ULONG *buffer_ memcpy(*buffer + wrapBufs[0].cbBuffer, wrapBufs[1].pvBuffer, wrapBufs[1].cbBuffer); memcpy(*buffer + wrapBufs[0].cbBuffer + wrapBufs[1].cbBuffer, wrapBufs[2].pvBuffer, wrapBufs[2].cbBuffer); + free(msg); return SEC_E_OK; } From 88b0407dc4147242680c11a71514e62329b13317 Mon Sep 17 00:00:00 2001 From: Gabriel Russell Date: Mon, 6 Oct 2014 18:06:03 -0400 Subject: [PATCH 067/305] allow OtherDBRoles to be used with $external --- session.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/session.go b/session.go index 179d06168..1aa813c27 100644 --- a/session.go +++ b/session.go @@ -779,8 +779,8 @@ func (db *Database) UpsertUser(user *User) error { if (user.Password != "" || user.PasswordHash != "") && user.UserSource != "" { return fmt.Errorf("user has both Password/PasswordHash and UserSource set") } - if len(user.OtherDBRoles) > 0 && db.Name != "admin" { - return fmt.Errorf("user with OtherDBRoles is only supported in admin database") + if len(user.OtherDBRoles) > 0 && db.Name != "admin" && db.Name != "$external" { + return fmt.Errorf("user with OtherDBRoles is only supported in the admin or $external databases") } // Attempt to run this using 2.6+ commands. 
From 5ebf341556df522db4d4ad63ef7ecfe12d94e372 Mon Sep 17 00:00:00 2001 From: Gabriel Russell Date: Mon, 6 Oct 2014 18:07:23 -0400 Subject: [PATCH 068/305] enable the "localhost exception" --- session.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/session.go b/session.go index 1aa813c27..344d73cd8 100644 --- a/session.go +++ b/session.go @@ -790,7 +790,8 @@ func (db *Database) UpsertUser(user *User) error { rundb = db.Session.DB(user.UserSource) } err := rundb.runUserCmd("updateUser", user) - if isNotFound(err) { + // retry with createUser when isAuthError in order to enable the "localhost exception" + if isNotFound(err) || isAuthError(err) { return rundb.runUserCmd("createUser", user) } if !isNoCmd(err) { @@ -844,6 +845,11 @@ func isNotFound(err error) bool { return ok && e.Code == 11 } +func isAuthError(err error) bool { + e, ok := err.(*QueryError) + return ok && e.Code == 13 +} + func (db *Database) runUserCmd(cmdName string, user *User) error { cmd := make(bson.D, 0, 16) cmd = append(cmd, bson.DocElem{cmdName, user.Username}) From 4e557e1ce572f7aebc99b16fdb714bdfe63f7dd9 Mon Sep 17 00:00:00 2001 From: Gabriel Russell Date: Mon, 22 Sep 2014 17:07:23 -0400 Subject: [PATCH 069/305] MONGODB-X509 authentication --- auth.go | 25 +++++++++++++++++++++++-- session.go | 3 ++- 2 files changed, 25 insertions(+), 3 deletions(-) diff --git a/auth.go b/auth.go index a0fe5146b..4e7292379 100644 --- a/auth.go +++ b/auth.go @@ -183,8 +183,8 @@ func (socket *mongoSocket) Login(cred Credential) error { err = socket.loginClassic(cred) case "PLAIN": err = socket.loginPlain(cred) - case "MONGO-X509": - err = fmt.Errorf("unsupported authentication mechanism: %s", cred.Mechanism) + case "MONGODB-X509": + err = socket.loginX509(cred) default: // Try SASL for everything else, if it is available. 
err = socket.loginSASL(cred) @@ -232,6 +232,27 @@ func (socket *mongoSocket) loginClassic(cred Credential) error { }) } +type authX509Cmd struct { + Authenticate int + User string + Mechanism string +} + +func (socket *mongoSocket) loginX509(cred Credential) error { + cmd := authX509Cmd{Authenticate: 1, User: cred.Username, Mechanism: "MONGODB-X509"} + res := authResult{} + return socket.loginRun(cred.Source, &cmd, &res, func() error { + if !res.Ok { + return errors.New(res.ErrMsg) + } + socket.Lock() + socket.dropAuth(cred.Source) + socket.creds = append(socket.creds, cred) + socket.Unlock() + return nil + }) +} + func (socket *mongoSocket) loginPlain(cred Credential) error { cmd := saslCmd{Start: 1, Mechanism: "PLAIN", Payload: []byte("\x00" + cred.Username + "\x00" + cred.Password)} res := authResult{} diff --git a/session.go b/session.go index 344d73cd8..794f2e3e8 100644 --- a/session.go +++ b/session.go @@ -372,7 +372,8 @@ func DialWithInfo(info *DialInfo) (*Session, error) { } if info.Username != "" { source := session.sourcedb - if info.Source == "" && (info.Mechanism == "GSSAPI" || info.Mechanism == "PLAIN") { + if info.Source == "" && + (info.Mechanism == "GSSAPI" || info.Mechanism == "PLAIN" || info.Mechanism == "MONGODB-X509") { source = "$external" } session.dialCred = &Credential{ From 594244cc65bb6a889b5a0396e30a2a0cca50440c Mon Sep 17 00:00:00 2001 From: corymintz Date: Wed, 24 Sep 2014 11:38:38 -0400 Subject: [PATCH 070/305] MONGODB-X509 authentication tests --- auth_test.go | 132 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 132 insertions(+) diff --git a/auth_test.go b/auth_test.go index 46e0d2b11..c704185c6 100644 --- a/auth_test.go +++ b/auth_test.go @@ -27,8 +27,10 @@ package mgo_test import ( + "crypto/tls" "flag" "fmt" + "net" "net/url" "os" "runtime" @@ -1092,3 +1094,133 @@ func windowsAppendPasswordToCredential(cred *mgo.Credential) { cred.Password = getWindowsKerberosPassword() } } + +var ( + x509Flag = 
flag.String("x509", "", "Test x509 authentication (depends on custom environment)") + x509Subject = "CN=localhost,OU=Client,O=MGO,L=MGO,ST=MGO,C=GO" + clientCert = `-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAwE2sl8YeTTSetwo9kykJ5mCZ/FtfPtn/0X4nOlTM2Qc/uWzA +sjSYoSV4UkuOiWjKQQH2EDeXaltshOo7F0oCY5ozVeQe+phe987iKTvLtf7NoXJD +KqNqR4Kb4ylbCrEky7+Xvw6yrrqw8qgWy+9VsrilR3q8LsETE9SBMtfp3BUaaNQp +peNm+iAhx3uZSv3mdzSLFSA/o61kAyG0scLExYDjo/7xyMNQoloLvNmx4Io160+y +lOz077/qqU620tmuDLRz1QdxK/bptmXTnsBCRxl+U8nzbwVZgWFENhXplbcN+SjN +LhdnvTiU2qFhgZmc7ZtCKdPIpx3W6pH9bx7kTwIDAQABAoIBAQCOQygyo8NY9FuS +J8ZDrvF+9+oS8fm1QorpDT2x/ngI+j7fSyAG9bgQRusLXpAVAWvWyb+iYa3nZbkT +X0DVys+XpcTifr+YPc7L3sYbIPxkKBsxm5kq2vfN7Uart7V9ZG1HOfblxdbUQpKT +AVzUA7vPWqATEC5VHEqjuerWlTqRr9YLZE/nkE7ICLISqdl4WDYfUYJwoXWfYkXQ +Lfl5Qh2leyri9S3urvDrhnURTQ1lM182IbTRA+9rUiFzsRW+9U4HPY7Ao2Itp8dr +GRP4rcq4TP+NcF0Ky64cNfKXCWmwqTBRFYAlTD6gwjN/s2BzvWD/2nlnc0DYAXrB +TgFCPk7xAoGBAOwuHICwwTxtzrdWjuRGU3RxL4eLEXedtL8yon/yPci3e+8eploX +1Fp0rEK2gIGDp/X8DiOtrKXih8XPusCwE/I3EvjHdI0RylLZXTPOp1Ei21dXRsiV +YxcF+d5s11q5tJtF+5ISUeIz2iSc9Z2LBnb8JDK1jcCRa5Q212q3ZWW5AoGBANBw +9CoMbxINLG1q0NvOOSwMKDk2OB+9JbQ5lwF4ijZl2I6qRoOCzQ3lBs0Qv/AeBjNR +SerDs2+eWnIBUbgSdiqcOKnXAI/Qbl1IkVFYV/2g9m6dgu1fNWNBv8NIYDHCLfDx +W3fpO5JMf+iE5XC4XqCfSBIME2yxPSGQjal6tB5HAoGAddYDzolhv/6hVoPPQ0F7 +PeuC5UOTcXSzy3k97kw0W0KAiStnoCengYIYuChKMVQ4ptgdTdvG+fTt/NnJuX2g +Vgb4ZjtNgVzQ70kX4VNH04lqmkcnP8iY6dHHexwezls9KwNdouGVDSEFw6K0QOgu +T4s5nDtNADkNzaMXE11xL7ECgYBoML3rstFmTY1ymB0Uck3jtaP5jR+axdpt7weL +Zax4qooILhcXL6++DUhMAt5ecTOaPTzci7xKw/Xj3MLzZs8IV5R/WQhf2sj/+gEh +jy5UijwEaNmEO74dAkWPoMLsvGpocMzO8JeldnXNTXi+0noCgfvtgXnIMAQlnfMh +z0LviwKBgQCg5KR9JC4iuKses7Kfv2YelcO8vOZkRzBu3NdRWMsiJQC+qfetgd57 +RjRjlRWd1WCHJ5Kmx3hkUaZZOrX5knqfsRW3Nl0I74xgWl7Bli2eSJ9VWl59bcd6 +DqphhY7/gcW+QZlhXpnqbf0W8jB2gPhTYERyCBoS9LfhZWZu/11wuQ== +-----END RSA PRIVATE KEY----- +-----BEGIN CERTIFICATE----- +MIICyTCCAjKgAwIBAgIBATANBgkqhkiG9w0BAQUFADBcMQswCQYDVQQGEwJHTzEM 
+MAoGA1UECBMDTUdPMQwwCgYDVQQHEwNNR08xDDAKBgNVBAoTA01HTzEPMA0GA1UE +CxMGU2VydmVyMRIwEAYDVQQDEwlsb2NhbGhvc3QwHhcNMTQwOTI0MTQwMzUzWhcN +MTUwOTI0MTQwMzUzWjBcMQswCQYDVQQGEwJHTzEMMAoGA1UECBMDTUdPMQwwCgYD +VQQHEwNNR08xDDAKBgNVBAoTA01HTzEPMA0GA1UECxMGQ2xpZW50MRIwEAYDVQQD +Ewlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDATayX +xh5NNJ63Cj2TKQnmYJn8W18+2f/Rfic6VMzZBz+5bMCyNJihJXhSS46JaMpBAfYQ +N5dqW2yE6jsXSgJjmjNV5B76mF73zuIpO8u1/s2hckMqo2pHgpvjKVsKsSTLv5e/ +DrKuurDyqBbL71WyuKVHerwuwRMT1IEy1+ncFRpo1Cml42b6ICHHe5lK/eZ3NIsV +ID+jrWQDIbSxwsTFgOOj/vHIw1CiWgu82bHgijXrT7KU7PTvv+qpTrbS2a4MtHPV +B3Er9um2ZdOewEJHGX5TyfNvBVmBYUQ2FemVtw35KM0uF2e9OJTaoWGBmZztm0Ip +08inHdbqkf1vHuRPAgMBAAGjFzAVMBMGA1UdJQQMMAoGCCsGAQUFBwMCMA0GCSqG +SIb3DQEBBQUAA4GBAJZD7idSIRzhGlJYARPKWnX2CxD4VVB0F5cH5Mlc2YnoUSU/ +rKuPZFuOYND3awKqez6K3rNb3+tQmNitmoOT8ImmX1uJKBo5w9tuo4B2MmLQcPMk +3fhPePuQCjtlArSmKVrNTrYPkyB9NwKS6q0+FzseFTw9ZJUIKiO9sSjMe+HP +-----END CERTIFICATE----- +` + // The server cert is not used by the test. It is here for + // convenience to use for the mongod specificed in the + // `x509Flag` as the --sslPEMKeyFile and --sslCAFIle parameters. 
+ serverCert = `-----BEGIN CERTIFICATE----- +MIIC+DCCAmGgAwIBAgIJAJ5pBAq2HXAsMA0GCSqGSIb3DQEBBQUAMFwxCzAJBgNV +BAYTAkdPMQwwCgYDVQQIEwNNR08xDDAKBgNVBAcTA01HTzEMMAoGA1UEChMDTUdP +MQ8wDQYDVQQLEwZTZXJ2ZXIxEjAQBgNVBAMTCWxvY2FsaG9zdDAeFw0xNDA5MjQx +MzUxMTBaFw0xNTA5MjQxMzUxMTBaMFwxCzAJBgNVBAYTAkdPMQwwCgYDVQQIEwNN +R08xDDAKBgNVBAcTA01HTzEMMAoGA1UEChMDTUdPMQ8wDQYDVQQLEwZTZXJ2ZXIx +EjAQBgNVBAMTCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA +pQ5wO2L23xMI4PzpVt/Ftvez82IvA9amwr3fUd7RjlYwiFsFeMnG24a4CUoOeKF0 +fpQWc9rmCs0EeP5ofZ2otOsfxoVWXZAZWdgauuwlYB6EeFaAMH3fxVH3IiH+21RR +q2w9sH/s4fqh5stavUfyPdVmCcb8NW0jD8jlqniJL0kCAwEAAaOBwTCBvjAdBgNV +HQ4EFgQUjyVWGMHBrmPDGwCY5VusHsKIpzIwgY4GA1UdIwSBhjCBg4AUjyVWGMHB +rmPDGwCY5VusHsKIpzKhYKReMFwxCzAJBgNVBAYTAkdPMQwwCgYDVQQIEwNNR08x +DDAKBgNVBAcTA01HTzEMMAoGA1UEChMDTUdPMQ8wDQYDVQQLEwZTZXJ2ZXIxEjAQ +BgNVBAMTCWxvY2FsaG9zdIIJAJ5pBAq2HXAsMAwGA1UdEwQFMAMBAf8wDQYJKoZI +hvcNAQEFBQADgYEAa65TgDKp3SRUDNAILSuQOCEbenWh/DMPL4vTVgo/Dxd4emoO +7i8/4HMTa0XeYIVbAsxO+dqtxqt32IcV7DurmQozdUZ7q0ueJRXon6APnCN0IqPC +sF71w63xXfpmnvTAfQXi7x6TUAyAQ2nScHExAjzc000DF1dO/6+nIINqNQE= +-----END CERTIFICATE----- +-----BEGIN RSA PRIVATE KEY----- +MIICWwIBAAKBgQClDnA7YvbfEwjg/OlW38W297PzYi8D1qbCvd9R3tGOVjCIWwV4 +ycbbhrgJSg54oXR+lBZz2uYKzQR4/mh9nai06x/GhVZdkBlZ2Bq67CVgHoR4VoAw +fd/FUfciIf7bVFGrbD2wf+zh+qHmy1q9R/I91WYJxvw1bSMPyOWqeIkvSQIDAQAB +AoGABA9S22MXx2zkbwRJiQWAC3wURQxJM8L33xpkf9MHPIUKNJBolgwAhC3QIQpd +SMJP5z0lQDxGJEXesksvrsdN+vsgbleRfQsAIcY/rEhr9h8m6auM08f+69oIX32o +aTOWJJRofjbgzE5c/RijqhIaYGdq54a0EE9mAaODwZoa2/ECQQDRGrIRI5L3pdRA +yifDKNjvAFOk6TbdGe+J9zHFw4F7bA2In/b+rno9vrj+EanOevD8LRLzeFshzXrG +WQFzZ69/AkEAyhLSY7WNiQTeJWCwXawVnoSl5AMSRYFA/A2sEUokfORR5BS7gqvL +mmEKmvslnZp5qlMtM4AyrW2OaoGvE6sFNwJACB3xK5kl61cUli9Cu+CqCx0IIi6r +YonPMpvV4sdkD1ZycAtFmz1KoXr102b8IHfFQwS855aUcwt26Jwr4j70IQJAXv9+ +PTXq9hF9xiCwiTkPaNh/jLQM8PQU8uoSjIZIpRZJkWpVxNay/z7D15xeULuAmxxD +UcThDjtFCrkw75Qk/QJAFfcM+5r31R1RrBGM1QPKwDqkFTGsFKnMWuS/pXyLTTOv +I+In9ZJyA/R5zKeJZjM7xtZs0ANU9HpOpgespq6CvA== 
+-----END RSA PRIVATE KEY-----` +) + +func (s *S) TestAuthx509Cred(c *C) { + if *x509Flag == "" { + c.Skip("no -x509") + } + + clientCert, err := tls.X509KeyPair([]byte(clientCert), []byte(clientCert)) + c.Assert(err, IsNil) + + tlsConfig := &tls.Config{ + // Isolating tests to client certs, don't care about server validation. + InsecureSkipVerify: true, + Certificates: []tls.Certificate{clientCert}, + } + + c.Logf("Connecting to %s...", *x509Flag) + session, err := mgo.DialWithInfo(&mgo.DialInfo{ + Addrs: []string{*x509Flag}, + DialServer: func(addr *mgo.ServerAddr) (net.Conn, error) { + return tls.Dial("tcp", addr.String(), tlsConfig) + }, + }) + c.Assert(err, IsNil) + defer session.Close() + + c.Logf("Connected! Testing the need for authentication...") + names, err := session.DatabaseNames() + c.Assert(err, ErrorMatches, "not authorized .*") + + cred := &mgo.Credential{ + Username: x509Subject, + Mechanism: "MONGODB-X509", + Source: "$external", + } + + c.Logf("Authenticating...") + err = session.Login(cred) + c.Assert(err, IsNil) + c.Logf("Authenticated!") + + names, err = session.DatabaseNames() + c.Assert(err, IsNil) + c.Assert(len(names) > 0, Equals, true) +} From ed8df50f8ef0fa2cdd6761da6c303fbe8ed4210e Mon Sep 17 00:00:00 2001 From: Menno Smits Date: Tue, 7 Oct 2014 08:25:39 +0200 Subject: [PATCH 071/305] Removed unnecessary resort of document keys docKeys() already sorts the returned document keys. --- txn/flusher.go | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/txn/flusher.go b/txn/flusher.go index 5318ad0c9..bc39ec69f 100644 --- a/txn/flusher.go +++ b/txn/flusher.go @@ -2,7 +2,6 @@ package txn import ( "fmt" - "sort" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" @@ -225,10 +224,10 @@ func (f *flusher) prepare(t *transaction, force bool) (revnos []int64, err error } f.debugf("Preparing %s", t) - // Iterate in a stable way across all runners. This isn't - // strictly required, but reduces the chances of cycles. 
+ // docKeys is sorted to support stable iteration across all + // runners. This isn't strictly required, but reduces the chances + // of cycles. dkeys := t.docKeys() - sort.Sort(dkeys) revno := make(map[docKey]int64) info := txnInfo{} @@ -380,10 +379,10 @@ func (f *flusher) rescan(t *transaction, force bool) (revnos []int64, err error) panic(fmt.Errorf("rescanning transaction in invalid state: %q", t.State)) } - // Iterate in a stable way across all runners. This isn't - // strictly required, but reduces the chances of cycles. + // docKeys is sorted to support stable iteration across all + // runners. This isn't strictly required, but reduces the chances + // of cycles. dkeys := t.docKeys() - sort.Sort(dkeys) tt := t.token() if !force { From bf843b6b9424760c6e6c0014ff8eb6022c868bfc Mon Sep 17 00:00:00 2001 From: Gabriel Russell Date: Fri, 3 Oct 2014 14:26:15 -0400 Subject: [PATCH 072/305] better integrate the MONGODB-X509 authentication tests --- auth.go | 2 +- auth_test.go | 132 ------------------------------------ auth_x509_test.go | 85 +++++++++++++++++++++++ testdb/client.pem | 44 ++++++++++++ testdb/server.pem | 33 +++++++++ testdb/setup.sh | 19 +++++- testdb/supervisord-ssl.conf | 3 + 7 files changed, 182 insertions(+), 136 deletions(-) create mode 100644 auth_x509_test.go create mode 100644 testdb/client.pem create mode 100644 testdb/server.pem create mode 100644 testdb/supervisord-ssl.conf diff --git a/auth.go b/auth.go index 4e7292379..bbd13e371 100644 --- a/auth.go +++ b/auth.go @@ -233,7 +233,7 @@ func (socket *mongoSocket) loginClassic(cred Credential) error { } type authX509Cmd struct { - Authenticate int + Authenticate int // "authenticate" is the mongo command. The value, afaik, is not used. 
User string Mechanism string } diff --git a/auth_test.go b/auth_test.go index c704185c6..46e0d2b11 100644 --- a/auth_test.go +++ b/auth_test.go @@ -27,10 +27,8 @@ package mgo_test import ( - "crypto/tls" "flag" "fmt" - "net" "net/url" "os" "runtime" @@ -1094,133 +1092,3 @@ func windowsAppendPasswordToCredential(cred *mgo.Credential) { cred.Password = getWindowsKerberosPassword() } } - -var ( - x509Flag = flag.String("x509", "", "Test x509 authentication (depends on custom environment)") - x509Subject = "CN=localhost,OU=Client,O=MGO,L=MGO,ST=MGO,C=GO" - clientCert = `-----BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEAwE2sl8YeTTSetwo9kykJ5mCZ/FtfPtn/0X4nOlTM2Qc/uWzA -sjSYoSV4UkuOiWjKQQH2EDeXaltshOo7F0oCY5ozVeQe+phe987iKTvLtf7NoXJD -KqNqR4Kb4ylbCrEky7+Xvw6yrrqw8qgWy+9VsrilR3q8LsETE9SBMtfp3BUaaNQp -peNm+iAhx3uZSv3mdzSLFSA/o61kAyG0scLExYDjo/7xyMNQoloLvNmx4Io160+y -lOz077/qqU620tmuDLRz1QdxK/bptmXTnsBCRxl+U8nzbwVZgWFENhXplbcN+SjN -LhdnvTiU2qFhgZmc7ZtCKdPIpx3W6pH9bx7kTwIDAQABAoIBAQCOQygyo8NY9FuS -J8ZDrvF+9+oS8fm1QorpDT2x/ngI+j7fSyAG9bgQRusLXpAVAWvWyb+iYa3nZbkT -X0DVys+XpcTifr+YPc7L3sYbIPxkKBsxm5kq2vfN7Uart7V9ZG1HOfblxdbUQpKT -AVzUA7vPWqATEC5VHEqjuerWlTqRr9YLZE/nkE7ICLISqdl4WDYfUYJwoXWfYkXQ -Lfl5Qh2leyri9S3urvDrhnURTQ1lM182IbTRA+9rUiFzsRW+9U4HPY7Ao2Itp8dr -GRP4rcq4TP+NcF0Ky64cNfKXCWmwqTBRFYAlTD6gwjN/s2BzvWD/2nlnc0DYAXrB -TgFCPk7xAoGBAOwuHICwwTxtzrdWjuRGU3RxL4eLEXedtL8yon/yPci3e+8eploX -1Fp0rEK2gIGDp/X8DiOtrKXih8XPusCwE/I3EvjHdI0RylLZXTPOp1Ei21dXRsiV -YxcF+d5s11q5tJtF+5ISUeIz2iSc9Z2LBnb8JDK1jcCRa5Q212q3ZWW5AoGBANBw -9CoMbxINLG1q0NvOOSwMKDk2OB+9JbQ5lwF4ijZl2I6qRoOCzQ3lBs0Qv/AeBjNR -SerDs2+eWnIBUbgSdiqcOKnXAI/Qbl1IkVFYV/2g9m6dgu1fNWNBv8NIYDHCLfDx -W3fpO5JMf+iE5XC4XqCfSBIME2yxPSGQjal6tB5HAoGAddYDzolhv/6hVoPPQ0F7 -PeuC5UOTcXSzy3k97kw0W0KAiStnoCengYIYuChKMVQ4ptgdTdvG+fTt/NnJuX2g -Vgb4ZjtNgVzQ70kX4VNH04lqmkcnP8iY6dHHexwezls9KwNdouGVDSEFw6K0QOgu -T4s5nDtNADkNzaMXE11xL7ECgYBoML3rstFmTY1ymB0Uck3jtaP5jR+axdpt7weL -Zax4qooILhcXL6++DUhMAt5ecTOaPTzci7xKw/Xj3MLzZs8IV5R/WQhf2sj/+gEh 
-jy5UijwEaNmEO74dAkWPoMLsvGpocMzO8JeldnXNTXi+0noCgfvtgXnIMAQlnfMh -z0LviwKBgQCg5KR9JC4iuKses7Kfv2YelcO8vOZkRzBu3NdRWMsiJQC+qfetgd57 -RjRjlRWd1WCHJ5Kmx3hkUaZZOrX5knqfsRW3Nl0I74xgWl7Bli2eSJ9VWl59bcd6 -DqphhY7/gcW+QZlhXpnqbf0W8jB2gPhTYERyCBoS9LfhZWZu/11wuQ== ------END RSA PRIVATE KEY----- ------BEGIN CERTIFICATE----- -MIICyTCCAjKgAwIBAgIBATANBgkqhkiG9w0BAQUFADBcMQswCQYDVQQGEwJHTzEM -MAoGA1UECBMDTUdPMQwwCgYDVQQHEwNNR08xDDAKBgNVBAoTA01HTzEPMA0GA1UE -CxMGU2VydmVyMRIwEAYDVQQDEwlsb2NhbGhvc3QwHhcNMTQwOTI0MTQwMzUzWhcN -MTUwOTI0MTQwMzUzWjBcMQswCQYDVQQGEwJHTzEMMAoGA1UECBMDTUdPMQwwCgYD -VQQHEwNNR08xDDAKBgNVBAoTA01HTzEPMA0GA1UECxMGQ2xpZW50MRIwEAYDVQQD -Ewlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDATayX -xh5NNJ63Cj2TKQnmYJn8W18+2f/Rfic6VMzZBz+5bMCyNJihJXhSS46JaMpBAfYQ -N5dqW2yE6jsXSgJjmjNV5B76mF73zuIpO8u1/s2hckMqo2pHgpvjKVsKsSTLv5e/ -DrKuurDyqBbL71WyuKVHerwuwRMT1IEy1+ncFRpo1Cml42b6ICHHe5lK/eZ3NIsV -ID+jrWQDIbSxwsTFgOOj/vHIw1CiWgu82bHgijXrT7KU7PTvv+qpTrbS2a4MtHPV -B3Er9um2ZdOewEJHGX5TyfNvBVmBYUQ2FemVtw35KM0uF2e9OJTaoWGBmZztm0Ip -08inHdbqkf1vHuRPAgMBAAGjFzAVMBMGA1UdJQQMMAoGCCsGAQUFBwMCMA0GCSqG -SIb3DQEBBQUAA4GBAJZD7idSIRzhGlJYARPKWnX2CxD4VVB0F5cH5Mlc2YnoUSU/ -rKuPZFuOYND3awKqez6K3rNb3+tQmNitmoOT8ImmX1uJKBo5w9tuo4B2MmLQcPMk -3fhPePuQCjtlArSmKVrNTrYPkyB9NwKS6q0+FzseFTw9ZJUIKiO9sSjMe+HP ------END CERTIFICATE----- -` - // The server cert is not used by the test. It is here for - // convenience to use for the mongod specificed in the - // `x509Flag` as the --sslPEMKeyFile and --sslCAFIle parameters. 
- serverCert = `-----BEGIN CERTIFICATE----- -MIIC+DCCAmGgAwIBAgIJAJ5pBAq2HXAsMA0GCSqGSIb3DQEBBQUAMFwxCzAJBgNV -BAYTAkdPMQwwCgYDVQQIEwNNR08xDDAKBgNVBAcTA01HTzEMMAoGA1UEChMDTUdP -MQ8wDQYDVQQLEwZTZXJ2ZXIxEjAQBgNVBAMTCWxvY2FsaG9zdDAeFw0xNDA5MjQx -MzUxMTBaFw0xNTA5MjQxMzUxMTBaMFwxCzAJBgNVBAYTAkdPMQwwCgYDVQQIEwNN -R08xDDAKBgNVBAcTA01HTzEMMAoGA1UEChMDTUdPMQ8wDQYDVQQLEwZTZXJ2ZXIx -EjAQBgNVBAMTCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA -pQ5wO2L23xMI4PzpVt/Ftvez82IvA9amwr3fUd7RjlYwiFsFeMnG24a4CUoOeKF0 -fpQWc9rmCs0EeP5ofZ2otOsfxoVWXZAZWdgauuwlYB6EeFaAMH3fxVH3IiH+21RR -q2w9sH/s4fqh5stavUfyPdVmCcb8NW0jD8jlqniJL0kCAwEAAaOBwTCBvjAdBgNV -HQ4EFgQUjyVWGMHBrmPDGwCY5VusHsKIpzIwgY4GA1UdIwSBhjCBg4AUjyVWGMHB -rmPDGwCY5VusHsKIpzKhYKReMFwxCzAJBgNVBAYTAkdPMQwwCgYDVQQIEwNNR08x -DDAKBgNVBAcTA01HTzEMMAoGA1UEChMDTUdPMQ8wDQYDVQQLEwZTZXJ2ZXIxEjAQ -BgNVBAMTCWxvY2FsaG9zdIIJAJ5pBAq2HXAsMAwGA1UdEwQFMAMBAf8wDQYJKoZI -hvcNAQEFBQADgYEAa65TgDKp3SRUDNAILSuQOCEbenWh/DMPL4vTVgo/Dxd4emoO -7i8/4HMTa0XeYIVbAsxO+dqtxqt32IcV7DurmQozdUZ7q0ueJRXon6APnCN0IqPC -sF71w63xXfpmnvTAfQXi7x6TUAyAQ2nScHExAjzc000DF1dO/6+nIINqNQE= ------END CERTIFICATE----- ------BEGIN RSA PRIVATE KEY----- -MIICWwIBAAKBgQClDnA7YvbfEwjg/OlW38W297PzYi8D1qbCvd9R3tGOVjCIWwV4 -ycbbhrgJSg54oXR+lBZz2uYKzQR4/mh9nai06x/GhVZdkBlZ2Bq67CVgHoR4VoAw -fd/FUfciIf7bVFGrbD2wf+zh+qHmy1q9R/I91WYJxvw1bSMPyOWqeIkvSQIDAQAB -AoGABA9S22MXx2zkbwRJiQWAC3wURQxJM8L33xpkf9MHPIUKNJBolgwAhC3QIQpd -SMJP5z0lQDxGJEXesksvrsdN+vsgbleRfQsAIcY/rEhr9h8m6auM08f+69oIX32o -aTOWJJRofjbgzE5c/RijqhIaYGdq54a0EE9mAaODwZoa2/ECQQDRGrIRI5L3pdRA -yifDKNjvAFOk6TbdGe+J9zHFw4F7bA2In/b+rno9vrj+EanOevD8LRLzeFshzXrG -WQFzZ69/AkEAyhLSY7WNiQTeJWCwXawVnoSl5AMSRYFA/A2sEUokfORR5BS7gqvL -mmEKmvslnZp5qlMtM4AyrW2OaoGvE6sFNwJACB3xK5kl61cUli9Cu+CqCx0IIi6r -YonPMpvV4sdkD1ZycAtFmz1KoXr102b8IHfFQwS855aUcwt26Jwr4j70IQJAXv9+ -PTXq9hF9xiCwiTkPaNh/jLQM8PQU8uoSjIZIpRZJkWpVxNay/z7D15xeULuAmxxD -UcThDjtFCrkw75Qk/QJAFfcM+5r31R1RrBGM1QPKwDqkFTGsFKnMWuS/pXyLTTOv -I+In9ZJyA/R5zKeJZjM7xtZs0ANU9HpOpgespq6CvA== 
------END RSA PRIVATE KEY-----` -) - -func (s *S) TestAuthx509Cred(c *C) { - if *x509Flag == "" { - c.Skip("no -x509") - } - - clientCert, err := tls.X509KeyPair([]byte(clientCert), []byte(clientCert)) - c.Assert(err, IsNil) - - tlsConfig := &tls.Config{ - // Isolating tests to client certs, don't care about server validation. - InsecureSkipVerify: true, - Certificates: []tls.Certificate{clientCert}, - } - - c.Logf("Connecting to %s...", *x509Flag) - session, err := mgo.DialWithInfo(&mgo.DialInfo{ - Addrs: []string{*x509Flag}, - DialServer: func(addr *mgo.ServerAddr) (net.Conn, error) { - return tls.Dial("tcp", addr.String(), tlsConfig) - }, - }) - c.Assert(err, IsNil) - defer session.Close() - - c.Logf("Connected! Testing the need for authentication...") - names, err := session.DatabaseNames() - c.Assert(err, ErrorMatches, "not authorized .*") - - cred := &mgo.Credential{ - Username: x509Subject, - Mechanism: "MONGODB-X509", - Source: "$external", - } - - c.Logf("Authenticating...") - err = session.Login(cred) - c.Assert(err, IsNil) - c.Logf("Authenticated!") - - names, err = session.DatabaseNames() - c.Assert(err, IsNil) - c.Assert(len(names) > 0, Equals, true) -} diff --git a/auth_x509_test.go b/auth_x509_test.go new file mode 100644 index 000000000..82ad43a33 --- /dev/null +++ b/auth_x509_test.go @@ -0,0 +1,85 @@ +package mgo_test + +import ( + "crypto/tls" + "flag" + "gopkg.in/mgo.v2" + "io/ioutil" + "net" + + . 
"gopkg.in/check.v1" +) + +/* to run this test: + you need to have an ssl enabled mongod + you need to setup the mongod's with "testdb/setup.sh start ssl" instead of using make + you need to run to "go test" with the -x509 flag +*/ + +var ( + x509Flag = flag.Bool("x509", false, "Test x509 authentication (depends on having an ssl enabled mongd)") + x509Subject = "CN=localhost,OU=Client,O=MGO,L=MGO,ST=MGO,C=GO" // this needs to be kept in sync with client.pem + x509AuthUrl = "localhost:40301" +) + +func (s *S) TestAuthx509Cred(c *C) { + if !*x509Flag { + c.Skip("no -x509") + } + + clientCertPEM, err := ioutil.ReadFile("testdb/client.pem") + c.Assert(err, IsNil) + + clientCert, err := tls.X509KeyPair(clientCertPEM, clientCertPEM) + c.Assert(err, IsNil) + + tlsConfig := &tls.Config{ + // Isolating tests to client certs, don't care about server validation. + InsecureSkipVerify: true, + Certificates: []tls.Certificate{clientCert}, + } + + c.Logf("Connecting to %s...", x509AuthUrl) + session, err := mgo.DialWithInfo(&mgo.DialInfo{ + Addrs: []string{x509AuthUrl}, + DialServer: func(addr *mgo.ServerAddr) (net.Conn, error) { + return tls.Dial("tcp", addr.String(), tlsConfig) + }, + }) + c.Assert(err, IsNil) + defer session.Close() + + adminDB := session.DB("admin") + var adminUser mgo.User = mgo.User{Username: "Admin", Password: "AdminPassword", Roles: []mgo.Role{mgo.RoleRoot}} + err = adminDB.UpsertUser(&adminUser) + c.Assert(err, IsNil) + + err = session.Login(&mgo.Credential{Username: "Admin", Password: "AdminPassword"}) + c.Assert(err, IsNil) + + externalDB := session.DB("$external") + var x509User mgo.User = mgo.User{Username: x509Subject, OtherDBRoles: map[string][]mgo.Role{"admin": []mgo.Role{mgo.RoleRoot}}} + err = externalDB.UpsertUser(&x509User) + c.Assert(err, IsNil) + + session.LogoutAll() + + c.Logf("Connected! 
Ensuring authentication is required...") + names, err := session.DatabaseNames() + c.Assert(err, ErrorMatches, "not authorized .*") + + cred := &mgo.Credential{ + Username: x509Subject, + Mechanism: "MONGODB-X509", + Source: "$external", + } + + c.Logf("Authenticating...") + err = session.Login(cred) + c.Assert(err, IsNil) + c.Logf("Authenticated!") + + names, err = session.DatabaseNames() + c.Assert(err, IsNil) + c.Assert(len(names) > 0, Equals, true) +} diff --git a/testdb/client.pem b/testdb/client.pem new file mode 100644 index 000000000..cc57eec7a --- /dev/null +++ b/testdb/client.pem @@ -0,0 +1,44 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAwE2sl8YeTTSetwo9kykJ5mCZ/FtfPtn/0X4nOlTM2Qc/uWzA +sjSYoSV4UkuOiWjKQQH2EDeXaltshOo7F0oCY5ozVeQe+phe987iKTvLtf7NoXJD +KqNqR4Kb4ylbCrEky7+Xvw6yrrqw8qgWy+9VsrilR3q8LsETE9SBMtfp3BUaaNQp +peNm+iAhx3uZSv3mdzSLFSA/o61kAyG0scLExYDjo/7xyMNQoloLvNmx4Io160+y +lOz077/qqU620tmuDLRz1QdxK/bptmXTnsBCRxl+U8nzbwVZgWFENhXplbcN+SjN +LhdnvTiU2qFhgZmc7ZtCKdPIpx3W6pH9bx7kTwIDAQABAoIBAQCOQygyo8NY9FuS +J8ZDrvF+9+oS8fm1QorpDT2x/ngI+j7fSyAG9bgQRusLXpAVAWvWyb+iYa3nZbkT +X0DVys+XpcTifr+YPc7L3sYbIPxkKBsxm5kq2vfN7Uart7V9ZG1HOfblxdbUQpKT +AVzUA7vPWqATEC5VHEqjuerWlTqRr9YLZE/nkE7ICLISqdl4WDYfUYJwoXWfYkXQ +Lfl5Qh2leyri9S3urvDrhnURTQ1lM182IbTRA+9rUiFzsRW+9U4HPY7Ao2Itp8dr +GRP4rcq4TP+NcF0Ky64cNfKXCWmwqTBRFYAlTD6gwjN/s2BzvWD/2nlnc0DYAXrB +TgFCPk7xAoGBAOwuHICwwTxtzrdWjuRGU3RxL4eLEXedtL8yon/yPci3e+8eploX +1Fp0rEK2gIGDp/X8DiOtrKXih8XPusCwE/I3EvjHdI0RylLZXTPOp1Ei21dXRsiV +YxcF+d5s11q5tJtF+5ISUeIz2iSc9Z2LBnb8JDK1jcCRa5Q212q3ZWW5AoGBANBw +9CoMbxINLG1q0NvOOSwMKDk2OB+9JbQ5lwF4ijZl2I6qRoOCzQ3lBs0Qv/AeBjNR +SerDs2+eWnIBUbgSdiqcOKnXAI/Qbl1IkVFYV/2g9m6dgu1fNWNBv8NIYDHCLfDx +W3fpO5JMf+iE5XC4XqCfSBIME2yxPSGQjal6tB5HAoGAddYDzolhv/6hVoPPQ0F7 +PeuC5UOTcXSzy3k97kw0W0KAiStnoCengYIYuChKMVQ4ptgdTdvG+fTt/NnJuX2g +Vgb4ZjtNgVzQ70kX4VNH04lqmkcnP8iY6dHHexwezls9KwNdouGVDSEFw6K0QOgu +T4s5nDtNADkNzaMXE11xL7ECgYBoML3rstFmTY1ymB0Uck3jtaP5jR+axdpt7weL 
+Zax4qooILhcXL6++DUhMAt5ecTOaPTzci7xKw/Xj3MLzZs8IV5R/WQhf2sj/+gEh +jy5UijwEaNmEO74dAkWPoMLsvGpocMzO8JeldnXNTXi+0noCgfvtgXnIMAQlnfMh +z0LviwKBgQCg5KR9JC4iuKses7Kfv2YelcO8vOZkRzBu3NdRWMsiJQC+qfetgd57 +RjRjlRWd1WCHJ5Kmx3hkUaZZOrX5knqfsRW3Nl0I74xgWl7Bli2eSJ9VWl59bcd6 +DqphhY7/gcW+QZlhXpnqbf0W8jB2gPhTYERyCBoS9LfhZWZu/11wuQ== +-----END RSA PRIVATE KEY----- +-----BEGIN CERTIFICATE----- +MIICyTCCAjKgAwIBAgIBATANBgkqhkiG9w0BAQUFADBcMQswCQYDVQQGEwJHTzEM +MAoGA1UECBMDTUdPMQwwCgYDVQQHEwNNR08xDDAKBgNVBAoTA01HTzEPMA0GA1UE +CxMGU2VydmVyMRIwEAYDVQQDEwlsb2NhbGhvc3QwHhcNMTQwOTI0MTQwMzUzWhcN +MTUwOTI0MTQwMzUzWjBcMQswCQYDVQQGEwJHTzEMMAoGA1UECBMDTUdPMQwwCgYD +VQQHEwNNR08xDDAKBgNVBAoTA01HTzEPMA0GA1UECxMGQ2xpZW50MRIwEAYDVQQD +Ewlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDATayX +xh5NNJ63Cj2TKQnmYJn8W18+2f/Rfic6VMzZBz+5bMCyNJihJXhSS46JaMpBAfYQ +N5dqW2yE6jsXSgJjmjNV5B76mF73zuIpO8u1/s2hckMqo2pHgpvjKVsKsSTLv5e/ +DrKuurDyqBbL71WyuKVHerwuwRMT1IEy1+ncFRpo1Cml42b6ICHHe5lK/eZ3NIsV +ID+jrWQDIbSxwsTFgOOj/vHIw1CiWgu82bHgijXrT7KU7PTvv+qpTrbS2a4MtHPV +B3Er9um2ZdOewEJHGX5TyfNvBVmBYUQ2FemVtw35KM0uF2e9OJTaoWGBmZztm0Ip +08inHdbqkf1vHuRPAgMBAAGjFzAVMBMGA1UdJQQMMAoGCCsGAQUFBwMCMA0GCSqG +SIb3DQEBBQUAA4GBAJZD7idSIRzhGlJYARPKWnX2CxD4VVB0F5cH5Mlc2YnoUSU/ +rKuPZFuOYND3awKqez6K3rNb3+tQmNitmoOT8ImmX1uJKBo5w9tuo4B2MmLQcPMk +3fhPePuQCjtlArSmKVrNTrYPkyB9NwKS6q0+FzseFTw9ZJUIKiO9sSjMe+HP +-----END CERTIFICATE----- diff --git a/testdb/server.pem b/testdb/server.pem new file mode 100644 index 000000000..16fbef16b --- /dev/null +++ b/testdb/server.pem @@ -0,0 +1,33 @@ +-----BEGIN CERTIFICATE----- +MIIC+DCCAmGgAwIBAgIJAJ5pBAq2HXAsMA0GCSqGSIb3DQEBBQUAMFwxCzAJBgNV +BAYTAkdPMQwwCgYDVQQIEwNNR08xDDAKBgNVBAcTA01HTzEMMAoGA1UEChMDTUdP +MQ8wDQYDVQQLEwZTZXJ2ZXIxEjAQBgNVBAMTCWxvY2FsaG9zdDAeFw0xNDA5MjQx +MzUxMTBaFw0xNTA5MjQxMzUxMTBaMFwxCzAJBgNVBAYTAkdPMQwwCgYDVQQIEwNN +R08xDDAKBgNVBAcTA01HTzEMMAoGA1UEChMDTUdPMQ8wDQYDVQQLEwZTZXJ2ZXIx +EjAQBgNVBAMTCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA 
+pQ5wO2L23xMI4PzpVt/Ftvez82IvA9amwr3fUd7RjlYwiFsFeMnG24a4CUoOeKF0 +fpQWc9rmCs0EeP5ofZ2otOsfxoVWXZAZWdgauuwlYB6EeFaAMH3fxVH3IiH+21RR +q2w9sH/s4fqh5stavUfyPdVmCcb8NW0jD8jlqniJL0kCAwEAAaOBwTCBvjAdBgNV +HQ4EFgQUjyVWGMHBrmPDGwCY5VusHsKIpzIwgY4GA1UdIwSBhjCBg4AUjyVWGMHB +rmPDGwCY5VusHsKIpzKhYKReMFwxCzAJBgNVBAYTAkdPMQwwCgYDVQQIEwNNR08x +DDAKBgNVBAcTA01HTzEMMAoGA1UEChMDTUdPMQ8wDQYDVQQLEwZTZXJ2ZXIxEjAQ +BgNVBAMTCWxvY2FsaG9zdIIJAJ5pBAq2HXAsMAwGA1UdEwQFMAMBAf8wDQYJKoZI +hvcNAQEFBQADgYEAa65TgDKp3SRUDNAILSuQOCEbenWh/DMPL4vTVgo/Dxd4emoO +7i8/4HMTa0XeYIVbAsxO+dqtxqt32IcV7DurmQozdUZ7q0ueJRXon6APnCN0IqPC +sF71w63xXfpmnvTAfQXi7x6TUAyAQ2nScHExAjzc000DF1dO/6+nIINqNQE= +-----END CERTIFICATE----- +-----BEGIN RSA PRIVATE KEY----- +MIICWwIBAAKBgQClDnA7YvbfEwjg/OlW38W297PzYi8D1qbCvd9R3tGOVjCIWwV4 +ycbbhrgJSg54oXR+lBZz2uYKzQR4/mh9nai06x/GhVZdkBlZ2Bq67CVgHoR4VoAw +fd/FUfciIf7bVFGrbD2wf+zh+qHmy1q9R/I91WYJxvw1bSMPyOWqeIkvSQIDAQAB +AoGABA9S22MXx2zkbwRJiQWAC3wURQxJM8L33xpkf9MHPIUKNJBolgwAhC3QIQpd +SMJP5z0lQDxGJEXesksvrsdN+vsgbleRfQsAIcY/rEhr9h8m6auM08f+69oIX32o +aTOWJJRofjbgzE5c/RijqhIaYGdq54a0EE9mAaODwZoa2/ECQQDRGrIRI5L3pdRA +yifDKNjvAFOk6TbdGe+J9zHFw4F7bA2In/b+rno9vrj+EanOevD8LRLzeFshzXrG +WQFzZ69/AkEAyhLSY7WNiQTeJWCwXawVnoSl5AMSRYFA/A2sEUokfORR5BS7gqvL +mmEKmvslnZp5qlMtM4AyrW2OaoGvE6sFNwJACB3xK5kl61cUli9Cu+CqCx0IIi6r +YonPMpvV4sdkD1ZycAtFmz1KoXr102b8IHfFQwS855aUcwt26Jwr4j70IQJAXv9+ +PTXq9hF9xiCwiTkPaNh/jLQM8PQU8uoSjIZIpRZJkWpVxNay/z7D15xeULuAmxxD +UcThDjtFCrkw75Qk/QJAFfcM+5r31R1RrBGM1QPKwDqkFTGsFKnMWuS/pXyLTTOv +I+In9ZJyA/R5zKeJZjM7xtZs0ANU9HpOpgespq6CvA== +-----END RSA PRIVATE KEY----- diff --git a/testdb/setup.sh b/testdb/setup.sh index 27200c090..d75e24b87 100755 --- a/testdb/setup.sh +++ b/testdb/setup.sh @@ -4,7 +4,20 @@ start() { mkdir _testdb cd _testdb mkdir db1 db2 rs1a rs1b rs1c rs2a rs2b rs2c rs3a rs3b rs3c rs4a cfg1 cfg2 cfg3 - ln -s ../testdb/supervisord.conf supervisord.conf + cp "../testdb/supervisord.conf" supervisord.conf + if [ -n "$1" ]; then + case "$1" in + -ssl) + mkdir 
"ssl1" + cat "../testdb/supervisord-ssl.conf" >> supervisord.conf + ;; + *) + echo "unknown setup.sh option $1" + exit 1 + ;; + esac + fi + ln -s ../testdb/server.pem server.pem echo keyfile > keyfile chmod 600 keyfile echo "Running supervisord..." @@ -42,11 +55,11 @@ fi case "$1" in start) - start + start $2 ;; stop) - stop + stop $2 ;; esac diff --git a/testdb/supervisord-ssl.conf b/testdb/supervisord-ssl.conf new file mode 100644 index 000000000..1925b189d --- /dev/null +++ b/testdb/supervisord-ssl.conf @@ -0,0 +1,3 @@ + +[program:ssl1] +command = mongod --sslMode requireSSL --auth --sslCAFile %(here)s/server.pem --sslPEMKeyFile %(here)s/server.pem --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --dbpath %(here)s/ssl1 --bind_ip=127.0.0.1 --port 40301 From 336cb44cb6869395919de31e4557bd235ca2949e Mon Sep 17 00:00:00 2001 From: Valeri Karpov Date: Wed, 8 Oct 2014 11:44:34 -0400 Subject: [PATCH 073/305] Use defer to free buffers as per Gustavo's suggestion --- sasl/sasl_windows.go | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/sasl/sasl_windows.go b/sasl/sasl_windows.go index 82e904cd9..137e953da 100644 --- a/sasl/sasl_windows.go +++ b/sasl/sasl_windows.go @@ -93,9 +93,6 @@ func (ss *saslSession) Close() { for _, cstr := range ss.stringsToFree { C.free(unsafe.Pointer(cstr)) } - for _, cbuf := range ss.buffersToFree { - C.free(unsafe.Pointer(cbuf)) - } } func (ss *saslSession) Step(serverData []byte) (clientData []byte, done bool, err error) { @@ -113,11 +110,12 @@ func (ss *saslSession) Step(serverData []byte) (clientData []byte, done bool, er if ss.authComplete { // Step 3: last bit of magic to use the correct server credentials status = C.sspi_send_client_authz_id(&ss.context, &buffer, &bufferLength, ss.cstr(ss.userPlusRealm)) - ss.buffersToFree = append(ss.buffersToFree, buffer) } else { // Step 1 + Step 2: set up security context with the server and TGT status = C.sspi_step(&ss.credHandle, 
ss.hasContext, &ss.context, &buffer, &bufferLength, ss.cstr(ss.target)) - ss.buffersToFree = append(ss.buffersToFree, buffer) + } + if buffer != C.PVOID(nil) { + defer C.free(unsafe.Pointer(buffer)) } if status != C.SEC_E_OK && status != C.SEC_I_CONTINUE_NEEDED { ss.errored = true From f1948f69dec6d2ed7f1096386e94d3bb4802900b Mon Sep 17 00:00:00 2001 From: Valeri Karpov Date: Wed, 8 Oct 2014 11:45:45 -0400 Subject: [PATCH 074/305] Get rid of unused buffersToFree --- sasl/sasl_windows.go | 1 - 1 file changed, 1 deletion(-) diff --git a/sasl/sasl_windows.go b/sasl/sasl_windows.go index 137e953da..3302cfe05 100644 --- a/sasl/sasl_windows.go +++ b/sasl/sasl_windows.go @@ -36,7 +36,6 @@ type saslSession struct { // Keep track of pointers we need to explicitly free stringsToFree []*C.char - buffersToFree []C.PVOID } var initError error From 1a850554ad40e451d7dc44587710219e85cc0c37 Mon Sep 17 00:00:00 2001 From: waigani Date: Tue, 7 Oct 2014 18:26:06 +0200 Subject: [PATCH 075/305] Support for sorting struct document keys --- txn/dockey_test.go | 210 +++++++++++++++++++++++++++++++++++++++++++++ txn/flusher.go | 7 +- txn/txn.go | 159 ++++++++++++++++++++++++++-------- txn/txn_test.go | 25 ++++++ 4 files changed, 363 insertions(+), 38 deletions(-) create mode 100644 txn/dockey_test.go diff --git a/txn/dockey_test.go b/txn/dockey_test.go new file mode 100644 index 000000000..2b2412ca9 --- /dev/null +++ b/txn/dockey_test.go @@ -0,0 +1,210 @@ +package txn + +import ( + "sort" + "testing" + + . 
"gopkg.in/check.v1" +) + +func TestAll(t *testing.T) { + TestingT(t) +} + +type DocKeySuite struct{} + +var _ = Suite(&DocKeySuite{}) + +type T struct { + A int + B string +} + +type T2 struct { + A int + B string +} + +type T3 struct { + A int + B string +} + +type T4 struct { + A int + B string +} + +type T5 struct { + F int + Q string +} + +type T6 struct { + A int + B string +} + +type T7 struct { + A bool + B float64 +} + +type T8 struct { + A int + B string +} + +type T9 struct { + A int + B string + C bool +} + +type T10 struct { + C int `bson:"a"` + D string `bson:"b,omitempty"` +} + +type T11 struct { + C int + D string +} + +type T12 struct { + S string +} + +type T13 struct { + p, q, r bool + S string +} + +var docKeysTests = [][]docKeys{ + {{ + {"c", 1}, + {"c", 5}, + {"c", 2}, + }, { + {"c", 1}, + {"c", 2}, + {"c", 5}, + }}, {{ + {"c", "foo"}, + {"c", "bar"}, + {"c", "bob"}, + }, { + {"c", "bar"}, + {"c", "bob"}, + {"c", "foo"}, + }}, {{ + {"c", 0.2}, + {"c", 0.07}, + {"c", 0.9}, + }, { + {"c", 0.07}, + {"c", 0.2}, + {"c", 0.9}, + }}, {{ + {"c", true}, + {"c", false}, + {"c", true}, + }, { + {"c", false}, + {"c", true}, + {"c", true}, + }}, {{ + {"c", T{1, "b"}}, + {"c", T{1, "a"}}, + {"c", T{0, "b"}}, + {"c", T{0, "a"}}, + }, { + {"c", T{0, "a"}}, + {"c", T{0, "b"}}, + {"c", T{1, "a"}}, + {"c", T{1, "b"}}, + }}, {{ + {"c", T{1, "a"}}, + {"c", T{0, "a"}}, + }, { + {"c", T{0, "a"}}, + {"c", T{1, "a"}}, + }}, {{ + {"c", T3{0, "b"}}, + {"c", T2{1, "b"}}, + {"c", T3{1, "a"}}, + {"c", T2{0, "a"}}, + }, { + {"c", T2{0, "a"}}, + {"c", T3{0, "b"}}, + {"c", T3{1, "a"}}, + {"c", T2{1, "b"}}, + }}, {{ + {"c", T5{1, "b"}}, + {"c", T4{1, "b"}}, + {"c", T5{0, "a"}}, + {"c", T4{0, "a"}}, + }, { + {"c", T4{0, "a"}}, + {"c", T5{0, "a"}}, + {"c", T4{1, "b"}}, + {"c", T5{1, "b"}}, + }}, {{ + {"c", T6{1, "b"}}, + {"c", T7{true, 0.2}}, + {"c", T6{0, "a"}}, + {"c", T7{false, 0.04}}, + }, { + {"c", T6{0, "a"}}, + {"c", T6{1, "b"}}, + {"c", T7{false, 0.04}}, + {"c", T7{true, 
0.2}}, + }}, {{ + {"c", T9{1, "b", true}}, + {"c", T8{1, "b"}}, + {"c", T9{0, "a", false}}, + {"c", T8{0, "a"}}, + }, { + {"c", T9{0, "a", false}}, + {"c", T8{0, "a"}}, + {"c", T9{1, "b", true}}, + {"c", T8{1, "b"}}, + }}, {{ + {"b", 2}, + {"a", 5}, + {"c", 2}, + {"b", 1}, + }, { + {"a", 5}, + {"b", 1}, + {"b", 2}, + {"c", 2}, + }}, {{ + {"c", T11{1, "a"}}, + {"c", T11{1, "a"}}, + {"c", T10{1, "a"}}, + }, { + {"c", T10{1, "a"}}, + {"c", T11{1, "a"}}, + {"c", T11{1, "a"}}, + }}, {{ + {"c", T12{"a"}}, + {"c", T13{false, true, false, "a"}}, + {"c", T12{"b"}}, + {"c", T13{false, true, false, "b"}}, + }, { + {"c", T12{"a"}}, + {"c", T13{false, true, false, "a"}}, + {"c", T12{"b"}}, + {"c", T13{false, true, false, "b"}}, + }}, +} + +func (s *DocKeySuite) TestSort(c *C) { + for _, test := range docKeysTests { + keys := test[0] + expected := test[1] + sort.Sort(keys) + c.Check(keys, DeepEquals, expected) + } +} diff --git a/txn/flusher.go b/txn/flusher.go index bc39ec69f..25b2f0319 100644 --- a/txn/flusher.go +++ b/txn/flusher.go @@ -224,9 +224,8 @@ func (f *flusher) prepare(t *transaction, force bool) (revnos []int64, err error } f.debugf("Preparing %s", t) - // docKeys is sorted to support stable iteration across all - // runners. This isn't strictly required, but reduces the chances - // of cycles. + // dkeys being sorted means stable iteration across all runners. This + // isn't strictly required, but reduces the chances of cycles. dkeys := t.docKeys() revno := make(map[docKey]int64) @@ -379,7 +378,7 @@ func (f *flusher) rescan(t *transaction, force bool) (revnos []int64, err error) panic(fmt.Errorf("rescanning transaction in invalid state: %q", t.State)) } - // docKeys is sorted to support stable iteration across all + // dkeys being sorted means stable iteration across all // runners. This isn't strictly required, but reduces the chances // of cycles. 
dkeys := t.docKeys() diff --git a/txn/txn.go b/txn/txn.go index a235f9032..5809e2d3a 100644 --- a/txn/txn.go +++ b/txn/txn.go @@ -9,12 +9,14 @@ package txn import ( "encoding/binary" "fmt" - "gopkg.in/mgo.v2" - "gopkg.in/mgo.v2/bson" "reflect" "sort" + "strings" "sync" + "gopkg.in/mgo.v2" + "gopkg.in/mgo.v2/bson" + crand "crypto/rand" mrand "math/rand" ) @@ -455,38 +457,6 @@ func (r *Runner) load(id bson.ObjectId) (*transaction, error) { return &t, nil } -type docKey struct { - C string - Id interface{} -} - -type docKeys []docKey - -func (ks docKeys) Len() int { return len(ks) } -func (ks docKeys) Swap(i, j int) { ks[i], ks[j] = ks[j], ks[i] } -func (ks docKeys) Less(i, j int) bool { - a, b := ks[i], ks[j] - if a.C != b.C { - return a.C < b.C - } - av, an := valueNature(a.Id) - bv, bn := valueNature(b.Id) - if an != bn { - return an < bn - } - switch an { - case natureString: - return av.(string) < bv.(string) - case natureInt: - return av.(int64) < bv.(int64) - case natureFloat: - return av.(float64) < bv.(float64) - case natureBool: - return !av.(bool) && bv.(bool) - } - panic("unreachable") -} - type typeNature int const ( @@ -498,6 +468,7 @@ const ( natureInt natureFloat natureBool + natureStruct ) func valueNature(v interface{}) (value interface{}, nature typeNature) { @@ -513,6 +484,126 @@ func valueNature(v interface{}) (value interface{}, nature typeNature) { return rv.Float(), natureFloat case reflect.Bool: return rv.Bool(), natureBool + case reflect.Struct: + return v, natureStruct } panic("document id type unsupported by txn: " + rv.Kind().String()) } + +type docKey struct { + C string + Id interface{} +} + +type docKeys []docKey + +func (ks docKeys) Len() int { return len(ks) } +func (ks docKeys) Swap(i, j int) { ks[i], ks[j] = ks[j], ks[i] } +func (ks docKeys) Less(i, j int) bool { + a, b := ks[i], ks[j] + if a.C != b.C { + return a.C < b.C + } + return valuecmp(a.Id, b.Id) == -1 +} + +func valuecmp(a, b interface{}) int { + av, an := valueNature(a) + 
bv, bn := valueNature(b) + if an < bn { + return -1 + } + if an > bn { + return 1 + } + + if av == bv { + return 0 + } + var less bool + switch an { + case natureString: + less = av.(string) < bv.(string) + case natureInt: + less = av.(int64) < bv.(int64) + case natureFloat: + less = av.(float64) < bv.(float64) + case natureBool: + less = !av.(bool) && bv.(bool) + case natureStruct: + less = structcmp(av, bv) == -1 + default: + panic("unreachable") + } + if less { + return -1 + } + return 1 +} + +func structcmp(a, b interface{}) int { + av := reflect.ValueOf(a) + bv := reflect.ValueOf(b) + + var ai, bi = 0, 0 + var an, bn = av.NumField(), bv.NumField() + var avi, bvi interface{} + var af, bf reflect.StructField + for { + for ai < an { + af = av.Type().Field(ai) + if isExported(af.Name) { + avi = av.Field(ai).Interface() + ai++ + break + } + ai++ + } + for bi < bn { + bf = bv.Type().Field(bi) + if isExported(bf.Name) { + bvi = bv.Field(bi).Interface() + bi++ + break + } + bi++ + } + if n := valuecmp(avi, bvi); n != 0 { + return n + } + nameA := getFieldName(af) + nameB := getFieldName(bf) + if nameA < nameB { + return -1 + } + if nameA > nameB { + return 1 + } + if ai == an && bi == bn { + return 0 + } + if ai == an || bi == bn { + if ai == bn { + return -1 + } + return 1 + } + } + panic("unreachable") +} + +func isExported(name string) bool { + a := name[0] + return a >= 'A' && a <= 'Z' +} + +func getFieldName(f reflect.StructField) string { + name := f.Tag.Get("bson") + if i := strings.Index(name, ","); i >= 0 { + name = name[:i] + } + if name == "" { + name = strings.ToLower(f.Name) + } + return name +} diff --git a/txn/txn_test.go b/txn/txn_test.go index 119bf21d6..0753c16a8 100644 --- a/txn/txn_test.go +++ b/txn/txn_test.go @@ -107,6 +107,31 @@ func (s *S) TestInsert(c *C) { c.Assert(account.Balance, Equals, 200) } +func (s *S) TestInsertStructID(c *C) { + type id struct { + FirstName string + LastName string + } + ops := []txn.Op{{ + C: "accounts", + Id: 
id{FirstName: "John", LastName: "Jones"}, + Assert: txn.DocMissing, + Insert: M{"balance": 200}, + }, { + C: "accounts", + Id: id{FirstName: "Sally", LastName: "Smith"}, + Assert: txn.DocMissing, + Insert: M{"balance": 800}, + }} + + err := s.runner.Run(ops, "", nil) + c.Assert(err, IsNil) + + n, err := s.accounts.Find(nil).Count() + c.Assert(err, IsNil) + c.Assert(n, Equals, 2) +} + func (s *S) TestRemove(c *C) { err := s.accounts.Insert(M{"_id": 0, "balance": 300}) c.Assert(err, IsNil) From ddf6e7e4ce586da68a5d5f1fda7834131202da1c Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Sat, 11 Oct 2014 14:39:48 +0200 Subject: [PATCH 076/305] SCRAM tests work without external server on 2.7.7. --- auth_test.go | 29 ++++++++++++++++++++++------- 1 file changed, 22 insertions(+), 7 deletions(-) diff --git a/auth_test.go b/auth_test.go index 46e0d2b11..62e7ebbfb 100644 --- a/auth_test.go +++ b/auth_test.go @@ -842,12 +842,9 @@ func (s *S) TestAuthDirectWithLogin(c *C) { } } -// TODO SCRAM-SHA-1 will become the default, and this flag will go away. 
-var scramFlag = flag.String("scram", "", "Host to test SCRAM-SHA-1 authentication against (depends on custom environment)") - func (s *S) TestAuthScramSha1Cred(c *C) { - if *scramFlag == "" { - c.Skip("no -plain") + if !s.versionAtLeast(2, 7, 7) { + c.Skip("SCRAM-SHA-1 tests depend on 2.7.7") } cred := &mgo.Credential{ Username: "root", @@ -855,8 +852,9 @@ func (s *S) TestAuthScramSha1Cred(c *C) { Mechanism: "SCRAM-SHA-1", Source: "admin", } - c.Logf("Connecting to %s...", *scramFlag) - session, err := mgo.Dial(*scramFlag) + host := "localhost:40002" + c.Logf("Connecting to %s...", host) + session, err := mgo.Dial(host) c.Assert(err, IsNil) defer session.Close() @@ -876,6 +874,23 @@ func (s *S) TestAuthScramSha1Cred(c *C) { c.Assert(err, Equals, mgo.ErrNotFound) } +func (s *S) TestAuthScramSha1URL(c *C) { + if !s.versionAtLeast(2, 7, 7) { + c.Skip("SCRAM-SHA-1 tests depend on 2.7.7") + } + host := "localhost:40002" + c.Logf("Connecting to %s...", host) + session, err := mgo.Dial(fmt.Sprintf("root:rapadura@%s?authMechanism=SCRAM-SHA-1", host)) + c.Assert(err, IsNil) + defer session.Close() + + mycoll := session.DB("admin").C("mycoll") + + c.Logf("Connected! Testing the need for authentication...") + err = mycoll.Find(nil).One(nil) + c.Assert(err, Equals, mgo.ErrNotFound) +} + var ( plainFlag = flag.String("plain", "", "Host to test PLAIN authentication against (depends on custom environment)") plainUser = "einstein" From bcdc975962af080342f4b36789fbbe43ec102cf0 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Sat, 11 Oct 2014 15:02:39 +0200 Subject: [PATCH 077/305] Default to SCRAM-SHA-1 auth if MaxWireVersion >= 3. 
--- auth.go | 3 +++ auth_test.go | 4 ++-- cluster.go | 22 ++++++++++++---------- server.go | 7 ++++--- 4 files changed, 21 insertions(+), 15 deletions(-) diff --git a/auth.go b/auth.go index a0fe5146b..39d6429b3 100644 --- a/auth.go +++ b/auth.go @@ -160,6 +160,9 @@ func (socket *mongoSocket) resetNonce() { func (socket *mongoSocket) Login(cred Credential) error { socket.Lock() + if cred.Mechanism == "" && socket.serverInfo.MaxWireVersion >= 3 { + cred.Mechanism = "SCRAM-SHA-1" + } for _, sockCred := range socket.creds { if sockCred == cred { debugf("Socket %p to %s: login: db=%q user=%q (already logged in)", socket, socket.addr, cred.Source, cred.Username) diff --git a/auth_test.go b/auth_test.go index 62e7ebbfb..32f10d5ec 100644 --- a/auth_test.go +++ b/auth_test.go @@ -53,7 +53,7 @@ func (s *S) TestAuthLoginDatabase(c *C) { admindb := session.DB("admin") err = admindb.Login("root", "wrong") - c.Assert(err, ErrorMatches, "auth fail(s|ed)") + c.Assert(err, ErrorMatches, "auth fail(s|ed)|.*Authentication failed.") err = admindb.Login("root", "rapadura") c.Assert(err, IsNil) @@ -79,7 +79,7 @@ func (s *S) TestAuthLoginSession(c *C) { Password: "wrong", } err = session.Login(&cred) - c.Assert(err, ErrorMatches, "auth fail(s|ed)") + c.Assert(err, ErrorMatches, "auth fail(s|ed)|.*Authentication failed.") cred.Password = "rapadura" diff --git a/cluster.go b/cluster.go index 10db6372d..104dd3988 100644 --- a/cluster.go +++ b/cluster.go @@ -124,13 +124,14 @@ func (cluster *mongoCluster) removeServer(server *mongoServer) { } type isMasterResult struct { - IsMaster bool - Secondary bool - Primary string - Hosts []string - Passives []string - Tags bson.D - Msg string + IsMaster bool + Secondary bool + Primary string + Hosts []string + Passives []string + Tags bson.D + Msg string + MaxWireVersion int `bson:"maxWireVersion"` } func (cluster *mongoCluster) isMaster(socket *mongoSocket, result *isMasterResult) error { @@ -214,9 +215,10 @@ func (cluster *mongoCluster) 
syncServer(server *mongoServer) (info *mongoServerI } info = &mongoServerInfo{ - Master: result.IsMaster, - Mongos: result.Msg == "isdbgrid", - Tags: result.Tags, + Master: result.IsMaster, + Mongos: result.Msg == "isdbgrid", + Tags: result.Tags, + MaxWireVersion: result.MaxWireVersion, } hosts = make([]string, 0, 1+len(result.Hosts)+len(result.Passives)) diff --git a/server.go b/server.go index eb89dfd56..8c130bed9 100644 --- a/server.go +++ b/server.go @@ -67,9 +67,10 @@ func (dial dialer) isSet() bool { } type mongoServerInfo struct { - Master bool - Mongos bool - Tags bson.D + Master bool + Mongos bool + Tags bson.D + MaxWireVersion int } var defaultServerInfo mongoServerInfo From b94fbad1b1e70e4238fd71a6b9cad17f46a34592 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Sat, 11 Oct 2014 16:14:50 +0200 Subject: [PATCH 078/305] Decode document fields into custom bson.D types. Requested by Daniel Gottlieb. --- bson/bson_test.go | 1 + bson/decode.go | 18 ++++++++++-------- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/bson/bson_test.go b/bson/bson_test.go index 0edcb099b..86a5022a3 100644 --- a/bson/bson_test.go +++ b/bson/bson_test.go @@ -1226,6 +1226,7 @@ var twoWayCrossItems = []crossTypeItem{ // bson.D <=> []DocElem {&bson.D{{"a", bson.D{{"b", 1}, {"c", 2}}}}, &bson.D{{"a", bson.D{{"b", 1}, {"c", 2}}}}}, {&bson.D{{"a", bson.D{{"b", 1}, {"c", 2}}}}, &MyD{{"a", MyD{{"b", 1}, {"c", 2}}}}}, + {&struct{ V MyD }{MyD{{"a", 1}}}, &bson.D{{"v", bson.D{{"a", 1}}}}}, // bson.RawD <=> []RawDocElem {&bson.RawD{{"a", bson.Raw{0x08, []byte{0x01}}}}, &bson.RawD{{"a", bson.Raw{0x08, []byte{0x01}}}}}, diff --git a/bson/decode.go b/bson/decode.go index f1c8b4f7c..488386498 100644 --- a/bson/decode.go +++ b/bson/decode.go @@ -438,16 +438,18 @@ func (d *decoder) readElemTo(out reflect.Value, kind byte) (good bool) { switch out.Kind() { case reflect.Interface, reflect.Ptr, reflect.Struct, reflect.Map: d.readDocTo(out) - default: - switch out.Interface().(type) { 
- case D: - out.Set(d.readDocElems(out.Type())) - case RawD: - out.Set(d.readRawDocElems(out.Type())) - default: - d.readDocTo(blackHole) + return true + case reflect.Slice: + outt := out.Type() + switch outt.Elem() { + case typeDocElem: + out.Set(d.readDocElems(outt)) + case typeRawDocElem: + out.Set(d.readRawDocElems(outt)) } + return true } + d.readDocTo(blackHole) return true } From 69a1a6993d300b5a9ebf6087b62efd4e050e7093 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Sat, 11 Oct 2014 16:39:54 +0200 Subject: [PATCH 079/305] Add Pipe.Explain. --- auth_test.go | 8 ++++---- session.go | 30 ++++++++++++++++++++++++++++-- session_test.go | 20 ++++++++++++++++++++ 3 files changed, 52 insertions(+), 6 deletions(-) diff --git a/auth_test.go b/auth_test.go index 32f10d5ec..75722fe72 100644 --- a/auth_test.go +++ b/auth_test.go @@ -241,7 +241,7 @@ func (s *S) TestAuthUpsertUser(c *C) { // Can't login directly into the database using UserSource, though. err = myotherdb.Login("myrwuser", "mypass") - c.Assert(err, ErrorMatches, "auth fail(s|ed)") + c.Assert(err, ErrorMatches, "auth fail(s|ed)|.*Authentication failed.") } func (s *S) TestAuthUpsertUserOtherDBRoles(c *C) { @@ -386,7 +386,7 @@ func (s *S) TestAuthAddUserReplaces(c *C) { admindb.Logout() err = mydb.Login("myuser", "myoldpass") - c.Assert(err, ErrorMatches, "auth fail(s|ed)") + c.Assert(err, ErrorMatches, "auth fail(s|ed)|.*Authentication failed.") err = mydb.Login("myuser", "mynewpass") c.Assert(err, IsNil) @@ -413,7 +413,7 @@ func (s *S) TestAuthRemoveUser(c *C) { c.Assert(err, Equals, mgo.ErrNotFound) err = mydb.Login("myuser", "mypass") - c.Assert(err, ErrorMatches, "auth fail(s|ed)") + c.Assert(err, ErrorMatches, "auth fail(s|ed)|.*Authentication failed.") } func (s *S) TestAuthLoginTwiceDoesNothing(c *C) { @@ -731,7 +731,7 @@ func (s *S) TestAuthURLWrongCredentials(c *C) { if session != nil { session.Close() } - c.Assert(err, ErrorMatches, "auth fail(s|ed)") + c.Assert(err, ErrorMatches, "auth 
fail(s|ed)|.*Authentication failed.") c.Assert(session, IsNil) } diff --git a/session.go b/session.go index 179d06168..4e2368edb 100644 --- a/session.go +++ b/session.go @@ -1793,6 +1793,12 @@ type Pipe struct { pipeline interface{} } +type pipeCmd struct { + Aggregate string + Pipeline interface{} + Explain bool ",omitempty" +} + // Pipe prepares a pipeline to aggregate. The pipeline document // must be a slice built in terms of the aggregation framework language. // @@ -1826,7 +1832,8 @@ func (p *Pipe) Iter() *Iter { iter.gotReply.L = &iter.m var result struct{ Result []bson.Raw } c := p.collection - iter.err = c.Database.Run(bson.D{{"aggregate", c.Name}, {"pipeline", p.pipeline}}, &result) + cmd := pipeCmd{Aggregate: c.Name, Pipeline: p.pipeline} + iter.err = c.Database.Run(cmd, &result) if iter.err != nil { return iter } @@ -1855,6 +1862,25 @@ func (p *Pipe) One(result interface{}) error { return ErrNotFound } +// Explain returns a number of details about how the MongoDB server would +// execute the requested pipeline, such as the number of objects examined, +// the number of times the read lock was yielded to allow writes to go in, +// and so on. +// +// For example: +// +// var m bson.M +// err := collection.Pipe(pipeline).Explain(&m) +// if err == nil { +// fmt.Printf("Explain: %#v\n", m) +// } +// +func (p *Pipe) Explain(result interface{}) error { + c := p.collection + cmd := pipeCmd{c.Name, p.pipeline, true} + return c.Database.Run(cmd, result) +} + type LastError struct { Err string Code, N, Waited int @@ -2241,7 +2267,7 @@ func (q *Query) Sort(fields ...string) *Query { // Explain returns a number of details about how the MongoDB server would // execute the requested query, such as the number of objects examined, -// the number of time the read lock was yielded to allow writes to go in, +// the number of times the read lock was yielded to allow writes to go in, // and so on. 
// // For example: diff --git a/session_test.go b/session_test.go index 874303429..caeb97df2 100644 --- a/session_test.go +++ b/session_test.go @@ -3171,6 +3171,26 @@ func (s *S) TestPipeOne(c *C) { c.Assert(err, Equals, mgo.ErrNotFound) } +func (s *S) TestPipeExplain(c *C) { + if !s.versionAtLeast(2, 1) { + c.Skip("Pipe only works on 2.1+") + } + + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + coll.Insert(M{"a": 1, "b": 2}) + + pipe := coll.Pipe([]M{{"$project": M{"a": 1, "b": M{"$add": []interface{}{"$b", 1}}}}}) + + var result bson.M + err = pipe.Explain(&result) + c.Assert(err, IsNil) + c.Assert(result["stages"], NotNil) +} + func (s *S) TestBatch1Bug(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) From 3422e857affb266507f9813e14b8bc08f67a1c55 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Sat, 11 Oct 2014 16:56:29 +0200 Subject: [PATCH 080/305] Add Pipe.AllowDiskUse. 
--- session.go | 24 ++++++++++++++++++++++-- session_test.go | 7 ++++++- 2 files changed, 28 insertions(+), 3 deletions(-) diff --git a/session.go b/session.go index 4e2368edb..6f511bd44 100644 --- a/session.go +++ b/session.go @@ -1223,6 +1223,7 @@ func indexFromSpec(spec indexSpec) Index { } type indexSlice []Index + func (idxs indexSlice) Len() int { return len(idxs) } func (idxs indexSlice) Less(i, j int) bool { return idxs[i].Name < idxs[j].Name } func (idxs indexSlice) Swap(i, j int) { idxs[i], idxs[j] = idxs[j], idxs[i] } @@ -1791,11 +1792,13 @@ type Pipe struct { session *Session collection *Collection pipeline interface{} + allowDisk bool } type pipeCmd struct { Aggregate string Pipeline interface{} + AllowDisk bool "allowDiskUse,omitempty" Explain bool ",omitempty" } @@ -1832,7 +1835,12 @@ func (p *Pipe) Iter() *Iter { iter.gotReply.L = &iter.m var result struct{ Result []bson.Raw } c := p.collection - cmd := pipeCmd{Aggregate: c.Name, Pipeline: p.pipeline} + cmd := pipeCmd{ + c.Name, + p.pipeline, + p.allowDisk, + false, + } iter.err = c.Database.Run(cmd, &result) if iter.err != nil { return iter @@ -1877,10 +1885,22 @@ func (p *Pipe) One(result interface{}) error { // func (p *Pipe) Explain(result interface{}) error { c := p.collection - cmd := pipeCmd{c.Name, p.pipeline, true} + cmd := pipeCmd{ + c.Name, + p.pipeline, + p.allowDisk, + true, + } return c.Database.Run(cmd, result) } +// AllowDiskUse enables writing to the "/_tmp" server directory so +// that aggregation pipelines do not have to be held entirely in memory. 
+func (p *Pipe) AllowDiskUse() *Pipe { + p.allowDisk = true + return p +} + type LastError struct { Err string Code, N, Waited int diff --git a/session_test.go b/session_test.go index caeb97df2..e13c8b6dd 100644 --- a/session_test.go +++ b/session_test.go @@ -3109,7 +3109,12 @@ func (s *S) TestPipeIter(c *C) { coll.Insert(M{"n": n}) } - iter := coll.Pipe([]M{{"$match": M{"n": M{"$gte": 42}}}}).Iter() + pipe := coll.Pipe([]M{{"$match": M{"n": M{"$gte": 42}}}}) + + // Smoke test for AllowDiskUse. + pipe.AllowDiskUse() + + iter := pipe.Iter() result := struct{ N int }{} for i := 2; i < 7; i++ { ok := iter.Next(&result) From e77699b842dbd45ca4ee6388776f3e71570e99b1 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Sat, 11 Oct 2014 18:16:12 +0200 Subject: [PATCH 081/305] Support cursor-based pipeline iteration. --- session.go | 89 +++++++++++++++++++++++++++++++++++++++++-------- session_test.go | 8 +++-- 2 files changed, 81 insertions(+), 16 deletions(-) diff --git a/session.go b/session.go index 6f511bd44..94a850e03 100644 --- a/session.go +++ b/session.go @@ -1793,13 +1793,19 @@ type Pipe struct { collection *Collection pipeline interface{} allowDisk bool + batchSize int } type pipeCmd struct { Aggregate string Pipeline interface{} - AllowDisk bool "allowDiskUse,omitempty" - Explain bool ",omitempty" + Cursor *pipeCmdCursor ",omitempty" + Explain bool ",omitempty" + AllowDisk bool "allowDiskUse,omitempty" +} + +type pipeCmdCursor struct { + BatchSize int `bson:"batchSize,omitempty"` } // Pipe prepares a pipeline to aggregate. 
The pipeline document @@ -1818,35 +1824,80 @@ type pipeCmd struct { // func (c *Collection) Pipe(pipeline interface{}) *Pipe { session := c.Database.Session + session.m.Lock() + batchSize := int(session.queryConfig.op.limit) + session.m.Unlock() return &Pipe{ session: session, collection: c, pipeline: pipeline, + batchSize: batchSize, } } // Iter executes the pipeline and returns an iterator capable of going // over all the generated results. func (p *Pipe) Iter() *Iter { + + // Clone session and set it to strong mode so that the server + // used for the query may be safely obtained afterwards, if + // necessary for iteration when a cursor is received. + cloned := p.session.Clone() + cloned.SetMode(Strong, false) + defer cloned.Close() + c := p.collection.With(cloned) + iter := &Iter{ session: p.session, timeout: -1, } iter.gotReply.L = &iter.m - var result struct{ Result []bson.Raw } - c := p.collection + + var result struct { + // 2.4, no cursors. + Result []bson.Raw + + // 2.6+, with cursors. + Cursor struct { + FirstBatch []bson.Raw "firstBatch" + Id int64 + } + } + cmd := pipeCmd{ - c.Name, - p.pipeline, - p.allowDisk, - false, + Aggregate: c.Name, + Pipeline: p.pipeline, + AllowDisk: p.allowDisk, + Cursor: &pipeCmdCursor{p.batchSize}, } iter.err = c.Database.Run(cmd, &result) + if e, ok := iter.err.(*QueryError); ok && e.Message == `unrecognized field "cursor` { + cmd.Cursor = nil + cmd.AllowDisk = false + iter.err = c.Database.Run(cmd, &result) + } if iter.err != nil { return iter } - for i := range result.Result { - iter.docData.Push(result.Result[i].Data) + docs := result.Result + if docs == nil { + docs = result.Cursor.FirstBatch + } + for i := range docs { + iter.docData.Push(docs[i].Data) + } + if result.Cursor.Id != 0 { + socket, err := cloned.acquireSocket(true) + if err != nil { + // Cloned session is in strong mode, and the query + // above succeeded. Should have a reserved socket. 
+ panic("internal error: " + err.Error()) + } + iter.server = socket.Server() + socket.Release() + iter.op.cursorId = result.Cursor.Id + iter.op.collection = c.FullName + iter.op.replyFunc = iter.replyFunc() } return iter } @@ -1886,10 +1937,10 @@ func (p *Pipe) One(result interface{}) error { func (p *Pipe) Explain(result interface{}) error { c := p.collection cmd := pipeCmd{ - c.Name, - p.pipeline, - p.allowDisk, - true, + Aggregate: c.Name, + Pipeline: p.pipeline, + AllowDisk: p.allowDisk, + Explain: true, } return c.Database.Run(cmd, result) } @@ -1901,6 +1952,16 @@ func (p *Pipe) AllowDiskUse() *Pipe { return p } +// Batch sets the batch size used when fetching documents from the database. +// It's possible to change this setting on a per-session basis as well, using +// the Batch method of Session. +// +// The default batch size is defined by the database server. +func (p *Pipe) Batch(n int) *Pipe { + p.batchSize = n + return p +} + type LastError struct { Err string Code, N, Waited int diff --git a/session_test.go b/session_test.go index e13c8b6dd..d2ca46402 100644 --- a/session_test.go +++ b/session_test.go @@ -3111,6 +3111,9 @@ func (s *S) TestPipeIter(c *C) { pipe := coll.Pipe([]M{{"$match": M{"n": M{"$gte": 42}}}}) + // Ensure cursor logic is working by forcing a small batch. + pipe.Batch(2) + // Smoke test for AllowDiskUse. pipe.AllowDiskUse() @@ -3190,10 +3193,11 @@ func (s *S) TestPipeExplain(c *C) { pipe := coll.Pipe([]M{{"$project": M{"a": 1, "b": M{"$add": []interface{}{"$b", 1}}}}}) - var result bson.M + // The explain command result changes across versions. + var result struct{ Ok int } err = pipe.Explain(&result) c.Assert(err, IsNil) - c.Assert(result["stages"], NotNil) + c.Assert(result.Ok, Equals, 1) } func (s *S) TestBatch1Bug(c *C) { From 67cd8512da4339207a2ee3f14b92d1e956e59027 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Sun, 12 Oct 2014 10:55:17 +0200 Subject: [PATCH 082/305] Support non-struct getter/setter documents. Fixes #17. 
--- bson/bson_test.go | 38 +++++++++++++++++++++++++++++++++++++- bson/decode.go | 42 ++++++++++++++++++++++++++---------------- 2 files changed, 63 insertions(+), 17 deletions(-) diff --git a/bson/bson_test.go b/bson/bson_test.go index 86a5022a3..f1a370654 100644 --- a/bson/bson_test.go +++ b/bson/bson_test.go @@ -67,7 +67,7 @@ func makeZeroDoc(value interface{}) (zero interface{}) { case reflect.Ptr: pv := reflect.New(v.Type().Elem()) zero = pv.Interface() - case reflect.Slice: + case reflect.Slice, reflect.Int: zero = reflect.New(t).Interface() default: panic("unsupported doc type") @@ -1024,6 +1024,36 @@ type inlineBadKeyMap struct { M map[int]int ",inline" } +type getterSetterD bson.D + +func (s getterSetterD) GetBSON() (interface{}, error) { + if len(s) == 0 { + return bson.D{}, nil + } + return bson.D(s[:len(s)-1]), nil +} + +func (s *getterSetterD) SetBSON(raw bson.Raw) error { + var doc bson.D + err := raw.Unmarshal(&doc) + doc = append(doc, bson.DocElem{"suffix", true}) + *s = getterSetterD(doc) + return err +} + +type getterSetterInt int + +func (i getterSetterInt) GetBSON() (interface{}, error) { + return bson.D{{"a", int(i)}}, nil +} + +func (i *getterSetterInt) SetBSON(raw bson.Raw) error { + var doc struct{ A int } + err := raw.Unmarshal(&doc) + *i = getterSetterInt(doc.A) + return err +} + type ( MyString string MyBytes []byte @@ -1041,6 +1071,8 @@ var ( int64ptr = &int64var intvar = int(42) intptr = &intvar + + gsintvar = getterSetterInt(42) ) func parseURL(s string) *url.URL { @@ -1243,6 +1275,10 @@ var twoWayCrossItems = []crossTypeItem{ {&struct{ N json.Number }{"5"}, map[string]interface{}{"n": int64(5)}}, {&struct{ N json.Number }{"5.05"}, map[string]interface{}{"n": 5.05}}, {&struct{ N json.Number }{"9223372036854776000"}, map[string]interface{}{"n": float64(1 << 63)}}, + + // bson.D <=> non-struct getter/setter + {&bson.D{{"a", 1}}, &getterSetterD{{"a", 1}, {"suffix", true}}}, + {&bson.D{{"a", 42}}, &gsintvar}, } // Same thing, but only one 
way (obj1 => obj2). diff --git a/bson/decode.go b/bson/decode.go index 488386498..0227235f4 100644 --- a/bson/decode.go +++ b/bson/decode.go @@ -73,35 +73,39 @@ const ( setterAddr ) -var setterStyle map[reflect.Type]int +var setterStyles map[reflect.Type]int var setterIface reflect.Type var setterMutex sync.RWMutex func init() { var iface Setter setterIface = reflect.TypeOf(&iface).Elem() - setterStyle = make(map[reflect.Type]int) + setterStyles = make(map[reflect.Type]int) } -func getSetter(outt reflect.Type, out reflect.Value) Setter { +func setterStyle(outt reflect.Type) int { setterMutex.RLock() - style := setterStyle[outt] + style := setterStyles[outt] setterMutex.RUnlock() - if style == setterNone { - return nil - } if style == setterUnknown { setterMutex.Lock() defer setterMutex.Unlock() if outt.Implements(setterIface) { - setterStyle[outt] = setterType + setterStyles[outt] = setterType } else if reflect.PtrTo(outt).Implements(setterIface) { - setterStyle[outt] = setterAddr + setterStyles[outt] = setterAddr } else { - setterStyle[outt] = setterNone - return nil + setterStyles[outt] = setterNone } - style = setterStyle[outt] + style = setterStyles[outt] + } + return style +} + +func getSetter(outt reflect.Type, out reflect.Value) Setter { + style := setterStyle(outt) + if style == setterNone { + return nil } if style == setterAddr { if !out.CanAddr() { @@ -434,13 +438,19 @@ func (d *decoder) readElemTo(out reflect.Value, kind byte) (good bool) { start := d.i if kind == '\x03' { - // Special case for documents. Delegate to readDocTo(). - switch out.Kind() { + // Delegate unmarshaling of documents. 
+ outt := out.Type() + outk := out.Kind() + switch outk { case reflect.Interface, reflect.Ptr, reflect.Struct, reflect.Map: d.readDocTo(out) return true - case reflect.Slice: - outt := out.Type() + } + if setterStyle(outt) != setterNone { + d.readDocTo(out) + return true + } + if outk == reflect.Slice { switch outt.Elem() { case typeDocElem: out.Set(d.readDocElems(outt)) From 8420bc95d1f3a0f8ad4d3b710a622c463a65cdcc Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Sun, 12 Oct 2014 11:11:10 +0200 Subject: [PATCH 083/305] txn: drop duplicated gocheck registration --- txn/dockey_test.go | 5 ----- txn/txn_test.go | 2 -- 2 files changed, 7 deletions(-) diff --git a/txn/dockey_test.go b/txn/dockey_test.go index 2b2412ca9..e8dee952c 100644 --- a/txn/dockey_test.go +++ b/txn/dockey_test.go @@ -2,15 +2,10 @@ package txn import ( "sort" - "testing" . "gopkg.in/check.v1" ) -func TestAll(t *testing.T) { - TestingT(t) -} - type DocKeySuite struct{} var _ = Suite(&DocKeySuite{}) diff --git a/txn/txn_test.go b/txn/txn_test.go index 0753c16a8..1e396eadc 100644 --- a/txn/txn_test.go +++ b/txn/txn_test.go @@ -603,8 +603,6 @@ func (s *S) TestTxnQueueStressTest(c *C) { const runners = 4 const changes = 1000 - txn.SetDebug(true) - var wg sync.WaitGroup wg.Add(runners) for n := 0; n < runners; n++ { From f6368d5c8ccac1bd131e1a80892d8483dab123fc Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Sun, 12 Oct 2014 11:36:00 +0200 Subject: [PATCH 084/305] bson: emphasize that DBPointer is deprecated --- bson/bson.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/bson/bson.go b/bson/bson.go index da2273a5e..68e932fb1 100644 --- a/bson/bson.go +++ b/bson/bson.go @@ -386,9 +386,10 @@ type JavaScript struct { Scope interface{} } -// DBPointer is a type that refers to a document in some namespace by wrapping -// a string containing the namespace itself, and the ObjectId in which the _id -// of the document is contained +// DBPointer refers to a document id in a 
namespace. +// +// This type is deprecated in the BSON specification and should not be used +// except for backwards compatibility with ancient applications. type DBPointer struct { Namespace string Id ObjectId From 97ba2cb90380faab5b93300919fbd2cf89d2d419 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Sun, 12 Oct 2014 12:09:25 +0200 Subject: [PATCH 085/305] Better testing for EnsureIndex with text indexes. --- session.go | 12 ++++++--- session_test.go | 65 ++++++++++++++++++++++++++++++++++++++++++------- 2 files changed, 64 insertions(+), 13 deletions(-) diff --git a/session.go b/session.go index 6a7a970c4..d20cd00f8 100644 --- a/session.go +++ b/session.go @@ -941,13 +941,17 @@ type Index struct { Background bool // Build index in background and return immediately Sparse bool // Only index documents containing the Key fields - ExpireAfter time.Duration // Periodically delete docs with indexed time.Time older than that. + // If ExpireAfter is defined the server will periodically delete + // documents with indexed time.Time older than the provided delta. + ExpireAfter time.Duration - Name string // Index name, computed by EnsureIndex + // Index name computed by EnsureIndex during creation. + Name string - Bits, Min, Max int // Properties for spatial indexes + // Properties for spatial indexes. + Bits, Min, Max int - // Properties for text indexes + // Properties for text indexes. 
DefaultLanguage string LanguageOverride string } diff --git a/session_test.go b/session_test.go index 6e2bee3a5..a80714e2b 100644 --- a/session_test.go +++ b/session_test.go @@ -2451,13 +2451,28 @@ func (s *S) TestEnsureIndex(c *C) { Bits: 32, } - coll := session.DB("mydb").C("mycoll") + index5 := mgo.Index{ + Key: []string{"$text:a", "$text:b"}, + } + + index6 := mgo.Index{ + Key: []string{"$text:a"}, + DefaultLanguage: "portuguese", + LanguageOverride: "idioma", + } - for _, index := range []mgo.Index{index1, index2, index3, index4} { - err = coll.EnsureIndex(index) + coll1 := session.DB("mydb").C("mycoll1") + coll2 := session.DB("mydb").C("mycoll2") + + for _, index := range []mgo.Index{index1, index2, index3, index4, index5} { + err = coll1.EnsureIndex(index) c.Assert(err, IsNil) } + // Cannot have multiple text indexes on the same collection. + err = coll2.EnsureIndex(index6) + c.Assert(err, IsNil) + sysidx := session.DB("mydb").C("system.indexes") result1 := M{} @@ -2476,11 +2491,19 @@ func (s *S) TestEnsureIndex(c *C) { err = sysidx.Find(M{"name": "loc_2d"}).One(result4) c.Assert(err, IsNil) + result5 := M{} + err = sysidx.Find(M{"name": "a_text_b_text"}).One(result5) + c.Assert(err, IsNil) + + result6 := M{} + err = sysidx.Find(M{"name": "a_text"}).One(result6) + c.Assert(err, IsNil) + delete(result1, "v") expected1 := M{ "name": "a_1", "key": M{"a": 1}, - "ns": "mydb.mycoll", + "ns": "mydb.mycoll1", "background": true, } c.Assert(result1, DeepEquals, expected1) @@ -2489,7 +2512,7 @@ func (s *S) TestEnsureIndex(c *C) { expected2 := M{ "name": "a_1_b_-1", "key": M{"a": 1, "b": -1}, - "ns": "mydb.mycoll", + "ns": "mydb.mycoll1", "unique": true, "dropDups": true, } @@ -2503,7 +2526,7 @@ func (s *S) TestEnsureIndex(c *C) { expected3 := M{ "name": "loc_old_2d", "key": M{"loc_old": "2d"}, - "ns": "mydb.mycoll", + "ns": "mydb.mycoll1", "min": -500, "max": 500, "bits": 32, @@ -2514,17 +2537,41 @@ func (s *S) TestEnsureIndex(c *C) { expected4 := M{ "name": "loc_2d", 
"key": M{"loc": "2d"}, - "ns": "mydb.mycoll", + "ns": "mydb.mycoll1", "min": -500, "max": 500, "bits": 32, } c.Assert(result4, DeepEquals, expected4) + delete(result5, "v") + expected5 := M{ + "name": "a_text_b_text", + "key": M{"_fts": "text", "_ftsx": 1}, + "ns": "mydb.mycoll1", + "weights": M{"a": 1, "b": 1}, + "default_language": "english", + "language_override": "language", + "textIndexVersion": 2, + } + c.Assert(result5, DeepEquals, expected5) + + delete(result6, "v") + expected6 := M{ + "name": "a_text", + "key": M{"_fts": "text", "_ftsx": 1}, + "ns": "mydb.mycoll2", + "weights": M{"a": 1}, + "default_language": "portuguese", + "language_override": "idioma", + "textIndexVersion": 2, + } + c.Assert(result6, DeepEquals, expected6) + // Ensure the index actually works for real. - err = coll.Insert(M{"a": 1, "b": 1}) + err = coll1.Insert(M{"a": 1, "b": 1}) c.Assert(err, IsNil) - err = coll.Insert(M{"a": 1, "b": 1}) + err = coll1.Insert(M{"a": 1, "b": 1}) c.Assert(err, ErrorMatches, ".*duplicate key error.*") c.Assert(mgo.IsDup(err), Equals, true) } From 8ab56ec54a14b409d041908b67b90ce53a23ff0b Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Sun, 12 Oct 2014 12:16:03 +0200 Subject: [PATCH 086/305] Clean up BuildInfo.GitVersion from suffixes. --- session.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/session.go b/session.go index d20cd00f8..0e9db249f 100644 --- a/session.go +++ b/session.go @@ -3586,6 +3586,11 @@ func (s *Session) BuildInfo() (info BuildInfo, err error) { for len(info.VersionArray) < 4 { info.VersionArray = append(info.VersionArray, 0) } + if i := strings.IndexByte(info.GitVersion, ' '); i >= 0 { + // Strip off the " modules: enterprise" suffix. This is a _git version_. + // That information may be moved to another field if people need it. 
+ info.GitVersion = info.GitVersion[:i] + } return } From c1b9c9f28c22826cd96fb72faeeebe6064ab7347 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Sun, 12 Oct 2014 14:16:06 +0200 Subject: [PATCH 087/305] Make X509 auth test work repeatedly and automatically. --- auth.go | 2 +- auth_test.go | 2 +- auth_x509_test.go | 42 ++++++++++++++++--------------------- session.go | 15 ++++++------- testdb/dropall.js | 9 ++++++-- testdb/init.js | 7 +++++++ testdb/setup.sh | 23 +++++++------------- testdb/supervisord-ssl.conf | 3 --- testdb/supervisord.conf | 3 +++ 9 files changed, 52 insertions(+), 54 deletions(-) delete mode 100644 testdb/supervisord-ssl.conf diff --git a/auth.go b/auth.go index 66d538279..1761d0d6b 100644 --- a/auth.go +++ b/auth.go @@ -236,7 +236,7 @@ func (socket *mongoSocket) loginClassic(cred Credential) error { } type authX509Cmd struct { - Authenticate int // "authenticate" is the mongo command. The value, afaik, is not used. + Authenticate int User string Mechanism string } diff --git a/auth_test.go b/auth_test.go index 75722fe72..85326a167 100644 --- a/auth_test.go +++ b/auth_test.go @@ -160,7 +160,7 @@ func (s *S) TestAuthUpsertUserErrors(c *C) { c.Assert(err, ErrorMatches, "user has both Password/PasswordHash and UserSource set") err = mydb.UpsertUser(&mgo.User{Username: "user", Password: "pass", OtherDBRoles: map[string][]mgo.Role{"db": nil}}) - c.Assert(err, ErrorMatches, "user with OtherDBRoles is only supported in admin database") + c.Assert(err, ErrorMatches, "user with OtherDBRoles is only supported in the admin or \\$external databases") } func (s *S) TestAuthUpsertUser(c *C) { diff --git a/auth_x509_test.go b/auth_x509_test.go index 82ad43a33..96021b97c 100644 --- a/auth_x509_test.go +++ b/auth_x509_test.go @@ -2,7 +2,6 @@ package mgo_test import ( "crypto/tls" - "flag" "gopkg.in/mgo.v2" "io/ioutil" "net" @@ -10,21 +9,14 @@ import ( . 
"gopkg.in/check.v1" ) -/* to run this test: - you need to have an ssl enabled mongod - you need to setup the mongod's with "testdb/setup.sh start ssl" instead of using make - you need to run to "go test" with the -x509 flag -*/ - -var ( - x509Flag = flag.Bool("x509", false, "Test x509 authentication (depends on having an ssl enabled mongd)") - x509Subject = "CN=localhost,OU=Client,O=MGO,L=MGO,ST=MGO,C=GO" // this needs to be kept in sync with client.pem - x509AuthUrl = "localhost:40301" -) - func (s *S) TestAuthx509Cred(c *C) { - if !*x509Flag { - c.Skip("no -x509") + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + binfo, err := session.BuildInfo() + c.Assert(err, IsNil) + if binfo.OpenSSLVersion == "" { + c.Skip("server does not support SSL") } clientCertPEM, err := ioutil.ReadFile("testdb/client.pem") @@ -39,9 +31,10 @@ func (s *S) TestAuthx509Cred(c *C) { Certificates: []tls.Certificate{clientCert}, } - c.Logf("Connecting to %s...", x509AuthUrl) - session, err := mgo.DialWithInfo(&mgo.DialInfo{ - Addrs: []string{x509AuthUrl}, + var host = "localhost:40003" + c.Logf("Connecting to %s...", host) + session, err = mgo.DialWithInfo(&mgo.DialInfo{ + Addrs: []string{host}, DialServer: func(addr *mgo.ServerAddr) (net.Conn, error) { return tls.Dial("tcp", addr.String(), tlsConfig) }, @@ -49,16 +42,17 @@ func (s *S) TestAuthx509Cred(c *C) { c.Assert(err, IsNil) defer session.Close() - adminDB := session.DB("admin") - var adminUser mgo.User = mgo.User{Username: "Admin", Password: "AdminPassword", Roles: []mgo.Role{mgo.RoleRoot}} - err = adminDB.UpsertUser(&adminUser) + err = session.Login(&mgo.Credential{Username: "root", Password: "rapadura"}) c.Assert(err, IsNil) - err = session.Login(&mgo.Credential{Username: "Admin", Password: "AdminPassword"}) - c.Assert(err, IsNil) + // This needs to be kept in sync with client.pem + x509Subject := "CN=localhost,OU=Client,O=MGO,L=MGO,ST=MGO,C=GO" externalDB := session.DB("$external") - var 
x509User mgo.User = mgo.User{Username: x509Subject, OtherDBRoles: map[string][]mgo.Role{"admin": []mgo.Role{mgo.RoleRoot}}} + var x509User mgo.User = mgo.User{ + Username: x509Subject, + OtherDBRoles: map[string][]mgo.Role{"admin": []mgo.Role{mgo.RoleRoot}}, + } err = externalDB.UpsertUser(&x509User) c.Assert(err, IsNil) diff --git a/session.go b/session.go index ec710b6e8..de3ad5639 100644 --- a/session.go +++ b/session.go @@ -3553,13 +3553,14 @@ func (q *Query) Apply(change Change, result interface{}) (info *ChangeInfo, err // internally assembled from the Version information for previous versions. // In both cases, VersionArray is guaranteed to have at least 4 entries. type BuildInfo struct { - Version string - VersionArray []int `bson:"versionArray"` // On MongoDB 2.0+; assembled from Version otherwise - GitVersion string `bson:"gitVersion"` - SysInfo string `bson:"sysInfo"` - Bits int - Debug bool - MaxObjectSize int `bson:"maxBsonObjectSize"` + Version string + VersionArray []int `bson:"versionArray"` // On MongoDB 2.0+; assembled from Version otherwise + GitVersion string `bson:"gitVersion"` + OpenSSLVersion string `bson:"OpenSSLVersion"` + SysInfo string `bson:"sysInfo"` + Bits int + Debug bool + MaxObjectSize int `bson:"maxBsonObjectSize"` } // VersionAtLeast returns whether the BuildInfo version is greater than or diff --git a/testdb/dropall.js b/testdb/dropall.js index ca1289263..232eca3c3 100644 --- a/testdb/dropall.js +++ b/testdb/dropall.js @@ -1,13 +1,19 @@ var ports = [40001, 40002, 40011, 40012, 40013, 40021, 40022, 40023, 40041, 40101, 40102, 40103, 40201, 40202, 40203] var auth = [40002, 40103, 40203, 40031] +var db1 = new Mongo("localhost:40001") + +if (db1.getDB("admin").serverBuildInfo().OpenSSLVersion != "") { + ports.push(40003) + auth.push(40003) +} for (var i in ports) { var port = ports[i] var server = "localhost:" + port var mongo = new Mongo("localhost:" + port) var admin = mongo.getDB("admin") - + for (var j in auth) { if (auth[j] == 
port) { admin.auth("root", "rapadura") @@ -27,7 +33,6 @@ for (var i in ports) { var result = admin.runCommand({"listDatabases": 1}) // Why is the command returning undefined!? while (typeof result.databases == "undefined") { - print("dropall.js: listing databases of :" + port + " got:", result) result = admin.runCommand({"listDatabases": 1}) } var dbs = result.databases diff --git a/testdb/init.js b/testdb/init.js index 02a6c61c8..7deb67e1c 100644 --- a/testdb/init.js +++ b/testdb/init.js @@ -32,6 +32,10 @@ for (var i = 0; i != 60; i++) { sleep(1000) } +function hasSSL() { + return db1.serverBuildInfo().OpenSSLVersion != "" +} + rs1a.runCommand({replSetInitiate: rs1cfg}) rs2a.runCommand({replSetInitiate: rs2cfg}) rs3a.runCommand({replSetInitiate: rs3cfg}) @@ -50,6 +54,9 @@ function configShards() { function configAuth() { var addrs = ["127.0.0.1:40002", "127.0.0.1:40203", "127.0.0.1:40031"] + if (hasSSL()) { + addrs.push("127.0.0.1:40003") + } for (var i in addrs) { var db = new Mongo(addrs[i]).getDB("admin") var v = db.serverBuildInfo().versionArray diff --git a/testdb/setup.sh b/testdb/setup.sh index d75e24b87..317e8e5ab 100755 --- a/testdb/setup.sh +++ b/testdb/setup.sh @@ -3,26 +3,17 @@ start() { mkdir _testdb cd _testdb - mkdir db1 db2 rs1a rs1b rs1c rs2a rs2b rs2c rs3a rs3b rs3c rs4a cfg1 cfg2 cfg3 - cp "../testdb/supervisord.conf" supervisord.conf - if [ -n "$1" ]; then - case "$1" in - -ssl) - mkdir "ssl1" - cat "../testdb/supervisord-ssl.conf" >> supervisord.conf - ;; - *) - echo "unknown setup.sh option $1" - exit 1 - ;; - esac - fi - ln -s ../testdb/server.pem server.pem + mkdir db1 db2 db3 rs1a rs1b rs1c rs2a rs2b rs2c rs3a rs3b rs3c rs4a cfg1 cfg2 cfg3 + cp ../testdb/supervisord.conf supervisord.conf + cp ../testdb/server.pem server.pem echo keyfile > keyfile chmod 600 keyfile + COUNT=$(grep '^\[program' supervisord.conf | wc -l | tr -d ' ') + if ! mongod --help | grep -q -- --ssl; then + COUNT=$(($COUNT - 1)) + fi echo "Running supervisord..." 
supervisord || ( echo "Supervisord failed executing ($?)" && exit 1 ) - COUNT=$(grep '^\[program' supervisord.conf | wc -l | tr -d ' ') echo "Supervisord is up, starting $COUNT processes..." for i in $(seq 10); do RUNNING=$(supervisorctl status | grep RUNNING | wc -l | tr -d ' ') diff --git a/testdb/supervisord-ssl.conf b/testdb/supervisord-ssl.conf deleted file mode 100644 index 1925b189d..000000000 --- a/testdb/supervisord-ssl.conf +++ /dev/null @@ -1,3 +0,0 @@ - -[program:ssl1] -command = mongod --sslMode requireSSL --auth --sslCAFile %(here)s/server.pem --sslPEMKeyFile %(here)s/server.pem --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --dbpath %(here)s/ssl1 --bind_ip=127.0.0.1 --port 40301 diff --git a/testdb/supervisord.conf b/testdb/supervisord.conf index b0aca01a9..1c2b859a2 100644 --- a/testdb/supervisord.conf +++ b/testdb/supervisord.conf @@ -19,6 +19,9 @@ command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssiz [program:db2] command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --dbpath %(here)s/db2 --bind_ip=127.0.0.1 --port 40002 --auth +[program:db3] +command = mongod -nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --dbpath %(here)s/db3 --bind_ip=127.0.0.1 --port 40003 --auth --sslMode preferSSL --sslCAFile %(here)s/server.pem --sslPEMKeyFile %(here)s/server.pem + [program:rs1a] command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs1 --dbpath %(here)s/rs1a --bind_ip=127.0.0.1 --port 40011 [program:rs1b] From dc6fe4aaa6343cda237de17f800350a65d8cc84d Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Sun, 12 Oct 2014 14:21:01 +0200 Subject: [PATCH 088/305] Move X509 auth test to auth test file. 
--- auth_test.go | 72 ++++++++++++++++++++++++++++++++++++++++++ auth_x509_test.go | 79 ----------------------------------------------- 2 files changed, 72 insertions(+), 79 deletions(-) delete mode 100644 auth_x509_test.go diff --git a/auth_test.go b/auth_test.go index 85326a167..a9c0b27f8 100644 --- a/auth_test.go +++ b/auth_test.go @@ -27,8 +27,11 @@ package mgo_test import ( + "crypto/tls" "flag" "fmt" + "io/ioutil" + "net" "net/url" "os" "runtime" @@ -891,6 +894,75 @@ func (s *S) TestAuthScramSha1URL(c *C) { c.Assert(err, Equals, mgo.ErrNotFound) } +func (s *S) TestAuthX509Cred(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + binfo, err := session.BuildInfo() + c.Assert(err, IsNil) + if binfo.OpenSSLVersion == "" { + c.Skip("server does not support SSL") + } + + clientCertPEM, err := ioutil.ReadFile("testdb/client.pem") + c.Assert(err, IsNil) + + clientCert, err := tls.X509KeyPair(clientCertPEM, clientCertPEM) + c.Assert(err, IsNil) + + tlsConfig := &tls.Config{ + // Isolating tests to client certs, don't care about server validation. 
+ InsecureSkipVerify: true, + Certificates: []tls.Certificate{clientCert}, + } + + var host = "localhost:40003" + c.Logf("Connecting to %s...", host) + session, err = mgo.DialWithInfo(&mgo.DialInfo{ + Addrs: []string{host}, + DialServer: func(addr *mgo.ServerAddr) (net.Conn, error) { + return tls.Dial("tcp", addr.String(), tlsConfig) + }, + }) + c.Assert(err, IsNil) + defer session.Close() + + err = session.Login(&mgo.Credential{Username: "root", Password: "rapadura"}) + c.Assert(err, IsNil) + + // This needs to be kept in sync with client.pem + x509Subject := "CN=localhost,OU=Client,O=MGO,L=MGO,ST=MGO,C=GO" + + externalDB := session.DB("$external") + var x509User mgo.User = mgo.User{ + Username: x509Subject, + OtherDBRoles: map[string][]mgo.Role{"admin": []mgo.Role{mgo.RoleRoot}}, + } + err = externalDB.UpsertUser(&x509User) + c.Assert(err, IsNil) + + session.LogoutAll() + + c.Logf("Connected! Ensuring authentication is required...") + names, err := session.DatabaseNames() + c.Assert(err, ErrorMatches, "not authorized .*") + + cred := &mgo.Credential{ + Username: x509Subject, + Mechanism: "MONGODB-X509", + Source: "$external", + } + + c.Logf("Authenticating...") + err = session.Login(cred) + c.Assert(err, IsNil) + c.Logf("Authenticated!") + + names, err = session.DatabaseNames() + c.Assert(err, IsNil) + c.Assert(len(names) > 0, Equals, true) +} + var ( plainFlag = flag.String("plain", "", "Host to test PLAIN authentication against (depends on custom environment)") plainUser = "einstein" diff --git a/auth_x509_test.go b/auth_x509_test.go deleted file mode 100644 index 96021b97c..000000000 --- a/auth_x509_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package mgo_test - -import ( - "crypto/tls" - "gopkg.in/mgo.v2" - "io/ioutil" - "net" - - . 
"gopkg.in/check.v1" -) - -func (s *S) TestAuthx509Cred(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - binfo, err := session.BuildInfo() - c.Assert(err, IsNil) - if binfo.OpenSSLVersion == "" { - c.Skip("server does not support SSL") - } - - clientCertPEM, err := ioutil.ReadFile("testdb/client.pem") - c.Assert(err, IsNil) - - clientCert, err := tls.X509KeyPair(clientCertPEM, clientCertPEM) - c.Assert(err, IsNil) - - tlsConfig := &tls.Config{ - // Isolating tests to client certs, don't care about server validation. - InsecureSkipVerify: true, - Certificates: []tls.Certificate{clientCert}, - } - - var host = "localhost:40003" - c.Logf("Connecting to %s...", host) - session, err = mgo.DialWithInfo(&mgo.DialInfo{ - Addrs: []string{host}, - DialServer: func(addr *mgo.ServerAddr) (net.Conn, error) { - return tls.Dial("tcp", addr.String(), tlsConfig) - }, - }) - c.Assert(err, IsNil) - defer session.Close() - - err = session.Login(&mgo.Credential{Username: "root", Password: "rapadura"}) - c.Assert(err, IsNil) - - // This needs to be kept in sync with client.pem - x509Subject := "CN=localhost,OU=Client,O=MGO,L=MGO,ST=MGO,C=GO" - - externalDB := session.DB("$external") - var x509User mgo.User = mgo.User{ - Username: x509Subject, - OtherDBRoles: map[string][]mgo.Role{"admin": []mgo.Role{mgo.RoleRoot}}, - } - err = externalDB.UpsertUser(&x509User) - c.Assert(err, IsNil) - - session.LogoutAll() - - c.Logf("Connected! 
Ensuring authentication is required...") - names, err := session.DatabaseNames() - c.Assert(err, ErrorMatches, "not authorized .*") - - cred := &mgo.Credential{ - Username: x509Subject, - Mechanism: "MONGODB-X509", - Source: "$external", - } - - c.Logf("Authenticating...") - err = session.Login(cred) - c.Assert(err, IsNil) - c.Logf("Authenticated!") - - names, err = session.DatabaseNames() - c.Assert(err, IsNil) - c.Assert(len(names) > 0, Equals, true) -} From 231ce7e0549b5ce60d35ed805bff505cab7098e4 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 13 Oct 2014 08:58:55 +0200 Subject: [PATCH 089/305] Add GridFile.SetUploadDate. Fixes #28. --- gridfs.go | 15 ++++++++++++++- gridfs_test.go | 28 ++++++++++++++++++++++++++++ 2 files changed, 42 insertions(+), 1 deletion(-) diff --git a/gridfs.go b/gridfs.go index 6eea5f8c5..3439462f7 100644 --- a/gridfs.go +++ b/gridfs.go @@ -481,6 +481,17 @@ func (file *GridFile) UploadDate() time.Time { return file.doc.UploadDate } +// SetUploadDate changes the file upload time. +// +// It is a runtime error to call this function when the file is not open +// for writing. +func (file *GridFile) SetUploadDate(t time.Time) { + file.assertMode(gfsWriting) + file.m.Lock() + file.doc.UploadDate = t + file.m.Unlock() +} + // Close flushes any pending changes in case the file is being written // to, waits for any background operations to finish, and closes the file. 
// @@ -515,7 +526,9 @@ func (file *GridFile) completeWrite() { return } hexsum := hex.EncodeToString(file.wsum.Sum(nil)) - file.doc.UploadDate = bson.Now() + if file.doc.UploadDate.IsZero() { + file.doc.UploadDate = bson.Now() + } file.doc.MD5 = hexsum file.err = file.gfs.Files.Insert(file.doc) file.gfs.Chunks.EnsureIndexKey("files_id", "n") diff --git a/gridfs_test.go b/gridfs_test.go index 7a9533449..9afd2454c 100644 --- a/gridfs_test.go +++ b/gridfs_test.go @@ -183,6 +183,34 @@ func (s *S) TestGridFSFileDetails(c *C) { c.Assert(result, DeepEquals, expected) } +func (s *S) TestGridFSSetUploadDate(c *C) { + session, err := mgo.Dial("localhost:40011") + c.Assert(err, IsNil) + defer session.Close() + + db := session.DB("mydb") + + gfs := db.GridFS("fs") + file, err := gfs.Create("") + c.Assert(err, IsNil) + + t := time.Date(2014, 1, 1, 1, 1, 1, 0, time.Local) + file.SetUploadDate(t) + + err = file.Close() + c.Assert(err, IsNil) + + // Check the file information. + result := M{} + err = db.C("fs.files").Find(nil).One(result) + c.Assert(err, IsNil) + + ud := result["uploadDate"].(time.Time) + if !ud.Equal(t) { + c.Fatalf("want upload date %s, got %s", t, ud) + } +} + func (s *S) TestGridFSCreateWithChunking(c *C) { session, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) From ed435dfdb740932b5e86e41c6a0383deb80a2d9b Mon Sep 17 00:00:00 2001 From: mike o'brien Date: Wed, 29 Oct 2014 09:46:20 -0400 Subject: [PATCH 090/305] add support for repairCursor command with test coverage --- session.go | 63 +++++++++++++++++++++++++++++++++++++++++++++++++ session_test.go | 42 +++++++++++++++++++++++++++++++++ 2 files changed, 105 insertions(+) diff --git a/session.go b/session.go index de3ad5639..327b7e80c 100644 --- a/session.go +++ b/session.go @@ -1845,6 +1845,15 @@ type pipeCmdCursor struct { BatchSize int `bson:"batchSize,omitempty"` } +type repairCmd struct { + RepairCursor string `bson:"repairCursor"` + Cursor *repairCmdCursor ",omitempty" +} + +type 
repairCmdCursor struct { + BatchSize int `bson:"batchSize,omitempty"` +} + // Pipe prepares a pipeline to aggregate. The pipeline document // must be a slice built in terms of the aggregation framework language. // @@ -1872,6 +1881,60 @@ func (c *Collection) Pipe(pipeline interface{}) *Pipe { } } +func (c *Collection) Repair() *Iter { + // Clone session and set it to strong mode so that the server + // used for the query may be safely obtained afterwards, if + // necessary for iteration when a cursor is received. + session := c.Database.Session + session.m.Lock() + batchSize := int(session.queryConfig.op.limit) + session.m.Unlock() + cloned := session.Clone() + cloned.SetMode(Strong, false) + defer cloned.Close() + c = c.With(cloned) + + iter := &Iter{ + session: session, + timeout: -1, + } + iter.gotReply.L = &iter.m + + var result struct { + Cursor struct { + FirstBatch []bson.Raw "firstBatch" + Id int64 + } + } + + cmd := repairCmd{ + RepairCursor: c.Name, + Cursor: &repairCmdCursor{batchSize}, + } + iter.err = c.Database.Run(cmd, &result) + if iter.err != nil { + return iter + } + docs := result.Cursor.FirstBatch + for i := range docs { + iter.docData.Push(docs[i].Data) + } + if result.Cursor.Id != 0 { + socket, err := cloned.acquireSocket(true) + if err != nil { + // Cloned session is in strong mode, and the query + // above succeeded. Should have a reserved socket. + panic("internal error: " + err.Error()) + } + iter.server = socket.Server() + socket.Release() + iter.op.cursorId = result.Cursor.Id + iter.op.collection = c.FullName + iter.op.replyFunc = iter.replyFunc() + } + return iter +} + // Iter executes the pipeline and returns an iterator capable of going // over all the generated results. 
func (p *Pipe) Iter() *Iter { diff --git a/session_test.go b/session_test.go index a80714e2b..3673a4a13 100644 --- a/session_test.go +++ b/session_test.go @@ -3192,6 +3192,48 @@ func (s *S) TestFsync(c *C) { c.Assert(err, IsNil) } +func (s *S) TestRepairCursor(c *C) { + if !s.versionAtLeast(2, 7) { + c.Skip("RepairCursor only works on 2.7+") + } + + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + session.SetBatch(2) + + coll := session.DB("mydb").C("mycoll3") + err = coll.DropCollection() + + ns := []int{0, 10, 20, 30, 40, 50} + for _, n := range ns { + coll.Insert(M{"n": n}) + } + + repairIter := coll.Repair() + + c.Assert(repairIter.Err(), IsNil) + + result := struct{ N int }{} + resultCounts := map[int]int{} + for repairIter.Next(&result) { + resultCounts[result.N]++ + } + + c.Assert(repairIter.Next(&result), Equals, false) + c.Assert(repairIter.Err(), IsNil) + c.Assert(repairIter.Close(), IsNil) + + /* Verify that the results of the repair cursor are valid. + The repair cursor can return multiple copies + of the same document, so to check correctness we only + need to verify that at least 1 of each document was returned. 
*/ + + for _, key := range ns { + c.Assert(resultCounts[key] > 0, Equals, true) + } +} + func (s *S) TestPipeIter(c *C) { if !s.versionAtLeast(2, 1) { c.Skip("Pipe only works on 2.1+") From 338f01b0bbb2f258455a600ecef69f82383b9a7b Mon Sep 17 00:00:00 2001 From: mike o'brien Date: Wed, 29 Oct 2014 10:16:18 -0400 Subject: [PATCH 091/305] update comments and docstrings for repairCursor --- session.go | 5 +++++ session_test.go | 8 ++++---- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/session.go b/session.go index 327b7e80c..be6abbde0 100644 --- a/session.go +++ b/session.go @@ -1881,6 +1881,11 @@ func (c *Collection) Pipe(pipeline interface{}) *Pipe { } } +// Repair calls the 'repairCursor' command (supported in mongo 2.7.8 and later) +// and returns an iterator to go through the results. +// This command requests the server to perform a best-effort attempt to recover +// all documents from the collection in cases of damaged data files, so it may +// return multiple copies of the same document. func (c *Collection) Repair() *Iter { // Clone session and set it to strong mode so that the server // used for the query may be safely obtained afterwards, if diff --git a/session_test.go b/session_test.go index 3673a4a13..87ae84944 100644 --- a/session_test.go +++ b/session_test.go @@ -3224,10 +3224,10 @@ func (s *S) TestRepairCursor(c *C) { c.Assert(repairIter.Err(), IsNil) c.Assert(repairIter.Close(), IsNil) - /* Verify that the results of the repair cursor are valid. - The repair cursor can return multiple copies - of the same document, so to check correctness we only - need to verify that at least 1 of each document was returned. */ + // Verify that the results of the repair cursor are valid. + // The repair cursor can return multiple copies + // of the same document, so to check correctness we only + // need to verify that at least 1 of each document was returned. 
for _, key := range ns { c.Assert(resultCounts[key] > 0, Equals, true) From 3fecc512fe874fa769063e07c0b8407c8d8792ef Mon Sep 17 00:00:00 2001 From: mike o'brien Date: Wed, 29 Oct 2014 14:14:29 -0400 Subject: [PATCH 092/305] improvements to docstring on Repair() method --- session.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/session.go b/session.go index be6abbde0..bca22e56c 100644 --- a/session.go +++ b/session.go @@ -1881,11 +1881,12 @@ func (c *Collection) Pipe(pipeline interface{}) *Pipe { } } -// Repair calls the 'repairCursor' command (supported in mongo 2.7.8 and later) -// and returns an iterator to go through the results. -// This command requests the server to perform a best-effort attempt to recover -// all documents from the collection in cases of damaged data files, so it may -// return multiple copies of the same document. +// Repair returns an iterator that goes over all recovered documents in the +// collection, in a best-effort manner. This is most useful when there are +// damaged data files. Multiple copies of the same document may be returned +// by the iterator. +// +// Repair is supported in MongoDB 2.7.8 and later. func (c *Collection) Repair() *Iter { // Clone session and set it to strong mode so that the server // used for the query may be safely obtained afterwards, if From 8c1ecfe7d8a0b5bc49a809f4c15955e514f77a80 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 3 Nov 2014 17:47:19 -0200 Subject: [PATCH 093/305] Move Repair method out of Pipe implementation. 
--- session.go | 112 ++++++++++++++++++++++++++--------------------------- 1 file changed, 56 insertions(+), 56 deletions(-) diff --git a/session.go b/session.go index bca22e56c..73305b299 100644 --- a/session.go +++ b/session.go @@ -1816,35 +1816,6 @@ func (c *Collection) Find(query interface{}) *Query { return q } -// FindId is a convenience helper equivalent to: -// -// query := collection.Find(bson.M{"_id": id}) -// -// See the Find method for more details. -func (c *Collection) FindId(id interface{}) *Query { - return c.Find(bson.D{{"_id", id}}) -} - -type Pipe struct { - session *Session - collection *Collection - pipeline interface{} - allowDisk bool - batchSize int -} - -type pipeCmd struct { - Aggregate string - Pipeline interface{} - Cursor *pipeCmdCursor ",omitempty" - Explain bool ",omitempty" - AllowDisk bool "allowDiskUse,omitempty" -} - -type pipeCmdCursor struct { - BatchSize int `bson:"batchSize,omitempty"` -} - type repairCmd struct { RepairCursor string `bson:"repairCursor"` Cursor *repairCmdCursor ",omitempty" @@ -1854,33 +1825,6 @@ type repairCmdCursor struct { BatchSize int `bson:"batchSize,omitempty"` } -// Pipe prepares a pipeline to aggregate. The pipeline document -// must be a slice built in terms of the aggregation framework language. 
-// -// For example: -// -// pipe := collection.Pipe([]bson.M{{"$match": bson.M{"name": "Otavio"}}}) -// iter := pipe.Iter() -// -// Relevant documentation: -// -// http://docs.mongodb.org/manual/reference/aggregation -// http://docs.mongodb.org/manual/applications/aggregation -// http://docs.mongodb.org/manual/tutorial/aggregation-examples -// -func (c *Collection) Pipe(pipeline interface{}) *Pipe { - session := c.Database.Session - session.m.Lock() - batchSize := int(session.queryConfig.op.limit) - session.m.Unlock() - return &Pipe{ - session: session, - collection: c, - pipeline: pipeline, - batchSize: batchSize, - } -} - // Repair returns an iterator that goes over all recovered documents in the // collection, in a best-effort manner. This is most useful when there are // damaged data files. Multiple copies of the same document may be returned @@ -1941,6 +1885,62 @@ func (c *Collection) Repair() *Iter { return iter } +// FindId is a convenience helper equivalent to: +// +// query := collection.Find(bson.M{"_id": id}) +// +// See the Find method for more details. +func (c *Collection) FindId(id interface{}) *Query { + return c.Find(bson.D{{"_id", id}}) +} + +type Pipe struct { + session *Session + collection *Collection + pipeline interface{} + allowDisk bool + batchSize int +} + +type pipeCmd struct { + Aggregate string + Pipeline interface{} + Cursor *pipeCmdCursor ",omitempty" + Explain bool ",omitempty" + AllowDisk bool "allowDiskUse,omitempty" +} + +type pipeCmdCursor struct { + BatchSize int `bson:"batchSize,omitempty"` +} + +// Pipe prepares a pipeline to aggregate. The pipeline document +// must be a slice built in terms of the aggregation framework language. 
+// +// For example: +// +// pipe := collection.Pipe([]bson.M{{"$match": bson.M{"name": "Otavio"}}}) +// iter := pipe.Iter() +// +// Relevant documentation: +// +// http://docs.mongodb.org/manual/reference/aggregation +// http://docs.mongodb.org/manual/applications/aggregation +// http://docs.mongodb.org/manual/tutorial/aggregation-examples +// +func (c *Collection) Pipe(pipeline interface{}) *Pipe { + session := c.Database.Session + session.m.Lock() + batchSize := int(session.queryConfig.op.limit) + session.m.Unlock() + return &Pipe{ + session: session, + collection: c, + pipeline: pipeline, + batchSize: batchSize, + } +} + // Iter executes the pipeline and returns an iterator capable of going // over all the generated results. func (p *Pipe) Iter() *Iter { From e2e914857713db7497cca2bd7fc0b030fc9cb22d Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Fri, 7 Nov 2014 12:25:03 -0200 Subject: [PATCH 094/305] Properly return ErrCursor on cursor timeouts. Fix by Daniel Gottlieb. --- session.go | 8 +++++++- session_test.go | 44 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+), 1 deletion(-) diff --git a/session.go b/session.go index 73305b299..94d555a45 100644 --- a/session.go +++ b/session.go @@ -120,7 +120,10 @@ type Iter struct { timedout bool } -var ErrNotFound = errors.New("not found") +var ( + ErrNotFound = errors.New("not found") + ErrCursor = errors.New("invalid cursor") +) const defaultPrefetch = 0.25 @@ -3775,6 +3778,9 @@ func (iter *Iter) replyFunc() replyFunc { if op != nil && op.cursorId != 0 { // It's a tailable cursor. iter.op.cursorId = op.cursorId + } else if op != nil && op.cursorId == 0 && op.flags&1 == 1 { + // Cursor likely timed out. 
+ iter.err = ErrCursor } else { iter.err = ErrNotFound } diff --git a/session_test.go b/session_test.go index 87ae84944..1aaf80a1c 100644 --- a/session_test.go +++ b/session_test.go @@ -1246,6 +1246,50 @@ func (s *S) TestFindIterLimit(c *C) { c.Assert(stats.SocketsInUse, Equals, 0) } +var cursorTimeout = flag.Bool("cursor-timeout", false, "Enable cursor timeout test") + +func (s *S) TestFindIterCursorTimeout(c *C) { + if !*cursorTimeout { + c.Skip("-cursor-timeout") + } + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + type Doc struct { + Id int "_id" + } + + coll := session.DB("test").C("test") + coll.Remove(nil) + for i := 0; i < 100; i++ { + err = coll.Insert(Doc{i}) + c.Assert(err, IsNil) + } + + session.SetBatch(1) + iter := coll.Find(nil).Iter() + var doc Doc + if !iter.Next(&doc) { + c.Fatalf("iterator failed to return any documents") + } + + for i := 10; i > 0; i-- { + c.Logf("Sleeping... %d minutes to go...", i) + time.Sleep(1*time.Minute + 2*time.Second) + } + + // Drain any existing documents that were fetched. 
+ if !iter.Next(&doc) { + c.Fatalf("iterator with timed out cursor failed to return previously cached document") + } + if iter.Next(&doc) { + c.Fatalf("timed out cursor returned document") + } + + c.Assert(iter.Err(), Equals, mgo.ErrCursor) +} + func (s *S) TestTooManyItemsLimitBug(c *C) { if *fast { c.Skip("-fast") From 4890377a975625a156d530718c118267c5256f4b Mon Sep 17 00:00:00 2001 From: Wisdom Omuya Date: Wed, 19 Nov 2014 15:47:34 -0500 Subject: [PATCH 095/305] Enable replica set name assertion in server discovery --- cluster.go | 33 ++++++- cluster_test.go | 230 ++++++++++++++++++++++++++++++++++++++++++++++-- server.go | 1 + session.go | 37 +++++--- 4 files changed, 278 insertions(+), 23 deletions(-) diff --git a/cluster.go b/cluster.go index 104dd3988..681a39e04 100644 --- a/cluster.go +++ b/cluster.go @@ -54,18 +54,25 @@ type mongoCluster struct { direct bool failFast bool syncCount uint + err error + setName string cachedIndex map[string]bool sync chan bool dial dialer } -func newCluster(userSeeds []string, direct, failFast bool, dial dialer) *mongoCluster { +var ( + ErrNoReachableServers = errors.New("no reachable servers") +) + +func newCluster(userSeeds []string, direct, failFast bool, dial dialer, setName string) *mongoCluster { cluster := &mongoCluster{ userSeeds: userSeeds, references: 1, direct: direct, failFast: failFast, dial: dial, + setName: setName, } cluster.serverSynced.L = cluster.RWMutex.RLocker() cluster.sync = make(chan bool, 1) @@ -131,7 +138,8 @@ type isMasterResult struct { Passives []string Tags bson.D Msg string - MaxWireVersion int `bson:"maxWireVersion"` + SetName string `bson:"setName"` + MaxWireVersion int `bson:"maxWireVersion"` } func (cluster *mongoCluster) isMaster(socket *mongoSocket, result *isMasterResult) error { @@ -198,6 +206,12 @@ func (cluster *mongoCluster) syncServer(server *mongoServer) (info *mongoServerI break } + if cluster.setName != "" && result.SetName != cluster.setName { + log("SYNC Server ", addr, " not a 
member of replica set ", cluster.setName) + cluster.err = errors.New(addr + " is not part of " + cluster.setName + " replica set") + return nil, nil, cluster.err + } + if result.IsMaster { debugf("SYNC %s is a master.", addr) // Made an incorrect assumption above, so fix stats. @@ -218,6 +232,7 @@ func (cluster *mongoCluster) syncServer(server *mongoServer) (info *mongoServerI Master: result.IsMaster, Mongos: result.Msg == "isdbgrid", Tags: result.Tags, + SetName: result.SetName, MaxWireVersion: result.MaxWireVersion, } @@ -250,7 +265,14 @@ func (cluster *mongoCluster) addServer(server *mongoServer, info *mongoServerInf log("SYNC Discarding unknown server ", server.Addr, " due to partial sync.") return } - cluster.servers.Add(server) + if cluster.setName != "" && info.SetName != cluster.setName { + log("SYNC Discarding ", server.Addr, " not part of ", cluster.setName, " replica set.") + cluster.Unlock() + server.Close() + return + } else { + cluster.servers.Add(server) + } if info.Master { cluster.masters.Add(server) log("SYNC Adding ", server.Addr, " to cluster as a master.") @@ -539,7 +561,10 @@ func (cluster *mongoCluster) AcquireSocket(slaveOk bool, syncTimeout time.Durati syncCount = cluster.syncCount } else if syncTimeout != 0 && started.Before(time.Now().Add(-syncTimeout)) || cluster.failFast && cluster.syncCount != syncCount { cluster.RUnlock() - return nil, errors.New("no reachable servers") + if cluster.err != nil { + return nil, cluster.err + } + return nil, ErrNoReachableServers } log("Waiting for servers to synchronize...") cluster.syncServers() diff --git a/cluster_test.go b/cluster_test.go index a29bd6f16..a7ea49f13 100644 --- a/cluster_test.go +++ b/cluster_test.go @@ -814,7 +814,7 @@ func (s *S) TestSyncTimeout(c *C) { // Do something. 
result := struct{ Ok bool }{} err = session.Run("getLastError", &result) - c.Assert(err, ErrorMatches, "no reachable servers") + c.Assert(err, Equals, mgo.ErrNoReachableServers) c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true) c.Assert(started.After(time.Now().Add(-timeout*2)), Equals, true) } @@ -832,7 +832,7 @@ func (s *S) TestDialWithTimeout(c *C) { if session != nil { session.Close() } - c.Assert(err, ErrorMatches, "no reachable servers") + c.Assert(err, Equals, mgo.ErrNoReachableServers) c.Assert(session, IsNil) c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true) c.Assert(started.After(time.Now().Add(-timeout*2)), Equals, true) @@ -875,7 +875,7 @@ func (s *S) TestSocketTimeoutOnDial(c *C) { started := time.Now() session, err := mgo.DialWithTimeout("localhost:40001", timeout) - c.Assert(err, ErrorMatches, "no reachable servers") + c.Assert(err, Equals, mgo.ErrNoReachableServers) c.Assert(session, IsNil) c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true) @@ -907,6 +907,224 @@ func (s *S) TestSocketTimeoutOnInactiveSocket(c *C) { c.Assert(session.Ping(), IsNil) } +func (s *S) TestDialWithKnownReplPrimary(c *C) { + // port 40011 is used by replica set rs1's primary + info := mgo.DialInfo{ + Addrs: []string{"localhost:40011"}, + Timeout: 5 * time.Second, + ReplicaSetName: "rs1", + } + connectionUrl := "mongodb://localhost:40011/?replicaSet=rs1" + + runTest := func(session *mgo.Session, err error) { + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + coll.Insert(M{"a": 1, "b": 2}) + + result := struct{ Ok bool }{} + err = session.Run("getLastError", &result) + c.Assert(err, IsNil) + c.Assert(result.Ok, Equals, true) + } + + session, err := mgo.DialWithInfo(&info) + runTest(session, err) + session, err = mgo.Dial(connectionUrl) + runTest(session, err) +} + +func (s *S) TestDialWithKnownReplSecondary(c *C) { + // port 40012 is used by replica set rs1's secondary + info := mgo.DialInfo{ 
+ Addrs: []string{"localhost:40012"}, + Timeout: 5 * time.Second, + ReplicaSetName: "rs1", + } + connectionUrl := "mongodb://localhost:40012/?replicaSet=rs1" + + runTest := func(session *mgo.Session, err error) { + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + coll.Insert(M{"a": 1, "b": 2}) + + result := struct{ Ok bool }{} + err = session.Run("getLastError", &result) + c.Assert(err, IsNil) + c.Assert(result.Ok, Equals, true) + } + + session, err := mgo.DialWithInfo(&info) + runTest(session, err) + session, err = mgo.Dial(connectionUrl) + runTest(session, err) +} + +var foreignMemberErrorRegex = ".*not part of.*" + +func (s *S) TestDialWithForeignReplPrimary(c *C) { + if *fast { + c.Skip("-fast") + } + + // port 40021 is used by replica set rs2's primary + info := mgo.DialInfo{ + Addrs: []string{"localhost:40021"}, + Timeout: 5 * time.Second, + ReplicaSetName: "rs1", + } + _, err := mgo.DialWithInfo(&info) + c.Assert(err, ErrorMatches, foreignMemberErrorRegex) + + info.Direct = true + _, err = mgo.DialWithInfo(&info) + c.Assert(err, ErrorMatches, foreignMemberErrorRegex) + + connectionUrl := "mongodb://localhost:40021/?replicaSet=rs1" + _, err = mgo.Dial(connectionUrl) + c.Assert(err, ErrorMatches, foreignMemberErrorRegex) + + connectionUrl += "&connect=direct" + _, err = mgo.Dial(connectionUrl) + c.Assert(err, ErrorMatches, foreignMemberErrorRegex) +} + +func (s *S) TestDialWithForeignReplSecondary(c *C) { + if *fast { + c.Skip("-fast") + } + + // port 40022 is used by replica set rs2's secondary + info := mgo.DialInfo{ + Addrs: []string{"localhost:40022"}, + Timeout: 5 * time.Second, + ReplicaSetName: "rs1", + } + _, err := mgo.DialWithInfo(&info) + c.Assert(err, ErrorMatches, foreignMemberErrorRegex) + + info.Direct = true + _, err = mgo.DialWithInfo(&info) + c.Assert(err, ErrorMatches, foreignMemberErrorRegex) + + connectionUrl := "mongodb://localhost:40022/?replicaSet=rs1" + _, err = mgo.Dial(connectionUrl) + 
c.Assert(err, ErrorMatches, foreignMemberErrorRegex) + + connectionUrl += "&connect=direct" + _, err = mgo.Dial(connectionUrl) + c.Assert(err, ErrorMatches, foreignMemberErrorRegex) +} + +func (s *S) TestDialWithMixedPrimaries(c *C) { + // port 40011 is used by replica set rs1's primary + // port 40021 is used by replica set rs2's primary + info := mgo.DialInfo{ + Addrs: []string{"localhost:40011", "localhost:40021"}, + Timeout: 5 * time.Second, + ReplicaSetName: "rs1", + } + + session, err := mgo.DialWithInfo(&info) + c.Assert(err, IsNil) + session.Close() + + info.Direct = true + session, err = mgo.DialWithInfo(&info) + c.Assert(err, IsNil) + session.Close() + + connectionUrl := "mongodb://localhost:40011,localhost:40021/?replicaSet=rs1" + session, err = mgo.Dial(connectionUrl) + c.Assert(err, IsNil) + session.Close() + + connectionUrl += "&connect=direct" + session, err = mgo.Dial(connectionUrl) + c.Assert(err, IsNil) + session.Close() +} + +func (s *S) TestDialWithMixedSecondaries(c *C) { + // port 40012 is used by replica set rs1's secondary + // port 40022 is used by replica set rs2's secondary + info := mgo.DialInfo{ + Addrs: []string{"localhost:40012", "localhost:40022"}, + Timeout: 5 * time.Second, + ReplicaSetName: "rs1", + } + + session, err := mgo.DialWithInfo(&info) + c.Assert(err, IsNil) + session.Close() + + info.Direct = true + session, err = mgo.DialWithInfo(&info) + c.Assert(err, IsNil) + session.Close() + + connectionUrl := "mongodb://localhost:40012,localhost:40022/?replicaSet=rs1" + session, err = mgo.Dial(connectionUrl) + c.Assert(err, IsNil) + session.Close() + + connectionUrl += "&connect=direct" + session, err = mgo.Dial(connectionUrl) + c.Assert(err, IsNil) + session.Close() +} + +func (s *S) TestDialWithForeignSeeds(c *C) { + if *fast { + c.Skip("-fast") + } + + // port 40021 is used by replica set rs2's primary + // port 40022 is used by replica set rs2's secondary + info := mgo.DialInfo{ + Addrs: []string{"localhost:40021", 
"localhost:40022"}, + Timeout: 5 * time.Second, + ReplicaSetName: "rs1", + } + + _, err := mgo.DialWithInfo(&info) + c.Assert(err, ErrorMatches, foreignMemberErrorRegex) + + info.Direct = true + _, err = mgo.DialWithInfo(&info) + c.Assert(err, ErrorMatches, foreignMemberErrorRegex) + + connectionUrl := "mongodb://localhost:40021,localhost:40022/?replicaSet=rs1" + _, err = mgo.Dial(connectionUrl) + c.Assert(err, ErrorMatches, foreignMemberErrorRegex) + + connectionUrl += "&connect=direct" + _, err = mgo.Dial(connectionUrl) + c.Assert(err, ErrorMatches, foreignMemberErrorRegex) +} + +func (s *S) TestDialWithUnknownSeeds(c *C) { + if *fast { + c.Skip("-fast") + } + + info := mgo.DialInfo{ + Addrs: []string{"localhost:54321", "localhost:12345"}, + Timeout: 5 * time.Second, + ReplicaSetName: "rs1", + } + + _, err := mgo.DialWithInfo(&info) + c.Assert(err, Equals, mgo.ErrNoReachableServers) + + connectionUrl := "mongodb://localhost:54321,localhost:12345/?replicaSet=rs1" + _, err = mgo.Dial(connectionUrl) + c.Assert(err, Equals, mgo.ErrNoReachableServers) +} + func (s *S) TestDirect(c *C) { session, err := mgo.Dial("localhost:40012?connect=direct") c.Assert(err, IsNil) @@ -930,7 +1148,7 @@ func (s *S) TestDirect(c *C) { coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"test": 1}) - c.Assert(err, ErrorMatches, "no reachable servers") + c.Assert(err, Equals, mgo.ErrNoReachableServers) // Writing to the local database is okay. coll = session.DB("local").C("mycoll") @@ -968,7 +1186,7 @@ func (s *S) TestDirectToUnknownStateMember(c *C) { coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"test": 1}) - c.Assert(err, ErrorMatches, "no reachable servers") + c.Assert(err, Equals, mgo.ErrNoReachableServers) // Slave is still reachable. 
result.Host = "" @@ -987,7 +1205,7 @@ func (s *S) TestFailFast(c *C) { started := time.Now() _, err := mgo.DialWithInfo(&info) - c.Assert(err, ErrorMatches, "no reachable servers") + c.Assert(err, Equals, mgo.ErrNoReachableServers) c.Assert(started.After(time.Now().Add(-time.Second)), Equals, true) } diff --git a/server.go b/server.go index 8c130bed9..d5086a290 100644 --- a/server.go +++ b/server.go @@ -71,6 +71,7 @@ type mongoServerInfo struct { Mongos bool Tags bson.D MaxWireVersion int + SetName string } var defaultServerInfo mongoServerInfo diff --git a/session.go b/session.go index 94d555a45..0a749d412 100644 --- a/session.go +++ b/session.go @@ -122,7 +122,7 @@ type Iter struct { var ( ErrNotFound = errors.New("not found") - ErrCursor = errors.New("invalid cursor") + ErrCursor = errors.New("invalid cursor") ) const defaultPrefetch = 0.25 @@ -226,6 +226,7 @@ func DialWithTimeout(url string, timeout time.Duration) (*Session, error) { mechanism := "" service := "" source := "" + setName := "" poolLimit := 0 for k, v := range uinfo.options { switch k { @@ -235,6 +236,8 @@ func DialWithTimeout(url string, timeout time.Duration) (*Session, error) { mechanism = v case "gssapiServiceName": service = v + case "replicaSet": + setName = v case "maxPoolSize": poolLimit, err = strconv.Atoi(v) if err != nil { @@ -254,16 +257,17 @@ func DialWithTimeout(url string, timeout time.Duration) (*Session, error) { } } info := DialInfo{ - Addrs: uinfo.addrs, - Direct: direct, - Timeout: timeout, - Database: uinfo.db, - Username: uinfo.user, - Password: uinfo.pass, - Mechanism: mechanism, - Service: service, - Source: source, - PoolLimit: poolLimit, + Addrs: uinfo.addrs, + Direct: direct, + Timeout: timeout, + Database: uinfo.db, + Username: uinfo.user, + Password: uinfo.pass, + Mechanism: mechanism, + Service: service, + Source: source, + PoolLimit: poolLimit, + ReplicaSetName: setName, } return DialWithInfo(&info) } @@ -294,9 +298,16 @@ type DialInfo struct { // Database is the 
default database name used when the Session.DB method // is called with an empty name, and is also used during the intial - // authenticatoin if Source is unset. + // authentication if Source is unset. Database string + // ReplicaSetName defines the name of the replica set to use for cluster + // discovery and monitoring. If specified, at least one of the seed servers + // must be a member of the ReplicaSetName replica set - non-members are + // ignored. If unspecified, the intended connection is assumed to be either + // with individual servers, or one or multiple mongos routers. + ReplicaSetName string + // Source is the database used to establish credentials and privileges // with a MongoDB server. Defaults to the value of Database, if that is // set, or "admin" otherwise. @@ -360,7 +371,7 @@ func DialWithInfo(info *DialInfo) (*Session, error) { } addrs[i] = addr } - cluster := newCluster(addrs, info.Direct, info.FailFast, dialer{info.Dial, info.DialServer}) + cluster := newCluster(addrs, info.Direct, info.FailFast, dialer{info.Dial, info.DialServer}, info.ReplicaSetName) session := newSession(Eventual, cluster, info.Timeout) session.defaultdb = info.Database if session.defaultdb == "" { From 6b6fb2943082a34e1658420b6001f5972711d409 Mon Sep 17 00:00:00 2001 From: Nicholas Katsaros Date: Fri, 5 Dec 2014 11:09:37 -0500 Subject: [PATCH 096/305] Fixed typo. --- session.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/session.go b/session.go index 94d555a45..e5bd17b27 100644 --- a/session.go +++ b/session.go @@ -294,7 +294,7 @@ type DialInfo struct { // Database is the default database name used when the Session.DB method // is called with an empty name, and is also used during the intial - // authenticatoin if Source is unset. + // authentication if Source is unset. 
Database string // Source is the database used to establish credentials and privileges From c50bc8e3def90854625b3ba34cc7e94f46da80eb Mon Sep 17 00:00:00 2001 From: Wisdom Omuya Date: Tue, 9 Dec 2014 19:14:05 -0500 Subject: [PATCH 097/305] Don't use cluster.err --- cluster.go | 16 +++------------- cluster_test.go | 42 ++++++++++++++++++++---------------------- 2 files changed, 23 insertions(+), 35 deletions(-) diff --git a/cluster.go b/cluster.go index 681a39e04..320b5b986 100644 --- a/cluster.go +++ b/cluster.go @@ -54,17 +54,12 @@ type mongoCluster struct { direct bool failFast bool syncCount uint - err error setName string cachedIndex map[string]bool sync chan bool dial dialer } -var ( - ErrNoReachableServers = errors.New("no reachable servers") -) - func newCluster(userSeeds []string, direct, failFast bool, dial dialer, setName string) *mongoCluster { cluster := &mongoCluster{ userSeeds: userSeeds, @@ -208,8 +203,7 @@ func (cluster *mongoCluster) syncServer(server *mongoServer) (info *mongoServerI if cluster.setName != "" && result.SetName != cluster.setName { log("SYNC Server ", addr, " not a member of replica set ", cluster.setName) - cluster.err = errors.New(addr + " is not part of " + cluster.setName + " replica set") - return nil, nil, cluster.err + return nil, nil, errors.New(addr + " is not part of " + cluster.setName + " replica set") } if result.IsMaster { @@ -270,9 +264,8 @@ func (cluster *mongoCluster) addServer(server *mongoServer, info *mongoServerInf cluster.Unlock() server.Close() return - } else { - cluster.servers.Add(server) } + cluster.servers.Add(server) if info.Master { cluster.masters.Add(server) log("SYNC Adding ", server.Addr, " to cluster as a master.") @@ -561,10 +554,7 @@ func (cluster *mongoCluster) AcquireSocket(slaveOk bool, syncTimeout time.Durati syncCount = cluster.syncCount } else if syncTimeout != 0 && started.Before(time.Now().Add(-syncTimeout)) || cluster.failFast && cluster.syncCount != syncCount { cluster.RUnlock() - if 
cluster.err != nil { - return nil, cluster.err - } - return nil, ErrNoReachableServers + return nil, errors.New("no reachable servers") } log("Waiting for servers to synchronize...") cluster.syncServers() diff --git a/cluster_test.go b/cluster_test.go index a7ea49f13..6cad192ff 100644 --- a/cluster_test.go +++ b/cluster_test.go @@ -814,7 +814,7 @@ func (s *S) TestSyncTimeout(c *C) { // Do something. result := struct{ Ok bool }{} err = session.Run("getLastError", &result) - c.Assert(err, Equals, mgo.ErrNoReachableServers) + c.Assert(err, ErrorMatches, "no reachable servers") c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true) c.Assert(started.After(time.Now().Add(-timeout*2)), Equals, true) } @@ -832,7 +832,7 @@ func (s *S) TestDialWithTimeout(c *C) { if session != nil { session.Close() } - c.Assert(err, Equals, mgo.ErrNoReachableServers) + c.Assert(err, ErrorMatches, "no reachable servers") c.Assert(session, IsNil) c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true) c.Assert(started.After(time.Now().Add(-timeout*2)), Equals, true) @@ -875,7 +875,7 @@ func (s *S) TestSocketTimeoutOnDial(c *C) { started := time.Now() session, err := mgo.DialWithTimeout("localhost:40001", timeout) - c.Assert(err, Equals, mgo.ErrNoReachableServers) + c.Assert(err, ErrorMatches, "no reachable servers") c.Assert(session, IsNil) c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true) @@ -963,8 +963,6 @@ func (s *S) TestDialWithKnownReplSecondary(c *C) { runTest(session, err) } -var foreignMemberErrorRegex = ".*not part of.*" - func (s *S) TestDialWithForeignReplPrimary(c *C) { if *fast { c.Skip("-fast") @@ -977,19 +975,19 @@ func (s *S) TestDialWithForeignReplPrimary(c *C) { ReplicaSetName: "rs1", } _, err := mgo.DialWithInfo(&info) - c.Assert(err, ErrorMatches, foreignMemberErrorRegex) + c.Assert(err, ErrorMatches, "no reachable servers") info.Direct = true _, err = mgo.DialWithInfo(&info) - c.Assert(err, ErrorMatches, foreignMemberErrorRegex) + 
c.Assert(err, ErrorMatches, "no reachable servers") connectionUrl := "mongodb://localhost:40021/?replicaSet=rs1" _, err = mgo.Dial(connectionUrl) - c.Assert(err, ErrorMatches, foreignMemberErrorRegex) + c.Assert(err, ErrorMatches, "no reachable servers") connectionUrl += "&connect=direct" _, err = mgo.Dial(connectionUrl) - c.Assert(err, ErrorMatches, foreignMemberErrorRegex) + c.Assert(err, ErrorMatches, "no reachable servers") } func (s *S) TestDialWithForeignReplSecondary(c *C) { @@ -1004,19 +1002,19 @@ func (s *S) TestDialWithForeignReplSecondary(c *C) { ReplicaSetName: "rs1", } _, err := mgo.DialWithInfo(&info) - c.Assert(err, ErrorMatches, foreignMemberErrorRegex) + c.Assert(err, ErrorMatches, "no reachable servers") info.Direct = true _, err = mgo.DialWithInfo(&info) - c.Assert(err, ErrorMatches, foreignMemberErrorRegex) + c.Assert(err, ErrorMatches, "no reachable servers") connectionUrl := "mongodb://localhost:40022/?replicaSet=rs1" _, err = mgo.Dial(connectionUrl) - c.Assert(err, ErrorMatches, foreignMemberErrorRegex) + c.Assert(err, ErrorMatches, "no reachable servers") connectionUrl += "&connect=direct" _, err = mgo.Dial(connectionUrl) - c.Assert(err, ErrorMatches, foreignMemberErrorRegex) + c.Assert(err, ErrorMatches, "no reachable servers") } func (s *S) TestDialWithMixedPrimaries(c *C) { @@ -1091,19 +1089,19 @@ func (s *S) TestDialWithForeignSeeds(c *C) { } _, err := mgo.DialWithInfo(&info) - c.Assert(err, ErrorMatches, foreignMemberErrorRegex) + c.Assert(err, ErrorMatches, "no reachable servers") info.Direct = true _, err = mgo.DialWithInfo(&info) - c.Assert(err, ErrorMatches, foreignMemberErrorRegex) + c.Assert(err, ErrorMatches, "no reachable servers") connectionUrl := "mongodb://localhost:40021,localhost:40022/?replicaSet=rs1" _, err = mgo.Dial(connectionUrl) - c.Assert(err, ErrorMatches, foreignMemberErrorRegex) + c.Assert(err, ErrorMatches, "no reachable servers") connectionUrl += "&connect=direct" _, err = mgo.Dial(connectionUrl) - c.Assert(err, 
ErrorMatches, foreignMemberErrorRegex) + c.Assert(err, ErrorMatches, "no reachable servers") } func (s *S) TestDialWithUnknownSeeds(c *C) { @@ -1118,11 +1116,11 @@ func (s *S) TestDialWithUnknownSeeds(c *C) { } _, err := mgo.DialWithInfo(&info) - c.Assert(err, Equals, mgo.ErrNoReachableServers) + c.Assert(err, ErrorMatches, "no reachable servers") connectionUrl := "mongodb://localhost:54321,localhost:12345/?replicaSet=rs1" _, err = mgo.Dial(connectionUrl) - c.Assert(err, Equals, mgo.ErrNoReachableServers) + c.Assert(err, ErrorMatches, "no reachable servers") } func (s *S) TestDirect(c *C) { @@ -1148,7 +1146,7 @@ func (s *S) TestDirect(c *C) { coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"test": 1}) - c.Assert(err, Equals, mgo.ErrNoReachableServers) + c.Assert(err, ErrorMatches, "no reachable servers") // Writing to the local database is okay. coll = session.DB("local").C("mycoll") @@ -1186,7 +1184,7 @@ func (s *S) TestDirectToUnknownStateMember(c *C) { coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"test": 1}) - c.Assert(err, Equals, mgo.ErrNoReachableServers) + c.Assert(err, ErrorMatches, "no reachable servers") // Slave is still reachable. 
result.Host = "" @@ -1205,7 +1203,7 @@ func (s *S) TestFailFast(c *C) { started := time.Now() _, err := mgo.DialWithInfo(&info) - c.Assert(err, Equals, mgo.ErrNoReachableServers) + c.Assert(err, ErrorMatches, "no reachable servers") c.Assert(started.After(time.Now().Add(-time.Second)), Equals, true) } From dcf26a2099d33581eab0b5aca771e5b12fcc9b9f Mon Sep 17 00:00:00 2001 From: Wisdom Omuya Date: Mon, 15 Dec 2014 15:29:13 -0500 Subject: [PATCH 098/305] Check live servers are correct --- cluster_test.go | 100 ++++++++++++++++++++++++++---------------------- 1 file changed, 54 insertions(+), 46 deletions(-) diff --git a/cluster_test.go b/cluster_test.go index 6cad192ff..9435ef252 100644 --- a/cluster_test.go +++ b/cluster_test.go @@ -936,7 +936,7 @@ func (s *S) TestDialWithKnownReplPrimary(c *C) { } func (s *S) TestDialWithKnownReplSecondary(c *C) { - // port 40012 is used by replica set rs1's secondary + // port 40012 is used by an rs1 replica set secondary info := mgo.DialInfo{ Addrs: []string{"localhost:40012"}, Timeout: 5 * time.Second, @@ -995,7 +995,7 @@ func (s *S) TestDialWithForeignReplSecondary(c *C) { c.Skip("-fast") } - // port 40022 is used by replica set rs2's secondary + // port 40022 is used by an rs2 replica set secondary info := mgo.DialInfo{ Addrs: []string{"localhost:40022"}, Timeout: 5 * time.Second, @@ -1017,62 +1017,70 @@ func (s *S) TestDialWithForeignReplSecondary(c *C) { c.Assert(err, ErrorMatches, "no reachable servers") } -func (s *S) TestDialWithMixedPrimaries(c *C) { +func (s *S) TestDialWithMixedSeedList(c *C) { // port 40011 is used by replica set rs1's primary // port 40021 is used by replica set rs2's primary - info := mgo.DialInfo{ - Addrs: []string{"localhost:40011", "localhost:40021"}, - Timeout: 5 * time.Second, - ReplicaSetName: "rs1", + + seedLists := [][]string{ + // rs1 primary and rs2 primary + []string{"localhost:40011", "localhost:40021"}, + // rs1 primary and rs2 secondary + []string{"localhost:40011", "localhost:40022"}, 
+ // rs1 secondary and rs2 primary + []string{"localhost:40012", "localhost:40021"}, + // rs1 secondary and rs2 secondary + []string{"localhost:40012", "localhost:40022"}, } - session, err := mgo.DialWithInfo(&info) - c.Assert(err, IsNil) - session.Close() + rs2Members := []string{":40021", ":40022", ":40023"} - info.Direct = true - session, err = mgo.DialWithInfo(&info) - c.Assert(err, IsNil) - session.Close() + verifySyncedServers := func(session *mgo.Session, numServers int) { + // wait for the server(s) to be synced + for len(session.LiveServers()) != numServers { + c.Log("Waiting for cluster sync to finish...") + time.Sleep(5e8) + } - connectionUrl := "mongodb://localhost:40011,localhost:40021/?replicaSet=rs1" - session, err = mgo.Dial(connectionUrl) - c.Assert(err, IsNil) - session.Close() + // ensure none of the rs2 set members are communicated with + for _, addr := range session.LiveServers() { + for _, rs2Member := range rs2Members { + c.Assert(strings.HasSuffix(addr, rs2Member), Equals, false) + } + } + } - connectionUrl += "&connect=direct" - session, err = mgo.Dial(connectionUrl) - c.Assert(err, IsNil) - session.Close() -} + // only communication with rs1 members is expected + for _, seedList := range seedLists { + info := mgo.DialInfo{ + Addrs: seedList, + Timeout: 5 * time.Second, + ReplicaSetName: "rs1", + } -func (s *S) TestDialWithMixedSecondaries(c *C) { - // port 40012 is used by replica set rs1's secondary - // port 40022 is used by replica set rs2's secondary - info := mgo.DialInfo{ - Addrs: []string{"localhost:40012", "localhost:40022"}, - Timeout: 5 * time.Second, - ReplicaSetName: "rs1", - } + session, err := mgo.DialWithInfo(&info) + c.Assert(err, IsNil) + verifySyncedServers(session, 3) + session.Close() - session, err := mgo.DialWithInfo(&info) - c.Assert(err, IsNil) - session.Close() + info.Direct = true + session, err = mgo.DialWithInfo(&info) + c.Assert(err, IsNil) + verifySyncedServers(session, 1) + session.Close() - info.Direct = 
true - session, err = mgo.DialWithInfo(&info) - c.Assert(err, IsNil) - session.Close() + connectionUrl := fmt.Sprintf("mongodb://%v/?replicaSet=rs1", strings.Join(seedList, ",")) + session, err = mgo.Dial(connectionUrl) + c.Assert(err, IsNil) + verifySyncedServers(session, 3) + session.Close() - connectionUrl := "mongodb://localhost:40012,localhost:40022/?replicaSet=rs1" - session, err = mgo.Dial(connectionUrl) - c.Assert(err, IsNil) - session.Close() + connectionUrl += "&connect=direct" + session, err = mgo.Dial(connectionUrl) + c.Assert(err, IsNil) + verifySyncedServers(session, 1) + session.Close() + } - connectionUrl += "&connect=direct" - session, err = mgo.Dial(connectionUrl) - c.Assert(err, IsNil) - session.Close() } func (s *S) TestDialWithForeignSeeds(c *C) { From 84603f4983eeaf751409ad2620640060714f7a4d Mon Sep 17 00:00:00 2001 From: Wisdom Omuya Date: Mon, 15 Dec 2014 16:44:53 -0500 Subject: [PATCH 099/305] Remove extraneous server check --- cluster.go | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/cluster.go b/cluster.go index 320b5b986..4e32be7d0 100644 --- a/cluster.go +++ b/cluster.go @@ -202,8 +202,8 @@ func (cluster *mongoCluster) syncServer(server *mongoServer) (info *mongoServerI } if cluster.setName != "" && result.SetName != cluster.setName { - log("SYNC Server ", addr, " not a member of replica set ", cluster.setName) - return nil, nil, errors.New(addr + " is not part of " + cluster.setName + " replica set") + log("SYNC Server ", addr, " is not a member of replica set ", cluster.setName) + return nil, nil, errors.New(addr + " is not a member of replica set " + cluster.setName) } if result.IsMaster { @@ -259,12 +259,6 @@ func (cluster *mongoCluster) addServer(server *mongoServer, info *mongoServerInf log("SYNC Discarding unknown server ", server.Addr, " due to partial sync.") return } - if cluster.setName != "" && info.SetName != cluster.setName { - log("SYNC Discarding ", server.Addr, " not part of ", 
cluster.setName, " replica set.") - cluster.Unlock() - server.Close() - return - } cluster.servers.Add(server) if info.Master { cluster.masters.Add(server) From e0d7e593b0ac44502220bf0de8fb5213b0b734e7 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Tue, 16 Dec 2014 16:29:51 -0200 Subject: [PATCH 100/305] Improve ReplicaSetName field doc, drop dup tests --- cluster.go | 5 +- cluster_test.go | 163 +----------------------------------------------- session.go | 9 ++- 3 files changed, 8 insertions(+), 169 deletions(-) diff --git a/cluster.go b/cluster.go index 4e32be7d0..bd5d9acc6 100644 --- a/cluster.go +++ b/cluster.go @@ -28,6 +28,7 @@ package mgo import ( "errors" + "fmt" "net" "sync" "time" @@ -202,8 +203,8 @@ func (cluster *mongoCluster) syncServer(server *mongoServer) (info *mongoServerI } if cluster.setName != "" && result.SetName != cluster.setName { - log("SYNC Server ", addr, " is not a member of replica set ", cluster.setName) - return nil, nil, errors.New(addr + " is not a member of replica set " + cluster.setName) + logf("SYNC Server %s is not a member of replica set %q", addr, cluster.setName) + return nil, nil, fmt.Errorf("server %s is not a member of replica set %q", addr, cluster.setName) } if result.IsMaster { diff --git a/cluster_test.go b/cluster_test.go index 9435ef252..cef5438a1 100644 --- a/cluster_test.go +++ b/cluster_test.go @@ -907,120 +907,7 @@ func (s *S) TestSocketTimeoutOnInactiveSocket(c *C) { c.Assert(session.Ping(), IsNil) } -func (s *S) TestDialWithKnownReplPrimary(c *C) { - // port 40011 is used by replica set rs1's primary - info := mgo.DialInfo{ - Addrs: []string{"localhost:40011"}, - Timeout: 5 * time.Second, - ReplicaSetName: "rs1", - } - connectionUrl := "mongodb://localhost:40011/?replicaSet=rs1" - - runTest := func(session *mgo.Session, err error) { - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - coll.Insert(M{"a": 1, "b": 2}) - - result := struct{ Ok bool }{} - err = 
session.Run("getLastError", &result) - c.Assert(err, IsNil) - c.Assert(result.Ok, Equals, true) - } - - session, err := mgo.DialWithInfo(&info) - runTest(session, err) - session, err = mgo.Dial(connectionUrl) - runTest(session, err) -} - -func (s *S) TestDialWithKnownReplSecondary(c *C) { - // port 40012 is used by an rs1 replica set secondary - info := mgo.DialInfo{ - Addrs: []string{"localhost:40012"}, - Timeout: 5 * time.Second, - ReplicaSetName: "rs1", - } - connectionUrl := "mongodb://localhost:40012/?replicaSet=rs1" - - runTest := func(session *mgo.Session, err error) { - c.Assert(err, IsNil) - defer session.Close() - - coll := session.DB("mydb").C("mycoll") - coll.Insert(M{"a": 1, "b": 2}) - - result := struct{ Ok bool }{} - err = session.Run("getLastError", &result) - c.Assert(err, IsNil) - c.Assert(result.Ok, Equals, true) - } - - session, err := mgo.DialWithInfo(&info) - runTest(session, err) - session, err = mgo.Dial(connectionUrl) - runTest(session, err) -} - -func (s *S) TestDialWithForeignReplPrimary(c *C) { - if *fast { - c.Skip("-fast") - } - - // port 40021 is used by replica set rs2's primary - info := mgo.DialInfo{ - Addrs: []string{"localhost:40021"}, - Timeout: 5 * time.Second, - ReplicaSetName: "rs1", - } - _, err := mgo.DialWithInfo(&info) - c.Assert(err, ErrorMatches, "no reachable servers") - - info.Direct = true - _, err = mgo.DialWithInfo(&info) - c.Assert(err, ErrorMatches, "no reachable servers") - - connectionUrl := "mongodb://localhost:40021/?replicaSet=rs1" - _, err = mgo.Dial(connectionUrl) - c.Assert(err, ErrorMatches, "no reachable servers") - - connectionUrl += "&connect=direct" - _, err = mgo.Dial(connectionUrl) - c.Assert(err, ErrorMatches, "no reachable servers") -} - -func (s *S) TestDialWithForeignReplSecondary(c *C) { - if *fast { - c.Skip("-fast") - } - - // port 40022 is used by an rs2 replica set secondary - info := mgo.DialInfo{ - Addrs: []string{"localhost:40022"}, - Timeout: 5 * time.Second, - ReplicaSetName: "rs1", - 
} - _, err := mgo.DialWithInfo(&info) - c.Assert(err, ErrorMatches, "no reachable servers") - - info.Direct = true - _, err = mgo.DialWithInfo(&info) - c.Assert(err, ErrorMatches, "no reachable servers") - - connectionUrl := "mongodb://localhost:40022/?replicaSet=rs1" - _, err = mgo.Dial(connectionUrl) - c.Assert(err, ErrorMatches, "no reachable servers") - - connectionUrl += "&connect=direct" - _, err = mgo.Dial(connectionUrl) - c.Assert(err, ErrorMatches, "no reachable servers") -} - -func (s *S) TestDialWithMixedSeedList(c *C) { - // port 40011 is used by replica set rs1's primary - // port 40021 is used by replica set rs2's primary - +func (s *S) TestDialWithReplicaSetName(c *C) { seedLists := [][]string{ // rs1 primary and rs2 primary []string{"localhost:40011", "localhost:40021"}, @@ -1083,54 +970,6 @@ func (s *S) TestDialWithMixedSeedList(c *C) { } -func (s *S) TestDialWithForeignSeeds(c *C) { - if *fast { - c.Skip("-fast") - } - - // port 40021 is used by replica set rs2's primary - // port 40022 is used by replica set rs2's secondary - info := mgo.DialInfo{ - Addrs: []string{"localhost:40021", "localhost:40022"}, - Timeout: 5 * time.Second, - ReplicaSetName: "rs1", - } - - _, err := mgo.DialWithInfo(&info) - c.Assert(err, ErrorMatches, "no reachable servers") - - info.Direct = true - _, err = mgo.DialWithInfo(&info) - c.Assert(err, ErrorMatches, "no reachable servers") - - connectionUrl := "mongodb://localhost:40021,localhost:40022/?replicaSet=rs1" - _, err = mgo.Dial(connectionUrl) - c.Assert(err, ErrorMatches, "no reachable servers") - - connectionUrl += "&connect=direct" - _, err = mgo.Dial(connectionUrl) - c.Assert(err, ErrorMatches, "no reachable servers") -} - -func (s *S) TestDialWithUnknownSeeds(c *C) { - if *fast { - c.Skip("-fast") - } - - info := mgo.DialInfo{ - Addrs: []string{"localhost:54321", "localhost:12345"}, - Timeout: 5 * time.Second, - ReplicaSetName: "rs1", - } - - _, err := mgo.DialWithInfo(&info) - c.Assert(err, ErrorMatches, "no 
reachable servers") - - connectionUrl := "mongodb://localhost:54321,localhost:12345/?replicaSet=rs1" - _, err = mgo.Dial(connectionUrl) - c.Assert(err, ErrorMatches, "no reachable servers") -} - func (s *S) TestDirect(c *C) { session, err := mgo.Dial("localhost:40012?connect=direct") c.Assert(err, IsNil) diff --git a/session.go b/session.go index 0a749d412..5d559b7aa 100644 --- a/session.go +++ b/session.go @@ -301,11 +301,10 @@ type DialInfo struct { // authentication if Source is unset. Database string - // ReplicaSetName defines the name of the replica set to use for cluster - // discovery and monitoring. If specified, at least one of the seed servers - // must be a member of the ReplicaSetName replica set - non-members are - // ignored. If unspecified, the intended connection is assumed to be either - // with individual servers, or one or multiple mongos routers. + // ReplicaSetName, if specified, will prevent the obtained session from + // communicating with any server which is not part of a replica set + // with the given name. The default is to communicate with any server + // specified or discovered via the servers contacted. ReplicaSetName string // Source is the database used to establish credentials and privileges From f8aa918d8e7d9d01627d5c74d6e2452c5f2860a2 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Wed, 17 Dec 2014 18:54:36 -0200 Subject: [PATCH 101/305] Do not acquire sockets with iter.m locked. This prevents the deadlock reported by John Morales in MGO-57. --- session.go | 67 ++++++++++++++++++++++++++++++++---------------------- 1 file changed, 40 insertions(+), 27 deletions(-) diff --git a/session.go b/session.go index 5d559b7aa..4f6f2380e 100644 --- a/session.go +++ b/session.go @@ -2960,30 +2960,29 @@ func (iter *Iter) Err() error { // a *QueryError type. 
func (iter *Iter) Close() error { iter.m.Lock() - iter.killCursor() + cursorId := iter.op.cursorId + iter.op.cursorId = 0 err := iter.err iter.m.Unlock() - if err == ErrNotFound { - return nil - } - return err -} -func (iter *Iter) killCursor() error { - if iter.op.cursorId != 0 { - socket, err := iter.acquireSocket() - if err == nil { - // TODO Batch kills. - err = socket.Query(&killCursorsOp{[]int64{iter.op.cursorId}}) - socket.Release() - } - if err != nil && (iter.err == nil || iter.err == ErrNotFound) { - iter.err = err - } - iter.op.cursorId = 0 + if cursorId == 0 { return err } - return nil + socket, err := iter.acquireSocket() + if err == nil { + // TODO Batch kills. + err = socket.Query(&killCursorsOp{[]int64{cursorId}}) + socket.Release() + } + + iter.m.Lock() + if err != nil && (iter.err == nil || iter.err == ErrNotFound) { + iter.err = err + } else if iter.err != ErrNotFound { + err = iter.err + } + iter.m.Unlock() + return err } // Timeout returns true if Next returned false due to a timeout of @@ -3043,6 +3042,7 @@ func (iter *Iter) Next(result interface{}) bool { // Exhaust available data before reporting any errors. if docData, ok := iter.docData.Pop().([]byte); ok { + close := false if iter.limit > 0 { iter.limit-- if iter.limit == 0 { @@ -3051,19 +3051,20 @@ func (iter *Iter) Next(result interface{}) bool { panic(fmt.Errorf("data remains after limit exhausted: %d", iter.docData.Len())) } iter.err = ErrNotFound - if iter.killCursor() != nil { - iter.m.Unlock() - return false - } + close = true } } if iter.op.cursorId != 0 && iter.err == nil { - if iter.docsBeforeMore == 0 { + iter.docsBeforeMore-- + if iter.docsBeforeMore == -1 { iter.getMore() } - iter.docsBeforeMore-- // Goes negative. 
} iter.m.Unlock() + + if close { + iter.Close() + } err := bson.Unmarshal(docData, result) if err != nil { debugf("Iter %p document unmarshaling failed: %#v", iter, err) @@ -3188,6 +3189,12 @@ func (iter *Iter) For(result interface{}, f func() error) (err error) { return iter.Err() } +// acquireSocket acquires a socket from the same server that the iterator +// cursor was obtained from. +// +// WARNING: This method must not be called with iter.m locked. Acquiring the +// socket depends on the cluster sync loop, and the cluster sync loop might +// attempt actions which cause replyFunc to be called, inducing a deadlock. func (iter *Iter) acquireSocket() (*mongoSocket, error) { socket, err := iter.session.acquireSocket(true) if err != nil { @@ -3216,7 +3223,12 @@ func (iter *Iter) acquireSocket() (*mongoSocket, error) { } func (iter *Iter) getMore() { + // Increment now so that unlocking the iterator won't cause a + // different goroutine to get here as well. + iter.docsToReceive++ + iter.m.Unlock() socket, err := iter.acquireSocket() + iter.m.Lock() if err != nil { iter.err = err return @@ -3225,15 +3237,16 @@ func (iter *Iter) getMore() { debugf("Iter %p requesting more documents", iter) if iter.limit > 0 { - limit := iter.limit - int32(iter.docsToReceive) - int32(iter.docData.Len()) + // The -1 below accounts for the fact docsToReceive was incremented above. 
+ limit := iter.limit - int32(iter.docsToReceive - 1) - int32(iter.docData.Len()) if limit < iter.op.limit { iter.op.limit = limit } } if err := socket.Query(&iter.op); err != nil { + iter.docsToReceive-- iter.err = err } - iter.docsToReceive++ } type countCmd struct { From c064533a8e1c44be97e7518592e1eac031effffb Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Sun, 21 Dec 2014 17:24:53 -0200 Subject: [PATCH 102/305] testdb: make dropall.js resilient to "not master" --- testdb/dropall.js | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/testdb/dropall.js b/testdb/dropall.js index 232eca3c3..5b654f337 100644 --- a/testdb/dropall.js +++ b/testdb/dropall.js @@ -31,10 +31,20 @@ for (var i in ports) { } } var result = admin.runCommand({"listDatabases": 1}) - // Why is the command returning undefined!? - while (typeof result.databases == "undefined") { + for (var j = 0; j != 100; j++) { + if (typeof result.databases != "undefined" || result.errmsg == "not master") { + break + } result = admin.runCommand({"listDatabases": 1}) } + if (result.errmsg == "not master") { + continue + } + if (typeof result.databases == "undefined") { + print("Could not list databases. Command result:") + print(JSON.stringify(result)) + quit(12) + } var dbs = result.databases for (var j = 0; j != dbs.length; j++) { var db = dbs[j] From 5463a0a55997f4fbdf3c530c79cb371aa8a9af3e Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Sun, 21 Dec 2014 21:18:39 -0200 Subject: [PATCH 103/305] Fix ErrNotFound bug just introduced on Iter change. 
--- session.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/session.go b/session.go index 4f6f2380e..7a9ab92ce 100644 --- a/session.go +++ b/session.go @@ -2964,8 +2964,10 @@ func (iter *Iter) Close() error { iter.op.cursorId = 0 err := iter.err iter.m.Unlock() - if cursorId == 0 { + if err == ErrNotFound { + return nil + } return err } socket, err := iter.acquireSocket() @@ -3238,7 +3240,7 @@ func (iter *Iter) getMore() { debugf("Iter %p requesting more documents", iter) if iter.limit > 0 { // The -1 below accounts for the fact docsToReceive was incremented above. - limit := iter.limit - int32(iter.docsToReceive - 1) - int32(iter.docData.Len()) + limit := iter.limit - int32(iter.docsToReceive-1) - int32(iter.docData.Len()) if limit < iter.op.limit { iter.op.limit = limit } From 513c45dcfd233502953f53ad8539fcd82c335a94 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Sun, 21 Dec 2014 21:11:02 -0200 Subject: [PATCH 104/305] Trivial test fixes for 2.8.0-rc3. 
--- session_test.go | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/session_test.go b/session_test.go index 1aaf80a1c..d70647edf 100644 --- a/session_test.go +++ b/session_test.go @@ -1028,9 +1028,13 @@ func (s *S) TestQueryExplain(c *C) { query := coll.Find(nil).Limit(2) err = query.Explain(m) c.Assert(err, IsNil) - c.Assert(m["cursor"], Equals, "BasicCursor") - c.Assert(m["nscanned"], Equals, 2) - c.Assert(m["n"], Equals, 2) + if m["queryPlanner"] != nil { + c.Assert(m["executionStats"].(M)["totalDocsExamined"], Equals, 2) + } else { + c.Assert(m["cursor"], Equals, "BasicCursor") + c.Assert(m["nscanned"], Equals, 2) + c.Assert(m["n"], Equals, 2) + } n := 0 var result M @@ -1072,8 +1076,16 @@ func (s *S) TestQueryHint(c *C) { m := M{} err = coll.Find(nil).Hint("a").Explain(m) c.Assert(err, IsNil) - c.Assert(m["indexBounds"], NotNil) - c.Assert(m["indexBounds"].(M)["a"], NotNil) + + if m["queryPlanner"] != nil { + m = m["queryPlanner"].(M) + m = m["winningPlan"].(M) + m = m["inputStage"].(M) + c.Assert(m["indexName"], Equals, "a_1") + } else { + c.Assert(m["indexBounds"], NotNil) + c.Assert(m["indexBounds"].(M)["a"], NotNil) + } } func (s *S) TestFindOneNotFound(c *C) { @@ -2401,7 +2413,7 @@ func (s *S) TestSafeParameters(c *C) { // Tweak the safety parameters to something unachievable. session.SetSafe(&mgo.Safe{W: 4, WTimeout: 100}) err = coll.Insert(M{"_id": 1}) - c.Assert(err, ErrorMatches, "timeout|timed out waiting for slaves") + c.Assert(err, ErrorMatches, "timeout|timed out waiting for slaves|Not enough data-bearing nodes") if !s.versionAtLeast(2, 6) { // 2.6 turned it into a query error. c.Assert(err.(*mgo.LastError).WTimeout, Equals, true) From baa44cad7b0b9a4386b5999524a0795d58897243 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Sun, 21 Dec 2014 21:21:25 -0200 Subject: [PATCH 105/305] Add Collection.NewIter method. 
This is a refactoring of the first batch + cursor logic that is used in the Pipe and Repair methods, so it may be used both internally in other places and also externally when people depend on functionality not yet implemented in the driver. --- session.go | 132 ++++++++++++++++++++++++++++++----------------------- 1 file changed, 75 insertions(+), 57 deletions(-) diff --git a/session.go b/session.go index 7a9ab92ce..874cafae1 100644 --- a/session.go +++ b/session.go @@ -1855,13 +1855,6 @@ func (c *Collection) Repair() *Iter { cloned := session.Clone() cloned.SetMode(Strong, false) defer cloned.Close() - c = c.With(cloned) - - iter := &Iter{ - session: session, - timeout: -1, - } - iter.gotReply.L = &iter.m var result struct { Cursor struct { @@ -1874,28 +1867,10 @@ func (c *Collection) Repair() *Iter { RepairCursor: c.Name, Cursor: &repairCmdCursor{batchSize}, } - iter.err = c.Database.Run(cmd, &result) - if iter.err != nil { - return iter - } - docs := result.Cursor.FirstBatch - for i := range docs { - iter.docData.Push(docs[i].Data) - } - if result.Cursor.Id != 0 { - socket, err := cloned.acquireSocket(true) - if err != nil { - // Cloned session is in strong mode, and the query - // above succeeded. Should have a reserved socket. - panic("internal error: " + err.Error()) - } - iter.server = socket.Server() - socket.Release() - iter.op.cursorId = result.Cursor.Id - iter.op.collection = c.FullName - iter.op.replyFunc = iter.replyFunc() - } - return iter + + clonedc := c.With(cloned) + err := clonedc.Database.Run(cmd, &result) + return clonedc.NewIter(session, result.Cursor.FirstBatch, result.Cursor.Id, err) } // FindId is a convenience helper equivalent to: @@ -1957,7 +1932,6 @@ func (c *Collection) Pipe(pipeline interface{}) *Pipe { // Iter executes the pipeline and returns an iterator capable of going // over all the generated results. 
func (p *Pipe) Iter() *Iter { - // Clone session and set it to strong mode so that the server // used for the query may be safely obtained afterwards, if // necessary for iteration when a cursor is received. @@ -1966,12 +1940,6 @@ func (p *Pipe) Iter() *Iter { defer cloned.Close() c := p.collection.With(cloned) - iter := &Iter{ - session: p.session, - timeout: -1, - } - iter.gotReply.L = &iter.m - var result struct { // 2.4, no cursors. Result []bson.Raw @@ -1989,34 +1957,84 @@ func (p *Pipe) Iter() *Iter { AllowDisk: p.allowDisk, Cursor: &pipeCmdCursor{p.batchSize}, } - iter.err = c.Database.Run(cmd, &result) - if e, ok := iter.err.(*QueryError); ok && e.Message == `unrecognized field "cursor` { + err := c.Database.Run(cmd, &result) + if e, ok := err.(*QueryError); ok && e.Message == `unrecognized field "cursor` { cmd.Cursor = nil cmd.AllowDisk = false - iter.err = c.Database.Run(cmd, &result) + err = c.Database.Run(cmd, &result) + } + firstBatch := result.Result + if firstBatch == nil { + firstBatch = result.Cursor.FirstBatch + } + return c.NewIter(p.session, firstBatch, result.Cursor.Id, err) +} + +// NewIter returns a newly created iterator with the provided parameters. +// Using this method is not recommended unless the desired functionality +// is not yet exposed via a more convenient interface (Find, Pipe, etc). +// +// The optional session parameter associates the lifetime of the returned +// iterator to an arbitrary session. If nil, the iterator will be bound to +// c's session. +// +// Documents in firstBatch will be individually provided by the returned +// iterator before documents from cursorId are made available. If cursorId +// is zero, only the documents in firstBatch are provided. +// +// If err is not nil, the iterator's Err method will report it after +// exhausting documents in firstBatch. 
+// +// NewIter must be called right after the cursor id is obtained, and must not +// be called on a collection in Eventual mode, because the cursor id is +// associated with the specific server that returned it. The session parameter +// may be in any mode or state, though. +// +func (c *Collection) NewIter(session *Session, firstBatch []bson.Raw, cursorId int64, err error) *Iter { + var server *mongoServer + csession := c.Database.Session + csession.m.RLock() + socket := csession.masterSocket + if socket == nil { + socket = csession.slaveSocket } - if iter.err != nil { - return iter + if socket != nil { + server = socket.Server() } - docs := result.Result - if docs == nil { - docs = result.Cursor.FirstBatch + csession.m.RUnlock() + + if server == nil { + if csession.Mode() == Eventual { + panic("Collection.NewIter called in Eventual mode") + } + panic("Collection.NewIter called on a fresh session with no associated server") } - for i := range docs { - iter.docData.Push(docs[i].Data) + + if session == nil { + session = csession } - if result.Cursor.Id != 0 { - socket, err := cloned.acquireSocket(true) - if err != nil { - // Cloned session is in strong mode, and the query - // above succeeded. Should have a reserved socket. 
- panic("internal error: " + err.Error()) + + iter := &Iter{ + session: session, + server: server, + timeout: -1, + err: err, + } + iter.gotReply.L = &iter.m + for _, doc := range firstBatch { + iter.docData.Push(doc.Data) + } + if cursorId != 0 { + socket, err := c.Database.Session.acquireSocket(true) + if err == nil { + iter.server = socket.Server() + socket.Release() + iter.op.cursorId = cursorId + iter.op.collection = c.FullName + iter.op.replyFunc = iter.replyFunc() + } else if iter.err == nil { + iter.err = err } - iter.server = socket.Server() - socket.Release() - iter.op.cursorId = result.Cursor.Id - iter.op.collection = c.FullName - iter.op.replyFunc = iter.replyFunc() } return iter } From bbabc258bdacb43d19ef312ed88beed8dc13f25c Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Sun, 21 Dec 2014 21:11:41 -0200 Subject: [PATCH 106/305] Support cursors on listIndexes and listCollections. --- session.go | 82 +++++++++++++++++++++++++++++++++++------------------- 1 file changed, 54 insertions(+), 28 deletions(-) diff --git a/session.go b/session.go index 874cafae1..ce80adb76 100644 --- a/session.go +++ b/session.go @@ -1230,32 +1230,37 @@ func (c *Collection) DropIndex(key ...string) error { // See the EnsureIndex method for more details on indexes. func (c *Collection) Indexes() (indexes []Index, err error) { // Try with a command. 
- var cmdResult struct { - Indexes []indexSpec - } - err = c.Database.Run(bson.D{{"listIndexes", c.Name}}, &cmdResult) - if err == nil { - for _, spec := range cmdResult.Indexes { - indexes = append(indexes, indexFromSpec(spec)) + var result struct { + Indexes []bson.Raw + + Cursor struct { + FirstBatch []bson.Raw "firstBatch" + NS string + Id int64 } - sort.Sort(indexSlice(indexes)) - return indexes, nil } - if err != nil && !isNoCmd(err) { + var iter *Iter + err = c.Database.Run(bson.D{{"listIndexes", c.Name}, {"cursor", bson.D{}}}, &result) + if err == nil { + firstBatch := result.Indexes + if firstBatch == nil { + firstBatch = result.Cursor.FirstBatch + } + iter = c.Database.C(result.Cursor.NS).NewIter(nil, firstBatch, result.Cursor.Id, nil) + } else if isNoCmd(err) { + // Command not yet supported. Query the database instead. + iter = c.Database.C("system.indexes").Find(bson.M{"ns": c.FullName}).Iter() + } else { return nil, err } - // Command not yet supported. Query the database instead. - query := c.Database.C("system.indexes").Find(bson.M{"ns": c.FullName}) - iter := query.Sort("name").Iter() - for { - var spec indexSpec - if !iter.Next(&spec) { - break - } + var spec indexSpec + for iter.Next(&spec) { indexes = append(indexes, indexFromSpec(spec)) } - err = iter.Close() + if err = iter.Close(); err != nil { + return nil, err + } sort.Sort(indexSlice(indexes)) return indexes, nil } @@ -2755,17 +2760,38 @@ func (s *Session) FindRef(ref *DBRef) *Query { // CollectionNames returns the collection names present in the db database. func (db *Database) CollectionNames() (names []string, err error) { + // Clone session and set it to strong mode so that the server + // used for the query may be safely obtained afterwards, if + // necessary for iteration when a cursor is received. + session := db.Session + cloned := session.Clone() + cloned.SetMode(Strong, false) + defer cloned.Close() + // Try with a command. 
- var cmdResult struct { - Collections []struct { - Name string + var result struct { + Collections []bson.Raw + + Cursor struct { + FirstBatch []bson.Raw "firstBatch" + NS string + Id int64 } } - err = db.Run(bson.D{{"listCollections", 1}}, &cmdResult) + err = db.Run(bson.D{{"listCollections", 1}, {"cursor", bson.D{}}}, &result) if err == nil { - for _, coll := range cmdResult.Collections { + firstBatch := result.Collections + if firstBatch == nil { + firstBatch = result.Cursor.FirstBatch + } + iter := db.C(result.Cursor.NS).NewIter(nil, firstBatch, result.Cursor.Id, nil) + var coll struct{ Name string } + for iter.Next(&coll) { names = append(names, coll.Name) } + if err := iter.Close(); err != nil { + return nil, err + } sort.Strings(names) return names, err } @@ -2776,10 +2802,10 @@ func (db *Database) CollectionNames() (names []string, err error) { // Command not yet supported. Query the database instead. nameIndex := len(db.Name) + 1 iter := db.C("system.namespaces").Find(nil).Iter() - var result *struct{ Name string } - for iter.Next(&result) { - if strings.Index(result.Name, "$") < 0 || strings.Index(result.Name, ".oplog.$") >= 0 { - names = append(names, result.Name[nameIndex:]) + var coll struct{ Name string } + for iter.Next(&coll) { + if strings.Index(coll.Name, "$") < 0 || strings.Index(coll.Name, ".oplog.$") >= 0 { + names = append(names, coll.Name[nameIndex:]) } } if err := iter.Close(); err != nil { From 6bc18a881e53b57a25d47acb4a04c8a1770e5ce2 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 22 Dec 2014 00:35:49 -0200 Subject: [PATCH 107/305] More tweaks and tests for NewIter. Also fixes result.Cursor.NS handling, as mentioned by Jeff Yemin. 
--- session.go | 45 +++++++++++++++++++++++++++------------------ session_test.go | 6 ++++++ 2 files changed, 33 insertions(+), 18 deletions(-) diff --git a/session.go b/session.go index ce80adb76..e10c3833a 100644 --- a/session.go +++ b/session.go @@ -1229,6 +1229,11 @@ func (c *Collection) DropIndex(key ...string) error { // // See the EnsureIndex method for more details on indexes. func (c *Collection) Indexes() (indexes []Index, err error) { + session := c.Database.Session + session.m.RLock() + batchSize := int(session.queryConfig.op.limit) + session.m.RUnlock() + // Try with a command. var result struct { Indexes []bson.Raw @@ -1240,13 +1245,17 @@ func (c *Collection) Indexes() (indexes []Index, err error) { } } var iter *Iter - err = c.Database.Run(bson.D{{"listIndexes", c.Name}, {"cursor", bson.D{}}}, &result) + err = c.Database.Run(bson.D{{"listIndexes", c.Name}, {"cursor", bson.D{{"batchSize", batchSize}}}}, &result) if err == nil { firstBatch := result.Indexes if firstBatch == nil { firstBatch = result.Cursor.FirstBatch } - iter = c.Database.C(result.Cursor.NS).NewIter(nil, firstBatch, result.Cursor.Id, nil) + ns := strings.SplitN(result.Cursor.NS, ".", 2) + if len(ns) < 2 { + panic("server returned invalid cursor.ns result on listIndexes") + } + iter = session.DB(ns[0]).C(ns[1]).NewIter(nil, firstBatch, result.Cursor.Id, nil) } else if isNoCmd(err) { // Command not yet supported. Query the database instead. iter = c.Database.C("system.indexes").Find(bson.M{"ns": c.FullName}).Iter() @@ -1854,9 +1863,9 @@ func (c *Collection) Repair() *Iter { // used for the query may be safely obtained afterwards, if // necessary for iteration when a cursor is received. 
session := c.Database.Session - session.m.Lock() + session.m.RLock() batchSize := int(session.queryConfig.op.limit) - session.m.Unlock() + session.m.RUnlock() cloned := session.Clone() cloned.SetMode(Strong, false) defer cloned.Close() @@ -1923,9 +1932,9 @@ type pipeCmdCursor struct { // func (c *Collection) Pipe(pipeline interface{}) *Pipe { session := c.Database.Session - session.m.Lock() + session.m.RLock() batchSize := int(session.queryConfig.op.limit) - session.m.Unlock() + session.m.RUnlock() return &Pipe{ session: session, collection: c, @@ -2030,16 +2039,9 @@ func (c *Collection) NewIter(session *Session, firstBatch []bson.Raw, cursorId i iter.docData.Push(doc.Data) } if cursorId != 0 { - socket, err := c.Database.Session.acquireSocket(true) - if err == nil { - iter.server = socket.Server() - socket.Release() - iter.op.cursorId = cursorId - iter.op.collection = c.FullName - iter.op.replyFunc = iter.replyFunc() - } else if iter.err == nil { - iter.err = err - } + iter.op.cursorId = cursorId + iter.op.collection = c.FullName + iter.op.replyFunc = iter.replyFunc() } return iter } @@ -2764,6 +2766,9 @@ func (db *Database) CollectionNames() (names []string, err error) { // used for the query may be safely obtained afterwards, if // necessary for iteration when a cursor is received. 
session := db.Session + session.m.RLock() + batchSize := int(session.queryConfig.op.limit) + session.m.RUnlock() cloned := session.Clone() cloned.SetMode(Strong, false) defer cloned.Close() @@ -2778,13 +2783,17 @@ func (db *Database) CollectionNames() (names []string, err error) { Id int64 } } - err = db.Run(bson.D{{"listCollections", 1}, {"cursor", bson.D{}}}, &result) + err = db.Run(bson.D{{"listCollections", 1}, {"cursor", bson.D{{"batchSize", batchSize}}}}, &result) if err == nil { firstBatch := result.Collections if firstBatch == nil { firstBatch = result.Cursor.FirstBatch } - iter := db.C(result.Cursor.NS).NewIter(nil, firstBatch, result.Cursor.Id, nil) + ns := strings.SplitN(result.Cursor.NS, ".", 2) + if len(ns) < 2 { + panic("server returned invalid cursor.ns result on listCollections") + } + iter := session.DB(ns[0]).C(ns[1]).NewIter(nil, firstBatch, result.Cursor.Id, nil) var coll struct{ Name string } for iter.Next(&coll) { names = append(names, coll.Name) diff --git a/session_test.go b/session_test.go index d70647edf..d1486738b 100644 --- a/session_test.go +++ b/session_test.go @@ -282,6 +282,9 @@ func (s *S) TestDatabaseAndCollectionNames(c *C) { c.Assert(names, DeepEquals, []string{"db1", "db2", "local"}) } + // Try to exercise cursor logic. 2.8.0-rc3 still ignores this. + session.SetBatch(2) + names, err = db1.CollectionNames() c.Assert(err, IsNil) c.Assert(names, DeepEquals, []string{"col1", "col2", "system.indexes"}) @@ -2807,6 +2810,9 @@ func (s *S) TestEnsureIndexGetIndexes(c *C) { err = coll.EnsureIndexKey("$2d:d") c.Assert(err, IsNil) + // Try to exercise cursor logic. 2.8.0-rc3 still ignores this. + session.SetBatch(2) + indexes, err := coll.Indexes() c.Assert(err, IsNil) From c735aed8f9ef6cce43c34b967f586da6906220ad Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Wed, 31 Dec 2014 14:30:25 -0200 Subject: [PATCH 108/305] Use cloned session properly in CollectionNames. Issue reported by Daniel Gottlieb. 
--- session.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/session.go b/session.go index e10c3833a..034265459 100644 --- a/session.go +++ b/session.go @@ -1867,7 +1867,7 @@ func (c *Collection) Repair() *Iter { batchSize := int(session.queryConfig.op.limit) session.m.RUnlock() cloned := session.Clone() - cloned.SetMode(Strong, false) + cloned.SetMode(Monotonic, false) defer cloned.Close() var result struct { @@ -1950,7 +1950,7 @@ func (p *Pipe) Iter() *Iter { // used for the query may be safely obtained afterwards, if // necessary for iteration when a cursor is received. cloned := p.session.Clone() - cloned.SetMode(Strong, false) + cloned.SetMode(Monotonic, false) defer cloned.Close() c := p.collection.With(cloned) @@ -2770,7 +2770,7 @@ func (db *Database) CollectionNames() (names []string, err error) { batchSize := int(session.queryConfig.op.limit) session.m.RUnlock() cloned := session.Clone() - cloned.SetMode(Strong, false) + cloned.SetMode(Monotonic, false) defer cloned.Close() // Try with a command. 
@@ -2783,7 +2783,7 @@ func (db *Database) CollectionNames() (names []string, err error) { Id int64 } } - err = db.Run(bson.D{{"listCollections", 1}, {"cursor", bson.D{{"batchSize", batchSize}}}}, &result) + err = db.With(cloned).Run(bson.D{{"listCollections", 1}, {"cursor", bson.D{{"batchSize", batchSize}}}}, &result) if err == nil { firstBatch := result.Collections if firstBatch == nil { @@ -2793,7 +2793,7 @@ func (db *Database) CollectionNames() (names []string, err error) { if len(ns) < 2 { panic("server returned invalid cursor.ns result on listCollections") } - iter := session.DB(ns[0]).C(ns[1]).NewIter(nil, firstBatch, result.Cursor.Id, nil) + iter := cloned.DB(ns[0]).C(ns[1]).NewIter(session, firstBatch, result.Cursor.Id, nil) var coll struct{ Name string } for iter.Next(&coll) { names = append(names, coll.Name) From 7d75669a2de30ca1d0b310da148cb04e0096e6a4 Mon Sep 17 00:00:00 2001 From: Michael Christenson II Date: Sun, 4 Jan 2015 21:30:22 -0700 Subject: [PATCH 109/305] Fixed small documentation typo on GridFs.Create --- gridfs.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gridfs.go b/gridfs.go index 3439462f7..147530dd6 100644 --- a/gridfs.go +++ b/gridfs.go @@ -131,7 +131,7 @@ func finalizeFile(file *GridFile) { // } // file, err := db.GridFS("fs").Create("myfile.txt") // check(err) -// n, err := file.Write([]byte("Hello world!") +// n, err := file.Write([]byte("Hello world!")) // check(err) // err = file.Close() // check(err) From d901aa69c0c2711f1774ba54f4b8716204c41a59 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Tue, 13 Jan 2015 18:56:33 -0200 Subject: [PATCH 110/305] Import mgo.v2 from the unstable branch. 
--- auth.go | 8 ++++---- auth_test.go | 3 +-- bson/bson_test.go | 2 +- bulk_test.go | 2 +- cluster.go | 2 +- cluster_test.go | 4 ++-- gridfs.go | 2 +- gridfs_test.go | 4 ++-- internal/scram/scram_test.go | 2 +- saslimpl.go | 2 +- server.go | 2 +- session.go | 2 +- session_test.go | 4 ++-- socket.go | 2 +- suite_test.go | 4 ++-- txn/debug.go | 2 +- txn/flusher.go | 4 ++-- txn/mgo_test.go | 2 +- txn/sim_test.go | 9 ++++----- txn/tarjan.go | 2 +- txn/tarjan_test.go | 2 +- txn/txn.go | 4 ++-- txn/txn_test.go | 6 +++--- 23 files changed, 37 insertions(+), 39 deletions(-) diff --git a/auth.go b/auth.go index 1761d0d6b..7787ea198 100644 --- a/auth.go +++ b/auth.go @@ -34,8 +34,8 @@ import ( "fmt" "sync" - "gopkg.in/mgo.v2/bson" - "gopkg.in/mgo.v2/internal/scram" + "gopkg.in/mgo.v2-unstable/bson" + "gopkg.in/mgo.v2-unstable/internal/scram" ) type authCmd struct { @@ -361,8 +361,8 @@ func saslNewScram(cred Credential) *saslScram { } type saslScram struct { - cred Credential - client *scram.Client + cred Credential + client *scram.Client } func (s *saslScram) Close() {} diff --git a/auth_test.go b/auth_test.go index a9c0b27f8..e0216abec 100644 --- a/auth_test.go +++ b/auth_test.go @@ -39,7 +39,7 @@ import ( "time" . "gopkg.in/check.v1" - "gopkg.in/mgo.v2" + "gopkg.in/mgo.v2-unstable" ) func (s *S) TestAuthLoginDatabase(c *C) { @@ -1022,7 +1022,6 @@ var ( winKerberosPasswordEnv = "MGO_KERBEROS_PASSWORD" ) - // Kerberos has its own suite because it talks to a remote server // that is prepared to authenticate against a kerberos deployment. type KerberosSuite struct{} diff --git a/bson/bson_test.go b/bson/bson_test.go index 0606c4905..7d4079d5c 100644 --- a/bson/bson_test.go +++ b/bson/bson_test.go @@ -37,7 +37,7 @@ import ( "time" . 
"gopkg.in/check.v1" - "gopkg.in/mgo.v2/bson" + "gopkg.in/mgo.v2-unstable/bson" ) func TestAll(t *testing.T) { diff --git a/bulk_test.go b/bulk_test.go index 24af1b102..d35c42cad 100644 --- a/bulk_test.go +++ b/bulk_test.go @@ -28,7 +28,7 @@ package mgo_test import ( . "gopkg.in/check.v1" - "gopkg.in/mgo.v2" + "gopkg.in/mgo.v2-unstable" ) func (s *S) TestBulkInsert(c *C) { diff --git a/cluster.go b/cluster.go index bd5d9acc6..2db8a6d37 100644 --- a/cluster.go +++ b/cluster.go @@ -33,7 +33,7 @@ import ( "sync" "time" - "gopkg.in/mgo.v2/bson" + "gopkg.in/mgo.v2-unstable/bson" ) // --------------------------------------------------------------------------- diff --git a/cluster_test.go b/cluster_test.go index cef5438a1..cd1366957 100644 --- a/cluster_test.go +++ b/cluster_test.go @@ -35,8 +35,8 @@ import ( "time" . "gopkg.in/check.v1" - "gopkg.in/mgo.v2" - "gopkg.in/mgo.v2/bson" + "gopkg.in/mgo.v2-unstable" + "gopkg.in/mgo.v2-unstable/bson" ) func (s *S) TestNewSession(c *C) { diff --git a/gridfs.go b/gridfs.go index 3439462f7..5ed6a77e4 100644 --- a/gridfs.go +++ b/gridfs.go @@ -36,7 +36,7 @@ import ( "sync" "time" - "gopkg.in/mgo.v2/bson" + "gopkg.in/mgo.v2-unstable/bson" ) type GridFS struct { diff --git a/gridfs_test.go b/gridfs_test.go index 9afd2454c..655fbc4c8 100644 --- a/gridfs_test.go +++ b/gridfs_test.go @@ -32,8 +32,8 @@ import ( "time" . "gopkg.in/check.v1" - "gopkg.in/mgo.v2" - "gopkg.in/mgo.v2/bson" + "gopkg.in/mgo.v2-unstable" + "gopkg.in/mgo.v2-unstable/bson" ) func (s *S) TestGridFSCreate(c *C) { diff --git a/internal/scram/scram_test.go b/internal/scram/scram_test.go index 9c20fdfc4..ff4abe941 100644 --- a/internal/scram/scram_test.go +++ b/internal/scram/scram_test.go @@ -5,7 +5,7 @@ import ( "testing" . 
"gopkg.in/check.v1" - "gopkg.in/mgo.v2/internal/scram" + "gopkg.in/mgo.v2-unstable/internal/scram" "strings" ) diff --git a/saslimpl.go b/saslimpl.go index 58c0891c6..49c94091d 100644 --- a/saslimpl.go +++ b/saslimpl.go @@ -3,7 +3,7 @@ package mgo import ( - "gopkg.in/mgo.v2/sasl" + "gopkg.in/mgo.v2-unstable/sasl" ) func saslNew(cred Credential, host string) (saslStepper, error) { diff --git a/server.go b/server.go index d5086a290..54fe233eb 100644 --- a/server.go +++ b/server.go @@ -33,7 +33,7 @@ import ( "sync" "time" - "gopkg.in/mgo.v2/bson" + "gopkg.in/mgo.v2-unstable/bson" ) // --------------------------------------------------------------------------- diff --git a/session.go b/session.go index 034265459..c4513443a 100644 --- a/session.go +++ b/session.go @@ -41,7 +41,7 @@ import ( "sync" "time" - "gopkg.in/mgo.v2/bson" + "gopkg.in/mgo.v2-unstable/bson" ) type mode int diff --git a/session_test.go b/session_test.go index d1486738b..d92def27a 100644 --- a/session_test.go +++ b/session_test.go @@ -38,8 +38,8 @@ import ( "time" . "gopkg.in/check.v1" - "gopkg.in/mgo.v2" - "gopkg.in/mgo.v2/bson" + "gopkg.in/mgo.v2-unstable" + "gopkg.in/mgo.v2-unstable/bson" ) func (s *S) TestRunString(c *C) { diff --git a/socket.go b/socket.go index 1fb0dff77..53848c7f5 100644 --- a/socket.go +++ b/socket.go @@ -32,7 +32,7 @@ import ( "sync" "time" - "gopkg.in/mgo.v2/bson" + "gopkg.in/mgo.v2-unstable/bson" ) type replyFunc func(err error, reply *replyOp, docNum int, docData []byte) diff --git a/suite_test.go b/suite_test.go index 334407e31..962a6c1cb 100644 --- a/suite_test.go +++ b/suite_test.go @@ -38,8 +38,8 @@ import ( "time" . 
"gopkg.in/check.v1" - "gopkg.in/mgo.v2" - "gopkg.in/mgo.v2/bson" + "gopkg.in/mgo.v2-unstable" + "gopkg.in/mgo.v2-unstable/bson" ) var fast = flag.Bool("fast", false, "Skip slow tests") diff --git a/txn/debug.go b/txn/debug.go index 8224bb313..e3300c433 100644 --- a/txn/debug.go +++ b/txn/debug.go @@ -6,7 +6,7 @@ import ( "sort" "sync/atomic" - "gopkg.in/mgo.v2/bson" + "gopkg.in/mgo.v2-unstable/bson" ) var ( diff --git a/txn/flusher.go b/txn/flusher.go index 25b2f0319..86e711037 100644 --- a/txn/flusher.go +++ b/txn/flusher.go @@ -3,8 +3,8 @@ package txn import ( "fmt" - "gopkg.in/mgo.v2" - "gopkg.in/mgo.v2/bson" + "gopkg.in/mgo.v2-unstable" + "gopkg.in/mgo.v2-unstable/bson" ) func flush(r *Runner, t *transaction) error { diff --git a/txn/mgo_test.go b/txn/mgo_test.go index 5abc47335..3e7073a3a 100644 --- a/txn/mgo_test.go +++ b/txn/mgo_test.go @@ -2,7 +2,7 @@ package txn_test import ( "bytes" - "gopkg.in/mgo.v2" + "gopkg.in/mgo.v2-unstable" . "gopkg.in/check.v1" "os/exec" "time" diff --git a/txn/sim_test.go b/txn/sim_test.go index 35f7048cc..9fcfb140c 100644 --- a/txn/sim_test.go +++ b/txn/sim_test.go @@ -2,9 +2,9 @@ package txn_test import ( "flag" - "gopkg.in/mgo.v2" - "gopkg.in/mgo.v2/bson" - "gopkg.in/mgo.v2/txn" + "gopkg.in/mgo.v2-unstable" + "gopkg.in/mgo.v2-unstable/bson" + "gopkg.in/mgo.v2-unstable/txn" . 
"gopkg.in/check.v1" "math/rand" "time" @@ -149,7 +149,6 @@ func (s *S) TestSimChangeLog(c *C) { }) } - type balanceChange struct { id bson.ObjectId origin int @@ -184,7 +183,7 @@ func simulate(c *C, params params) { tclog := db.C("tc.log") if params.changelog { info := mgo.CollectionInfo{ - Capped: true, + Capped: true, MaxBytes: 1000000, } err := tclog.Create(&info) diff --git a/txn/tarjan.go b/txn/tarjan.go index e56541c9b..d5ae94690 100644 --- a/txn/tarjan.go +++ b/txn/tarjan.go @@ -1,7 +1,7 @@ package txn import ( - "gopkg.in/mgo.v2/bson" + "gopkg.in/mgo.v2-unstable/bson" "sort" ) diff --git a/txn/tarjan_test.go b/txn/tarjan_test.go index 79745c39b..b5633bc5e 100644 --- a/txn/tarjan_test.go +++ b/txn/tarjan_test.go @@ -2,7 +2,7 @@ package txn import ( "fmt" - "gopkg.in/mgo.v2/bson" + "gopkg.in/mgo.v2-unstable/bson" . "gopkg.in/check.v1" ) diff --git a/txn/txn.go b/txn/txn.go index 5809e2d3a..d81c1f16f 100644 --- a/txn/txn.go +++ b/txn/txn.go @@ -14,8 +14,8 @@ import ( "strings" "sync" - "gopkg.in/mgo.v2" - "gopkg.in/mgo.v2/bson" + "gopkg.in/mgo.v2-unstable" + "gopkg.in/mgo.v2-unstable/bson" crand "crypto/rand" mrand "math/rand" diff --git a/txn/txn_test.go b/txn/txn_test.go index 1e396eadc..e52b119d3 100644 --- a/txn/txn_test.go +++ b/txn/txn_test.go @@ -6,9 +6,9 @@ import ( "time" . "gopkg.in/check.v1" - "gopkg.in/mgo.v2" - "gopkg.in/mgo.v2/bson" - "gopkg.in/mgo.v2/txn" + "gopkg.in/mgo.v2-unstable" + "gopkg.in/mgo.v2-unstable/bson" + "gopkg.in/mgo.v2-unstable/txn" ) func TestAll(t *testing.T) { From fbdf8c9289f58deb18e6869939530aaa93452596 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Tue, 20 Jan 2015 16:44:36 -0200 Subject: [PATCH 111/305] Only look at public fields on omitempty in structs. 
--- bson/bson_test.go | 3 +++ bson/encode.go | 8 ++++++-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/bson/bson_test.go b/bson/bson_test.go index 7d4079d5c..899c013e9 100644 --- a/bson/bson_test.go +++ b/bson/bson_test.go @@ -1296,6 +1296,9 @@ var oneWayCrossItems = []crossTypeItem{ // Would get decoded into a int32 too in the opposite direction. {&shortIface{int64(1) << 30}, map[string]interface{}{"v": 1 << 30}}, + + // Ensure omitempty on struct with private fields works properly. + {&struct{ V struct { v time.Time } ",omitempty" }{}, map[string]interface{}{}}, } func testCrossPair(c *C, dump interface{}, load interface{}) { diff --git a/bson/encode.go b/bson/encode.go index 03a15484d..81a13add8 100644 --- a/bson/encode.go +++ b/bson/encode.go @@ -180,10 +180,14 @@ func isZero(v reflect.Value) bool { case reflect.Bool: return !v.Bool() case reflect.Struct: - if v.Type() == typeTime { + vt := v.Type() + if vt == typeTime { return v.Interface().(time.Time).IsZero() } - for i := v.NumField()-1; i >= 0; i-- { + for i := 0; i < v.NumField(); i++ { + if vt.Field(i).PkgPath != "" { + continue // Private field + } if !isZero(v.Field(i)) { return false } From a6441a71f179567e196ef445fca1ada779c1d34f Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Tue, 20 Jan 2015 19:41:54 -0200 Subject: [PATCH 112/305] More improvements around NewIter. --- session.go | 44 +++++++++++++++++++++++++------------------- 1 file changed, 25 insertions(+), 19 deletions(-) diff --git a/session.go b/session.go index c4513443a..dbbe0922b 100644 --- a/session.go +++ b/session.go @@ -1229,10 +1229,15 @@ func (c *Collection) DropIndex(key ...string) error { // // See the EnsureIndex method for more details on indexes. func (c *Collection) Indexes() (indexes []Index, err error) { + // Clone session and set it to Monotonic mode so that the server + // used for the query may be safely obtained afterwards, if + // necessary for iteration when a cursor is received. 
session := c.Database.Session - session.m.RLock() - batchSize := int(session.queryConfig.op.limit) - session.m.RUnlock() + cloned := session.Clone() + cloned.SetMode(Monotonic, false) + defer cloned.Close() + + batchSize := int(cloned.queryConfig.op.limit) // Try with a command. var result struct { @@ -1245,7 +1250,7 @@ func (c *Collection) Indexes() (indexes []Index, err error) { } } var iter *Iter - err = c.Database.Run(bson.D{{"listIndexes", c.Name}, {"cursor", bson.D{{"batchSize", batchSize}}}}, &result) + err = c.Database.With(cloned).Run(bson.D{{"listIndexes", c.Name}, {"cursor", bson.D{{"batchSize", batchSize}}}}, &result) if err == nil { firstBatch := result.Indexes if firstBatch == nil { @@ -1253,9 +1258,10 @@ func (c *Collection) Indexes() (indexes []Index, err error) { } ns := strings.SplitN(result.Cursor.NS, ".", 2) if len(ns) < 2 { - panic("server returned invalid cursor.ns result on listIndexes") + iter = c.With(cloned).NewIter(nil, firstBatch, result.Cursor.Id, nil) + } else { + iter = cloned.DB(ns[0]).C(ns[1]).NewIter(nil, firstBatch, result.Cursor.Id, nil) } - iter = session.DB(ns[0]).C(ns[1]).NewIter(nil, firstBatch, result.Cursor.Id, nil) } else if isNoCmd(err) { // Command not yet supported. Query the database instead. iter = c.Database.C("system.indexes").Find(bson.M{"ns": c.FullName}).Iter() @@ -1859,17 +1865,16 @@ type repairCmdCursor struct { // // Repair is supported in MongoDB 2.7.8 and later. func (c *Collection) Repair() *Iter { - // Clone session and set it to strong mode so that the server + // Clone session and set it to Monotonic mode so that the server // used for the query may be safely obtained afterwards, if // necessary for iteration when a cursor is received. 
session := c.Database.Session - session.m.RLock() - batchSize := int(session.queryConfig.op.limit) - session.m.RUnlock() cloned := session.Clone() cloned.SetMode(Monotonic, false) defer cloned.Close() + batchSize := int(cloned.queryConfig.op.limit) + var result struct { Cursor struct { FirstBatch []bson.Raw "firstBatch" @@ -1946,7 +1951,7 @@ func (c *Collection) Pipe(pipeline interface{}) *Pipe { // Iter executes the pipeline and returns an iterator capable of going // over all the generated results. func (p *Pipe) Iter() *Iter { - // Clone session and set it to strong mode so that the server + // Clone session and set it to Monotonic mode so that the server // used for the query may be safely obtained afterwards, if // necessary for iteration when a cursor is received. cloned := p.session.Clone() @@ -2001,8 +2006,8 @@ func (p *Pipe) Iter() *Iter { // // NewIter must be called right after the cursor id is obtained, and must not // be called on a collection in Eventual mode, because the cursor id is -// associated with the specific server that returned it. The session parameter -// may be in any mode or state, though. +// associated with the specific server that returned it. The provided session +// parameter may be in any mode or state, though. // func (c *Collection) NewIter(session *Session, firstBatch []bson.Raw, cursorId int64, err error) *Iter { var server *mongoServer @@ -2762,17 +2767,16 @@ func (s *Session) FindRef(ref *DBRef) *Query { // CollectionNames returns the collection names present in the db database. func (db *Database) CollectionNames() (names []string, err error) { - // Clone session and set it to strong mode so that the server + // Clone session and set it to Monotonic mode so that the server // used for the query may be safely obtained afterwards, if // necessary for iteration when a cursor is received. 
session := db.Session - session.m.RLock() - batchSize := int(session.queryConfig.op.limit) - session.m.RUnlock() cloned := session.Clone() cloned.SetMode(Monotonic, false) defer cloned.Close() + batchSize := int(cloned.queryConfig.op.limit) + // Try with a command. var result struct { Collections []bson.Raw @@ -2789,11 +2793,13 @@ func (db *Database) CollectionNames() (names []string, err error) { if firstBatch == nil { firstBatch = result.Cursor.FirstBatch } + var iter *Iter ns := strings.SplitN(result.Cursor.NS, ".", 2) if len(ns) < 2 { - panic("server returned invalid cursor.ns result on listCollections") + iter = db.With(cloned).C("").NewIter(nil, firstBatch, result.Cursor.Id, nil) + } else { + iter = cloned.DB(ns[0]).C(ns[1]).NewIter(nil, firstBatch, result.Cursor.Id, nil) } - iter := cloned.DB(ns[0]).C(ns[1]).NewIter(session, firstBatch, result.Cursor.Id, nil) var coll struct{ Name string } for iter.Next(&coll) { names = append(names, coll.Name) From 70863c46ab9aeef14ab189fb499ccb39c5bdc3f4 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Tue, 20 Jan 2015 23:59:16 -0200 Subject: [PATCH 113/305] Error kindly on no server in NewIter. Besides user error, that situation may happen when the synchronization goroutine finds the server unavailable. Reported by John Morales. 
--- session.go | 4 +++- session_test.go | 21 +++++++++++++++++++++ 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/session.go b/session.go index dbbe0922b..ba73f1153 100644 --- a/session.go +++ b/session.go @@ -2026,7 +2026,9 @@ func (c *Collection) NewIter(session *Session, firstBatch []bson.Raw, cursorId i if csession.Mode() == Eventual { panic("Collection.NewIter called in Eventual mode") } - panic("Collection.NewIter called on a fresh session with no associated server") + if err == nil { + err = errors.New("server not available") + } } if session == nil { diff --git a/session_test.go b/session_test.go index d92def27a..5547d4dc3 100644 --- a/session_test.go +++ b/session_test.go @@ -3510,6 +3510,27 @@ func (s *S) TestSetCursorTimeout(c *C) { c.Assert(iter.Next(&result), Equals, false) } +func (s *S) TestNewIterNoServer(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + data, err := bson.Marshal(bson.M{"a": 1}) + + coll := session.DB("mydb").C("mycoll") + iter := coll.NewIter(nil, []bson.Raw{{3, data}}, 42, nil) + + var result struct{ A int } + ok := iter.Next(&result) + c.Assert(ok, Equals, true) + c.Assert(result.A, Equals, 1) + + ok = iter.Next(&result) + c.Assert(ok, Equals, false) + + c.Assert(iter.Err(), ErrorMatches, "server not available") +} + // -------------------------------------------------------------------------- // Some benchmarks that require a running database. From fce0aed60015c422a63754b03d2bf9426b2ce240 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Wed, 21 Jan 2015 00:07:02 -0200 Subject: [PATCH 114/305] Another test for the prior NewIter change. 
--- session_test.go | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/session_test.go b/session_test.go index 5547d4dc3..62b587aee 100644 --- a/session_test.go +++ b/session_test.go @@ -3531,6 +3531,27 @@ func (s *S) TestNewIterNoServer(c *C) { c.Assert(iter.Err(), ErrorMatches, "server not available") } +func (s *S) TestNewIterNoServerPresetErr(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + data, err := bson.Marshal(bson.M{"a": 1}) + + coll := session.DB("mydb").C("mycoll") + iter := coll.NewIter(nil, []bson.Raw{{3, data}}, 42, fmt.Errorf("my error")) + + var result struct{ A int } + ok := iter.Next(&result) + c.Assert(ok, Equals, true) + c.Assert(result.A, Equals, 1) + + ok = iter.Next(&result) + c.Assert(ok, Equals, false) + + c.Assert(iter.Err(), ErrorMatches, "my error") +} + // -------------------------------------------------------------------------- // Some benchmarks that require a running database. From 0e93d4bb0b3c853469ba8082f436e1828cec02f4 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Wed, 21 Jan 2015 09:19:51 -0200 Subject: [PATCH 115/305] Support weighting of text index fields. Updates #65. --- session.go | 17 +++++++++++++++++ session_test.go | 3 ++- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/session.go b/session.go index ba73f1153..f30e2541b 100644 --- a/session.go +++ b/session.go @@ -974,6 +974,12 @@ type Index struct { // Properties for text indexes. DefaultLanguage string LanguageOverride string + + // Weights defines the significance of provided fields relative to other + // fields in a text index. The score for a given word in a document is derived + // from the weighted sum of the frequency for each of the indexed fields in + // that document. The default field weight is 1. 
+ Weights map[string]int } type indexKeyInfo struct { @@ -1158,6 +1164,17 @@ func (c *Collection) EnsureIndex(index Index) error { LanguageOverride: index.LanguageOverride, } +NextField: + for name, weight := range index.Weights { + for i, elem := range spec.Weights { + if elem.Name == name { + spec.Weights[i].Value = weight + continue NextField + } + } + panic("weight provided for field that is not part of index key: " + name) + } + session = session.Clone() defer session.Close() session.SetMode(Strong, false) diff --git a/session_test.go b/session_test.go index 62b587aee..8758a80b2 100644 --- a/session_test.go +++ b/session_test.go @@ -2512,6 +2512,7 @@ func (s *S) TestEnsureIndex(c *C) { index5 := mgo.Index{ Key: []string{"$text:a", "$text:b"}, + Weights: map[string]int{"b": 42}, } index6 := mgo.Index{ @@ -2608,7 +2609,7 @@ func (s *S) TestEnsureIndex(c *C) { "name": "a_text_b_text", "key": M{"_fts": "text", "_ftsx": 1}, "ns": "mydb.mycoll1", - "weights": M{"a": 1, "b": 1}, + "weights": M{"a": 1, "b": 42}, "default_language": "english", "language_override": "language", "textIndexVersion": 2, From 2b90f35bf2be57fcd8aeb079840ee45cca02d31c Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Thu, 22 Jan 2015 00:41:53 -0200 Subject: [PATCH 116/305] Respect a non-empty mgo.Index.Name on EnsureIndex. Also improve the documentation for the index key handling. Updates #65. --- session.go | 17 +++++++++++++---- session_test.go | 21 +++++++++++++++++++-- 2 files changed, 32 insertions(+), 6 deletions(-) diff --git a/session.go b/session.go index f30e2541b..fcfcab9e9 100644 --- a/session.go +++ b/session.go @@ -965,7 +965,7 @@ type Index struct { // documents with indexed time.Time older than the provided delta. ExpireAfter time.Duration - // Index name computed by EnsureIndex during creation. + // Name holds the index name. On creation it is automatically computed by EnsureIndex if unset. Name string // Properties for spatial indexes. 
@@ -1069,7 +1069,8 @@ func (c *Collection) EnsureIndexKey(key ...string) error { } // EnsureIndex ensures an index with the given key exists, creating it with -// the provided parameters if necessary. +// the provided parameters if necessary. EnsureIndex does not modify a previously +// existent index with a matching key. The old index must be dropped first instead. // // Once EnsureIndex returns successfully, following requests for the same index // will not contact the server unless Collection.DropIndex is used to drop the @@ -1088,9 +1089,13 @@ func (c *Collection) EnsureIndexKey(key ...string) error { // // The Key value determines which fields compose the index. The index ordering // will be ascending by default. To obtain an index with a descending order, -// the field name should be prefixed by a dash (e.g. []string{"-time"}). +// the field name should be prefixed by a dash (e.g. []string{"-time"}). It can +// also be optionally prefixed by an index kind, as in "$text:summary" or +// "$2d:-point". The key string format is: +// +// [$:][-] // -// If Unique is true, the index must necessarily contain only a single +// If the Unique field is true, the index must necessarily contain only a single // document per Key. With DropDups set to true, documents with the same key // as a previously indexed one will be dropped rather than an error returned. 
// @@ -1164,6 +1169,10 @@ func (c *Collection) EnsureIndex(index Index) error { LanguageOverride: index.LanguageOverride, } + if index.Name != "" { + spec.Name = index.Name + } + NextField: for name, weight := range index.Weights { for i, elem := range spec.Weights { diff --git a/session_test.go b/session_test.go index 8758a80b2..1685947ac 100644 --- a/session_test.go +++ b/session_test.go @@ -2511,7 +2511,7 @@ func (s *S) TestEnsureIndex(c *C) { } index5 := mgo.Index{ - Key: []string{"$text:a", "$text:b"}, + Key: []string{"$text:a", "$text:b"}, Weights: map[string]int{"b": 42}, } @@ -2521,10 +2521,15 @@ func (s *S) TestEnsureIndex(c *C) { LanguageOverride: "idioma", } + index7 := mgo.Index{ + Key: []string{"cn"}, + Name: "CustomName", + } + coll1 := session.DB("mydb").C("mycoll1") coll2 := session.DB("mydb").C("mycoll2") - for _, index := range []mgo.Index{index1, index2, index3, index4, index5} { + for _, index := range []mgo.Index{index1, index2, index3, index4, index5, index7} { err = coll1.EnsureIndex(index) c.Assert(err, IsNil) } @@ -2559,6 +2564,10 @@ func (s *S) TestEnsureIndex(c *C) { err = sysidx.Find(M{"name": "a_text"}).One(result6) c.Assert(err, IsNil) + result7 := M{} + err = sysidx.Find(M{"name": "CustomName"}).One(result7) + c.Assert(err, IsNil) + delete(result1, "v") expected1 := M{ "name": "a_1", @@ -2628,6 +2637,14 @@ func (s *S) TestEnsureIndex(c *C) { } c.Assert(result6, DeepEquals, expected6) + delete(result7, "v") + expected7 := M{ + "name": "CustomName", + "key": M{"cn": 1}, + "ns": "mydb.mycoll1", + } + c.Assert(result7, DeepEquals, expected7) + // Ensure the index actually works for real. err = coll1.Insert(M{"a": 1, "b": 1}) c.Assert(err, IsNil) From 565a5386d06df73f3028cfd7ba70b99484d907df Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Thu, 22 Jan 2015 12:14:46 -0200 Subject: [PATCH 117/305] Drop support for custom index name on EnsureIndex. 
MongoDB only supports a single index with a given index key, so the custom name is a distraction. It will create surprising behavior where a given index is not created because a different index with the same key already exists, and it breaks the use of the current API, such as with DropIndex which is based on the index key. Updates #65. --- session.go | 7 +- session_test.go | 222 ++++++++++++++++++------------------------------ 2 files changed, 85 insertions(+), 144 deletions(-) diff --git a/session.go b/session.go index fcfcab9e9..ed1a57b08 100644 --- a/session.go +++ b/session.go @@ -965,7 +965,8 @@ type Index struct { // documents with indexed time.Time older than the provided delta. ExpireAfter time.Duration - // Name holds the index name. On creation it is automatically computed by EnsureIndex if unset. + // Name holds the stored index name. On creation this field is ignored and the index name + // is automatically computed by EnsureIndex based on the index key Name string // Properties for spatial indexes. 
@@ -1169,10 +1170,6 @@ func (c *Collection) EnsureIndex(index Index) error { LanguageOverride: index.LanguageOverride, } - if index.Name != "" { - spec.Name = index.Name - } - NextField: for name, weight := range index.Weights { for i, elem := range spec.Weights { diff --git a/session_test.go b/session_test.go index 1685947ac..4aa69e45a 100644 --- a/session_test.go +++ b/session_test.go @@ -2479,178 +2479,122 @@ func (s *S) TestQueryErrorNext(c *C) { c.Assert(result.Err, Matches, ".*Unsupported projection option:.*") } -func (s *S) TestEnsureIndex(c *C) { - session, err := mgo.Dial("localhost:40001") - c.Assert(err, IsNil) - defer session.Close() - - index1 := mgo.Index{ +var indexTests = []struct { + index mgo.Index + expected M +}{{ + mgo.Index{ Key: []string{"a"}, Background: true, - } - - index2 := mgo.Index{ - Key: []string{"a", "-b"}, - Unique: true, - DropDups: true, - } - - // Obsolete: - index3 := mgo.Index{ - Key: []string{"@loc_old"}, - Min: -500, - Max: 500, - Bits: 32, - } - - index4 := mgo.Index{ - Key: []string{"$2d:loc"}, - Min: -500, - Max: 500, - Bits: 32, - } - - index5 := mgo.Index{ - Key: []string{"$text:a", "$text:b"}, - Weights: map[string]int{"b": 42}, - } - - index6 := mgo.Index{ - Key: []string{"$text:a"}, - DefaultLanguage: "portuguese", - LanguageOverride: "idioma", - } - - index7 := mgo.Index{ - Key: []string{"cn"}, - Name: "CustomName", - } - - coll1 := session.DB("mydb").C("mycoll1") - coll2 := session.DB("mydb").C("mycoll2") - - for _, index := range []mgo.Index{index1, index2, index3, index4, index5, index7} { - err = coll1.EnsureIndex(index) - c.Assert(err, IsNil) - } - - // Cannot have multiple text indexes on the same collection. 
- err = coll2.EnsureIndex(index6) - c.Assert(err, IsNil) - - sysidx := session.DB("mydb").C("system.indexes") - - result1 := M{} - err = sysidx.Find(M{"name": "a_1"}).One(result1) - c.Assert(err, IsNil) - - result2 := M{} - err = sysidx.Find(M{"name": "a_1_b_-1"}).One(result2) - c.Assert(err, IsNil) - - result3 := M{} - err = sysidx.Find(M{"name": "loc_old_2d"}).One(result3) - c.Assert(err, IsNil) - - result4 := M{} - err = sysidx.Find(M{"name": "loc_2d"}).One(result4) - c.Assert(err, IsNil) - - result5 := M{} - err = sysidx.Find(M{"name": "a_text_b_text"}).One(result5) - c.Assert(err, IsNil) - - result6 := M{} - err = sysidx.Find(M{"name": "a_text"}).One(result6) - c.Assert(err, IsNil) - - result7 := M{} - err = sysidx.Find(M{"name": "CustomName"}).One(result7) - c.Assert(err, IsNil) - - delete(result1, "v") - expected1 := M{ + }, + M{ "name": "a_1", "key": M{"a": 1}, - "ns": "mydb.mycoll1", + "ns": "mydb.mycoll", "background": true, - } - c.Assert(result1, DeepEquals, expected1) - - delete(result2, "v") - expected2 := M{ + }, +}, { + mgo.Index{ + Key: []string{"a", "-b"}, + Unique: true, + DropDups: true, + }, + M{ "name": "a_1_b_-1", "key": M{"a": 1, "b": -1}, - "ns": "mydb.mycoll1", + "ns": "mydb.mycoll", "unique": true, "dropDups": true, - } - if s.versionAtLeast(2, 7) { - // Was deprecated in 2.6, and not being reported by 2.7+. 
- delete(expected2, "dropDups") - } - c.Assert(result2, DeepEquals, expected2) - - delete(result3, "v") - expected3 := M{ + }, +}, { + mgo.Index{ + Key: []string{"@loc_old"}, // Obsolete + Min: -500, + Max: 500, + Bits: 32, + }, + M{ "name": "loc_old_2d", "key": M{"loc_old": "2d"}, - "ns": "mydb.mycoll1", + "ns": "mydb.mycoll", "min": -500, "max": 500, "bits": 32, - } - c.Assert(result3, DeepEquals, expected3) - - delete(result4, "v") - expected4 := M{ + }, +}, { + mgo.Index{ + Key: []string{"$2d:loc"}, + Min: -500, + Max: 500, + Bits: 32, + }, + M{ "name": "loc_2d", "key": M{"loc": "2d"}, - "ns": "mydb.mycoll1", + "ns": "mydb.mycoll", "min": -500, "max": 500, "bits": 32, - } - c.Assert(result4, DeepEquals, expected4) - - delete(result5, "v") - expected5 := M{ + }, +}, { + mgo.Index{ + Key: []string{"$text:a", "$text:b"}, + Weights: map[string]int{"b": 42}, + }, + M{ "name": "a_text_b_text", "key": M{"_fts": "text", "_ftsx": 1}, - "ns": "mydb.mycoll1", + "ns": "mydb.mycoll", "weights": M{"a": 1, "b": 42}, "default_language": "english", "language_override": "language", "textIndexVersion": 2, - } - c.Assert(result5, DeepEquals, expected5) - - delete(result6, "v") - expected6 := M{ + }, +}, { + mgo.Index{ + Key: []string{"$text:a"}, + DefaultLanguage: "portuguese", + LanguageOverride: "idioma", + }, + M{ "name": "a_text", "key": M{"_fts": "text", "_ftsx": 1}, - "ns": "mydb.mycoll2", + "ns": "mydb.mycoll", "weights": M{"a": 1}, "default_language": "portuguese", "language_override": "idioma", "textIndexVersion": 2, - } - c.Assert(result6, DeepEquals, expected6) - - delete(result7, "v") - expected7 := M{ - "name": "CustomName", - "key": M{"cn": 1}, - "ns": "mydb.mycoll1", - } - c.Assert(result7, DeepEquals, expected7) + }, +}} - // Ensure the index actually works for real. 
- err = coll1.Insert(M{"a": 1, "b": 1}) +func (s *S) TestEnsureIndex(c *C) { + session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) - err = coll1.Insert(M{"a": 1, "b": 1}) - c.Assert(err, ErrorMatches, ".*duplicate key error.*") - c.Assert(mgo.IsDup(err), Equals, true) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + idxs := session.DB("mydb").C("system.indexes") + + for _, test := range indexTests { + err = coll.EnsureIndex(test.index) + c.Assert(err, IsNil) + + obtained := M{} + err = idxs.Find(M{"name": test.expected["name"]}).One(obtained) + c.Assert(err, IsNil) + + delete(obtained, "v") + + if s.versionAtLeast(2, 7) { + // Was deprecated in 2.6, and not being reported by 2.7+. + delete(test.expected, "dropDups") + } + + c.Assert(obtained, DeepEquals, test.expected) + + err = coll.DropIndex(test.index.Key...) + c.Assert(err, IsNil) + } } func (s *S) TestEnsureIndexWithBadInfo(c *C) { From a581209da7756f6d7ab045a275d70a865b07e498 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Thu, 22 Jan 2015 12:24:52 -0200 Subject: [PATCH 118/305] Fix support for special $** text index field name. Closes #65. --- session.go | 4 +++- session_test.go | 13 +++++++++++++ 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/session.go b/session.go index ed1a57b08..93988ba7f 100644 --- a/session.go +++ b/session.go @@ -1005,10 +1005,12 @@ func parseIndexKey(key []string) (*indexKeyInfo, error) { kind = field[1:c] field = field[c+1:] keyInfo.name += field + "_" + kind + } else { + field = "\x00" } } switch field[0] { - case '$': + case 0: // Logic above failed. Reset and error. 
field = "" case '@': diff --git a/session_test.go b/session_test.go index 4aa69e45a..324a27a27 100644 --- a/session_test.go +++ b/session_test.go @@ -2565,6 +2565,19 @@ var indexTests = []struct { "language_override": "idioma", "textIndexVersion": 2, }, +}, { + mgo.Index{ + Key: []string{"$text:$**"}, + }, + M{ + "name": "$**_text", + "key": M{"_fts": "text", "_ftsx": 1}, + "ns": "mydb.mycoll", + "weights": M{"$**": 1}, + "default_language": "english", + "language_override": "language", + "textIndexVersion": 2, + }, }} func (s *S) TestEnsureIndex(c *C) { From ff4340b5940f02c9ea0bdd95182870b2282fea32 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Thu, 22 Jan 2015 14:43:44 -0200 Subject: [PATCH 119/305] Remove GridFS chunks if file doc insertion fails. Fixes #66. --- gridfs.go | 17 +++++++++-------- gridfs_test.go | 28 ++++++++++++++++++++++++++++ 2 files changed, 37 insertions(+), 8 deletions(-) diff --git a/gridfs.go b/gridfs.go index 5ed6a77e4..b67e3ad45 100644 --- a/gridfs.go +++ b/gridfs.go @@ -521,17 +521,18 @@ func (file *GridFile) completeWrite() { debugf("GridFile %p: waiting for %d pending chunks to complete file write", file, file.wpending) file.c.Wait() } + if file.err == nil { + hexsum := hex.EncodeToString(file.wsum.Sum(nil)) + if file.doc.UploadDate.IsZero() { + file.doc.UploadDate = bson.Now() + } + file.doc.MD5 = hexsum + file.err = file.gfs.Files.Insert(file.doc) + file.gfs.Chunks.EnsureIndexKey("files_id", "n") + } if file.err != nil { file.gfs.Chunks.RemoveAll(bson.D{{"files_id", file.doc.Id}}) - return - } - hexsum := hex.EncodeToString(file.wsum.Sum(nil)) - if file.doc.UploadDate.IsZero() { - file.doc.UploadDate = bson.Now() } - file.doc.MD5 = hexsum - file.err = file.gfs.Files.Insert(file.doc) - file.gfs.Chunks.EnsureIndexKey("files_id", "n") } // Abort cancels an in-progress write, preventing the file from being diff --git a/gridfs_test.go b/gridfs_test.go index 655fbc4c8..2c43df2e0 100644 --- a/gridfs_test.go +++ b/gridfs_test.go @@ 
-329,6 +329,34 @@ func (s *S) TestGridFSAbort(c *C) { c.Assert(count, Equals, 0) } +func (s *S) TestGridFSCloseConflict(c *C) { + session, err := mgo.Dial("localhost:40011") + c.Assert(err, IsNil) + defer session.Close() + + db := session.DB("mydb") + + db.C("fs.files").EnsureIndex(mgo.Index{Key: []string{"filename"}, Unique: true}) + + // For a closing-time conflict + err = db.C("fs.files").Insert(M{"filename": "foo.txt"}) + c.Assert(err, IsNil) + + gfs := db.GridFS("fs") + file, err := gfs.Create("foo.txt") + c.Assert(err, IsNil) + + _, err = file.Write([]byte("some data")) + c.Assert(err, IsNil) + + err = file.Close() + c.Assert(mgo.IsDup(err), Equals, true) + + count, err := db.C("fs.chunks").Count() + c.Assert(err, IsNil) + c.Assert(count, Equals, 0) +} + func (s *S) TestGridFSOpenNotFound(c *C) { session, err := mgo.Dial("localhost:40011") c.Assert(err, IsNil) From 1407fa35a856bf446ccdaf9f618745114ba2f4e9 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Thu, 22 Jan 2015 15:56:07 -0200 Subject: [PATCH 120/305] Unmarshal null and "" in JSON as empty ObjectId. Fixes #67. --- bson/bson.go | 7 ++++ bson/bson_test.go | 82 ++++++++++++++++++++++++++++++++++------------- 2 files changed, 66 insertions(+), 23 deletions(-) diff --git a/bson/bson.go b/bson/bson.go index 68e932fb1..41816b874 100644 --- a/bson/bson.go +++ b/bson/bson.go @@ -33,6 +33,7 @@ package bson import ( + "bytes" "crypto/md5" "crypto/rand" "encoding/binary" @@ -262,8 +263,14 @@ func (id ObjectId) MarshalJSON() ([]byte, error) { return []byte(fmt.Sprintf(`"%x"`, string(id))), nil } +var nullBytes = []byte("null") + // UnmarshalJSON turns *bson.ObjectId into a json.Unmarshaller. 
func (id *ObjectId) UnmarshalJSON(data []byte) error { + if len(data) == 2 && data[0] == '"' && data[1] == '"' || bytes.Equal(data, nullBytes) { + *id = "" + return nil + } if len(data) != 26 || data[0] != '"' || data[25] != '"' { return errors.New(fmt.Sprintf("Invalid ObjectId in JSON: %s", string(data))) } diff --git a/bson/bson_test.go b/bson/bson_test.go index 899c013e9..5116f639d 100644 --- a/bson/bson_test.go +++ b/bson/bson_test.go @@ -1298,7 +1298,9 @@ var oneWayCrossItems = []crossTypeItem{ {&shortIface{int64(1) << 30}, map[string]interface{}{"v": 1 << 30}}, // Ensure omitempty on struct with private fields works properly. - {&struct{ V struct { v time.Time } ",omitempty" }{}, map[string]interface{}{}}, + {&struct { + V struct{ v time.Time } ",omitempty" + }{}, map[string]interface{}{}}, } func testCrossPair(c *C, dump interface{}, load interface{}) { @@ -1449,31 +1451,65 @@ func (s *S) TestNewObjectIdWithTime(c *C) { // ObjectId JSON marshalling. type jsonType struct { - Id *bson.ObjectId -} + Id bson.ObjectId +} + +var jsonIdTests = []struct { + value jsonType + json string + marshal bool + unmarshal bool + error string +}{{ + value: jsonType{Id: bson.ObjectIdHex("4d88e15b60f486e428412dc9")}, + json: `{"Id":"4d88e15b60f486e428412dc9"}`, + marshal: true, + unmarshal: true, +}, { + value: jsonType{}, + json: `{"Id":""}`, + marshal: true, + unmarshal: true, +}, { + value: jsonType{}, + json: `{"Id":null}`, + marshal: false, + unmarshal: true, +}, { + json: `{"Id":"4d88e15b60f486e428412dc9A"}`, + error: `Invalid ObjectId in JSON: "4d88e15b60f486e428412dc9A"`, + marshal: false, + unmarshal: true, +}, { + json: `{"Id":"4d88e15b60f486e428412dcZ"}`, + error: `Invalid ObjectId in JSON: "4d88e15b60f486e428412dcZ" .*`, + marshal: false, + unmarshal: true, +}} func (s *S) TestObjectIdJSONMarshaling(c *C) { - id := bson.ObjectIdHex("4d88e15b60f486e428412dc9") - v := jsonType{Id: &id} - data, err := json.Marshal(&v) - c.Assert(err, IsNil) - c.Assert(string(data), 
Equals, `{"Id":"4d88e15b60f486e428412dc9"}`) -} - -func (s *S) TestObjectIdJSONUnmarshaling(c *C) { - data := []byte(`{"Id":"4d88e15b60f486e428412dc9"}`) - v := jsonType{} - err := json.Unmarshal(data, &v) - c.Assert(err, IsNil) - c.Assert(*v.Id, Equals, bson.ObjectIdHex("4d88e15b60f486e428412dc9")) -} + for _, test := range jsonIdTests { + if test.marshal { + data, err := json.Marshal(&test.value) + if test.error == "" { + c.Assert(err, IsNil) + c.Assert(string(data), Equals, test.json) + } else { + c.Assert(err, ErrorMatches, test.error) + } + } -func (s *S) TestObjectIdJSONUnmarshalingError(c *C) { - v := jsonType{} - err := json.Unmarshal([]byte(`{"Id":"4d88e15b60f486e428412dc9A"}`), &v) - c.Assert(err, ErrorMatches, `Invalid ObjectId in JSON: "4d88e15b60f486e428412dc9A"`) - err = json.Unmarshal([]byte(`{"Id":"4d88e15b60f486e428412dcZ"}`), &v) - c.Assert(err, ErrorMatches, `Invalid ObjectId in JSON: "4d88e15b60f486e428412dcZ" .*`) + if test.unmarshal { + var value jsonType + err := json.Unmarshal([]byte(test.json), &value) + if test.error == "" { + c.Assert(err, IsNil) + c.Assert(value, DeepEquals, test.value) + } else { + c.Assert(err, ErrorMatches, test.error) + } + } + } } // -------------------------------------------------------------------------- From c6a7dce14133ccac2dcac3793f1d6e2ef048503a Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Sat, 24 Jan 2015 09:37:22 -0200 Subject: [PATCH 121/305] Preparing release of v2 r2015.01.24. 
--- auth.go | 4 ++-- auth_test.go | 2 +- bson/bson_test.go | 14 +++++++------- bulk_test.go | 2 +- cluster.go | 2 +- cluster_test.go | 4 ++-- gridfs.go | 2 +- gridfs_test.go | 4 ++-- internal/scram/scram_test.go | 2 +- saslimpl.go | 2 +- server.go | 2 +- session.go | 2 +- session_test.go | 6 +++--- socket.go | 2 +- suite_test.go | 4 ++-- txn/debug.go | 2 +- txn/flusher.go | 4 ++-- txn/mgo_test.go | 2 +- txn/sim_test.go | 6 +++--- txn/tarjan.go | 2 +- txn/tarjan_test.go | 2 +- txn/txn.go | 4 ++-- txn/txn_test.go | 6 +++--- 23 files changed, 41 insertions(+), 41 deletions(-) diff --git a/auth.go b/auth.go index 7787ea198..dc26e52f5 100644 --- a/auth.go +++ b/auth.go @@ -34,8 +34,8 @@ import ( "fmt" "sync" - "gopkg.in/mgo.v2-unstable/bson" - "gopkg.in/mgo.v2-unstable/internal/scram" + "gopkg.in/mgo.v2/bson" + "gopkg.in/mgo.v2/internal/scram" ) type authCmd struct { diff --git a/auth_test.go b/auth_test.go index e0216abec..8fd5d5f2c 100644 --- a/auth_test.go +++ b/auth_test.go @@ -39,7 +39,7 @@ import ( "time" . "gopkg.in/check.v1" - "gopkg.in/mgo.v2-unstable" + "gopkg.in/mgo.v2" ) func (s *S) TestAuthLoginDatabase(c *C) { diff --git a/bson/bson_test.go b/bson/bson_test.go index 5116f639d..5c1b869ee 100644 --- a/bson/bson_test.go +++ b/bson/bson_test.go @@ -37,7 +37,7 @@ import ( "time" . 
"gopkg.in/check.v1" - "gopkg.in/mgo.v2-unstable/bson" + "gopkg.in/mgo.v2/bson" ) func TestAll(t *testing.T) { @@ -1476,14 +1476,14 @@ var jsonIdTests = []struct { marshal: false, unmarshal: true, }, { - json: `{"Id":"4d88e15b60f486e428412dc9A"}`, - error: `Invalid ObjectId in JSON: "4d88e15b60f486e428412dc9A"`, - marshal: false, + json: `{"Id":"4d88e15b60f486e428412dc9A"}`, + error: `Invalid ObjectId in JSON: "4d88e15b60f486e428412dc9A"`, + marshal: false, unmarshal: true, }, { - json: `{"Id":"4d88e15b60f486e428412dcZ"}`, - error: `Invalid ObjectId in JSON: "4d88e15b60f486e428412dcZ" .*`, - marshal: false, + json: `{"Id":"4d88e15b60f486e428412dcZ"}`, + error: `Invalid ObjectId in JSON: "4d88e15b60f486e428412dcZ" .*`, + marshal: false, unmarshal: true, }} diff --git a/bulk_test.go b/bulk_test.go index d35c42cad..24af1b102 100644 --- a/bulk_test.go +++ b/bulk_test.go @@ -28,7 +28,7 @@ package mgo_test import ( . "gopkg.in/check.v1" - "gopkg.in/mgo.v2-unstable" + "gopkg.in/mgo.v2" ) func (s *S) TestBulkInsert(c *C) { diff --git a/cluster.go b/cluster.go index 2db8a6d37..bd5d9acc6 100644 --- a/cluster.go +++ b/cluster.go @@ -33,7 +33,7 @@ import ( "sync" "time" - "gopkg.in/mgo.v2-unstable/bson" + "gopkg.in/mgo.v2/bson" ) // --------------------------------------------------------------------------- diff --git a/cluster_test.go b/cluster_test.go index cd1366957..cef5438a1 100644 --- a/cluster_test.go +++ b/cluster_test.go @@ -35,8 +35,8 @@ import ( "time" . 
"gopkg.in/check.v1" - "gopkg.in/mgo.v2-unstable" - "gopkg.in/mgo.v2-unstable/bson" + "gopkg.in/mgo.v2" + "gopkg.in/mgo.v2/bson" ) func (s *S) TestNewSession(c *C) { diff --git a/gridfs.go b/gridfs.go index d6bdcd421..54b3dd50e 100644 --- a/gridfs.go +++ b/gridfs.go @@ -36,7 +36,7 @@ import ( "sync" "time" - "gopkg.in/mgo.v2-unstable/bson" + "gopkg.in/mgo.v2/bson" ) type GridFS struct { diff --git a/gridfs_test.go b/gridfs_test.go index 2c43df2e0..5a6ed5559 100644 --- a/gridfs_test.go +++ b/gridfs_test.go @@ -32,8 +32,8 @@ import ( "time" . "gopkg.in/check.v1" - "gopkg.in/mgo.v2-unstable" - "gopkg.in/mgo.v2-unstable/bson" + "gopkg.in/mgo.v2" + "gopkg.in/mgo.v2/bson" ) func (s *S) TestGridFSCreate(c *C) { diff --git a/internal/scram/scram_test.go b/internal/scram/scram_test.go index ff4abe941..9c20fdfc4 100644 --- a/internal/scram/scram_test.go +++ b/internal/scram/scram_test.go @@ -5,7 +5,7 @@ import ( "testing" . "gopkg.in/check.v1" - "gopkg.in/mgo.v2-unstable/internal/scram" + "gopkg.in/mgo.v2/internal/scram" "strings" ) diff --git a/saslimpl.go b/saslimpl.go index 49c94091d..58c0891c6 100644 --- a/saslimpl.go +++ b/saslimpl.go @@ -3,7 +3,7 @@ package mgo import ( - "gopkg.in/mgo.v2-unstable/sasl" + "gopkg.in/mgo.v2/sasl" ) func saslNew(cred Credential, host string) (saslStepper, error) { diff --git a/server.go b/server.go index 54fe233eb..d5086a290 100644 --- a/server.go +++ b/server.go @@ -33,7 +33,7 @@ import ( "sync" "time" - "gopkg.in/mgo.v2-unstable/bson" + "gopkg.in/mgo.v2/bson" ) // --------------------------------------------------------------------------- diff --git a/session.go b/session.go index 93988ba7f..178891ae4 100644 --- a/session.go +++ b/session.go @@ -41,7 +41,7 @@ import ( "sync" "time" - "gopkg.in/mgo.v2-unstable/bson" + "gopkg.in/mgo.v2/bson" ) type mode int diff --git a/session_test.go b/session_test.go index 324a27a27..7468b4e31 100644 --- a/session_test.go +++ b/session_test.go @@ -38,8 +38,8 @@ import ( "time" . 
"gopkg.in/check.v1" - "gopkg.in/mgo.v2-unstable" - "gopkg.in/mgo.v2-unstable/bson" + "gopkg.in/mgo.v2" + "gopkg.in/mgo.v2/bson" ) func (s *S) TestRunString(c *C) { @@ -2567,7 +2567,7 @@ var indexTests = []struct { }, }, { mgo.Index{ - Key: []string{"$text:$**"}, + Key: []string{"$text:$**"}, }, M{ "name": "$**_text", diff --git a/socket.go b/socket.go index 53848c7f5..1fb0dff77 100644 --- a/socket.go +++ b/socket.go @@ -32,7 +32,7 @@ import ( "sync" "time" - "gopkg.in/mgo.v2-unstable/bson" + "gopkg.in/mgo.v2/bson" ) type replyFunc func(err error, reply *replyOp, docNum int, docData []byte) diff --git a/suite_test.go b/suite_test.go index 962a6c1cb..334407e31 100644 --- a/suite_test.go +++ b/suite_test.go @@ -38,8 +38,8 @@ import ( "time" . "gopkg.in/check.v1" - "gopkg.in/mgo.v2-unstable" - "gopkg.in/mgo.v2-unstable/bson" + "gopkg.in/mgo.v2" + "gopkg.in/mgo.v2/bson" ) var fast = flag.Bool("fast", false, "Skip slow tests") diff --git a/txn/debug.go b/txn/debug.go index e3300c433..8224bb313 100644 --- a/txn/debug.go +++ b/txn/debug.go @@ -6,7 +6,7 @@ import ( "sort" "sync/atomic" - "gopkg.in/mgo.v2-unstable/bson" + "gopkg.in/mgo.v2/bson" ) var ( diff --git a/txn/flusher.go b/txn/flusher.go index 86e711037..25b2f0319 100644 --- a/txn/flusher.go +++ b/txn/flusher.go @@ -3,8 +3,8 @@ package txn import ( "fmt" - "gopkg.in/mgo.v2-unstable" - "gopkg.in/mgo.v2-unstable/bson" + "gopkg.in/mgo.v2" + "gopkg.in/mgo.v2/bson" ) func flush(r *Runner, t *transaction) error { diff --git a/txn/mgo_test.go b/txn/mgo_test.go index 3e7073a3a..5abc47335 100644 --- a/txn/mgo_test.go +++ b/txn/mgo_test.go @@ -2,7 +2,7 @@ package txn_test import ( "bytes" - "gopkg.in/mgo.v2-unstable" + "gopkg.in/mgo.v2" . 
"gopkg.in/check.v1" "os/exec" "time" diff --git a/txn/sim_test.go b/txn/sim_test.go index 9fcfb140c..399e585b3 100644 --- a/txn/sim_test.go +++ b/txn/sim_test.go @@ -2,9 +2,9 @@ package txn_test import ( "flag" - "gopkg.in/mgo.v2-unstable" - "gopkg.in/mgo.v2-unstable/bson" - "gopkg.in/mgo.v2-unstable/txn" + "gopkg.in/mgo.v2" + "gopkg.in/mgo.v2/bson" + "gopkg.in/mgo.v2/txn" . "gopkg.in/check.v1" "math/rand" "time" diff --git a/txn/tarjan.go b/txn/tarjan.go index d5ae94690..e56541c9b 100644 --- a/txn/tarjan.go +++ b/txn/tarjan.go @@ -1,7 +1,7 @@ package txn import ( - "gopkg.in/mgo.v2-unstable/bson" + "gopkg.in/mgo.v2/bson" "sort" ) diff --git a/txn/tarjan_test.go b/txn/tarjan_test.go index b5633bc5e..79745c39b 100644 --- a/txn/tarjan_test.go +++ b/txn/tarjan_test.go @@ -2,7 +2,7 @@ package txn import ( "fmt" - "gopkg.in/mgo.v2-unstable/bson" + "gopkg.in/mgo.v2/bson" . "gopkg.in/check.v1" ) diff --git a/txn/txn.go b/txn/txn.go index d81c1f16f..5809e2d3a 100644 --- a/txn/txn.go +++ b/txn/txn.go @@ -14,8 +14,8 @@ import ( "strings" "sync" - "gopkg.in/mgo.v2-unstable" - "gopkg.in/mgo.v2-unstable/bson" + "gopkg.in/mgo.v2" + "gopkg.in/mgo.v2/bson" crand "crypto/rand" mrand "math/rand" diff --git a/txn/txn_test.go b/txn/txn_test.go index e52b119d3..1e396eadc 100644 --- a/txn/txn_test.go +++ b/txn/txn_test.go @@ -6,9 +6,9 @@ import ( "time" . "gopkg.in/check.v1" - "gopkg.in/mgo.v2-unstable" - "gopkg.in/mgo.v2-unstable/bson" - "gopkg.in/mgo.v2-unstable/txn" + "gopkg.in/mgo.v2" + "gopkg.in/mgo.v2/bson" + "gopkg.in/mgo.v2/txn" ) func TestAll(t *testing.T) { From 13ee89b58b0eefd8abe3ea00997cb150f0b91211 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Thu, 26 Feb 2015 01:08:08 -0300 Subject: [PATCH 122/305] Support createIndexes command in EnsureIndex. Reported by Louisa Berger. 
--- session.go | 18 +++++++++++------- session_test.go | 4 ++-- suite_test.go | 6 +++--- 3 files changed, 16 insertions(+), 12 deletions(-) diff --git a/session.go b/session.go index 93988ba7f..8ae582c2d 100644 --- a/session.go +++ b/session.go @@ -1183,17 +1183,21 @@ NextField: panic("weight provided for field that is not part of index key: " + name) } - session = session.Clone() - defer session.Close() - session.SetMode(Strong, false) - session.EnsureSafe(&Safe{}) + cloned := session.Clone() + defer cloned.Close() + cloned.SetMode(Strong, false) + cloned.EnsureSafe(&Safe{}) + db := c.Database.With(cloned) - db := c.Database.With(session) - err = db.C("system.indexes").Insert(&spec) + // Try with a command first. + err = db.Run(bson.D{{"createIndexes", c.Name}, {"indexes", []indexSpec{spec}}}, nil) + if isNoCmd(err) { + // Command not yet supported. Insert into the indexes collection instead. + err = db.C("system.indexes").Insert(&spec) + } if err == nil { session.cluster().CacheIndex(cacheKey, true) } - session.Close() return err } diff --git a/session_test.go b/session_test.go index 324a27a27..1e1f0c316 100644 --- a/session_test.go +++ b/session_test.go @@ -2750,7 +2750,7 @@ func (s *S) TestEnsureIndexCaching(c *C) { c.Assert(err, IsNil) stats = mgo.GetStats() - c.Assert(stats.SentOps, Equals, 2) + c.Assert(stats.SentOps >= 1 && stats.SentOps <= 2, Equals, true) // Dropping the index should also drop the cached index key. 
err = coll.DropIndex("a") @@ -2762,7 +2762,7 @@ func (s *S) TestEnsureIndexCaching(c *C) { c.Assert(err, IsNil) stats = mgo.GetStats() - c.Assert(stats.SentOps, Equals, 2) + c.Assert(stats.SentOps >= 1 && stats.SentOps <= 2, Equals, true) } func (s *S) TestEnsureIndexGetIndexes(c *C) { diff --git a/suite_test.go b/suite_test.go index 962a6c1cb..3f488a364 100644 --- a/suite_test.go +++ b/suite_test.go @@ -66,13 +66,13 @@ type S struct { frozen []string } -func (s *S) versionAtLeast(v ...int) bool { +func (s *S) versionAtLeast(v ...int) (result bool) { for i := range v { if i == len(s.build.VersionArray) { return false } - if s.build.VersionArray[i] < v[i] { - return false + if s.build.VersionArray[i] != v[i] { + return s.build.VersionArray[i] >= v[i] } } return true From 40757bbedb2f17d297e30759087799948c631df8 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Thu, 26 Feb 2015 01:42:10 -0300 Subject: [PATCH 123/305] Fix index tests for 2.0. --- session_test.go | 26 ++++++++++++++++++-------- testdb/dropall.js | 2 +- testdb/init.js | 2 +- 3 files changed, 20 insertions(+), 10 deletions(-) diff --git a/session_test.go b/session_test.go index 1e1f0c316..c5dda5f8a 100644 --- a/session_test.go +++ b/session_test.go @@ -2589,6 +2589,11 @@ func (s *S) TestEnsureIndex(c *C) { idxs := session.DB("mydb").C("system.indexes") for _, test := range indexTests { + if !s.versionAtLeast(2, 4) && test.expected["weights"] != nil { + // No text indexes until 2.4. + continue + } + err = coll.EnsureIndex(test.index) c.Assert(err, IsNil) @@ -2750,7 +2755,7 @@ func (s *S) TestEnsureIndexCaching(c *C) { c.Assert(err, IsNil) stats = mgo.GetStats() - c.Assert(stats.SentOps >= 1 && stats.SentOps <= 2, Equals, true) + c.Assert(stats.SentOps > 0, Equals, true) // Dropping the index should also drop the cached index key. 
err = coll.DropIndex("a") @@ -2762,7 +2767,7 @@ func (s *S) TestEnsureIndexCaching(c *C) { c.Assert(err, IsNil) stats = mgo.GetStats() - c.Assert(stats.SentOps >= 1 && stats.SentOps <= 2, Equals, true) + c.Assert(stats.SentOps > 0, Equals, true) } func (s *S) TestEnsureIndexGetIndexes(c *C) { @@ -2813,9 +2818,9 @@ func (s *S) TestEnsureIndexEvalGetIndexes(c *C) { c.Assert(err, IsNil) err = session.Run(bson.D{{"eval", "db.getSiblingDB('mydb').mycoll.ensureIndex({a: 1})"}}, nil) c.Assert(err, IsNil) - err = session.Run(bson.D{{"eval", "db.getSiblingDB('mydb').mycoll.ensureIndex({c: '2d'})"}}, nil) + err = session.Run(bson.D{{"eval", "db.getSiblingDB('mydb').mycoll.ensureIndex({c: -1, e: 1})"}}, nil) c.Assert(err, IsNil) - err = session.Run(bson.D{{"eval", "db.getSiblingDB('mydb').mycoll.ensureIndex({d: -1, e: 1})"}}, nil) + err = session.Run(bson.D{{"eval", "db.getSiblingDB('mydb').mycoll.ensureIndex({d: '2d'})"}}, nil) c.Assert(err, IsNil) indexes, err := coll.Indexes() @@ -2826,10 +2831,15 @@ func (s *S) TestEnsureIndexEvalGetIndexes(c *C) { c.Assert(indexes[1].Key, DeepEquals, []string{"a"}) c.Assert(indexes[2].Name, Equals, "b_-1") c.Assert(indexes[2].Key, DeepEquals, []string{"-b"}) - c.Assert(indexes[3].Name, Equals, "c_2d") - c.Assert(indexes[3].Key, DeepEquals, []string{"$2d:c"}) - c.Assert(indexes[4].Name, Equals, "d_-1_e_1") - c.Assert(indexes[4].Key, DeepEquals, []string{"-d", "e"}) + c.Assert(indexes[3].Name, Equals, "c_-1_e_1") + c.Assert(indexes[3].Key, DeepEquals, []string{"-c", "e"}) + if s.versionAtLeast(2, 2) { + c.Assert(indexes[4].Name, Equals, "d_2d") + c.Assert(indexes[4].Key, DeepEquals, []string{"$2d:d"}) + } else { + c.Assert(indexes[4].Name, Equals, "d_") + c.Assert(indexes[4].Key, DeepEquals, []string{"$2d:d"}) + } } var testTTL = flag.Bool("test-ttl", false, "test TTL collections (may take 1 minute)") diff --git a/testdb/dropall.js b/testdb/dropall.js index 5b654f337..f52de61c9 100644 --- a/testdb/dropall.js +++ b/testdb/dropall.js @@ -3,7 
+3,7 @@ var ports = [40001, 40002, 40011, 40012, 40013, 40021, 40022, 40023, 40041, 4010 var auth = [40002, 40103, 40203, 40031] var db1 = new Mongo("localhost:40001") -if (db1.getDB("admin").serverBuildInfo().OpenSSLVersion != "") { +if (db1.getDB("admin").serverBuildInfo().OpenSSLVersion) { ports.push(40003) auth.push(40003) } diff --git a/testdb/init.js b/testdb/init.js index 7deb67e1c..8e5d80115 100644 --- a/testdb/init.js +++ b/testdb/init.js @@ -33,7 +33,7 @@ for (var i = 0; i != 60; i++) { } function hasSSL() { - return db1.serverBuildInfo().OpenSSLVersion != "" + return Boolean(db1.serverBuildInfo().OpenSSLVersion) } rs1a.runCommand({replSetInitiate: rs1cfg}) From 2a8c89365bd434bb604cece1e5249ba3e07f119c Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 16 Mar 2015 16:38:16 -0300 Subject: [PATCH 124/305] Specialize Database.Run logic as Database.run. The new function accepts a socket, and will be used by an upcoming version of writeQuery. --- session.go | 50 +++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 47 insertions(+), 3 deletions(-) diff --git a/session.go b/session.go index 8ae582c2d..e3b21f94c 100644 --- a/session.go +++ b/session.go @@ -594,10 +594,14 @@ func (db *Database) GridFS(prefix string) *GridFS { // http://www.mongodb.org/display/DOCS/List+of+Database+CommandSkips // func (db *Database) Run(cmd interface{}, result interface{}) error { - if name, ok := cmd.(string); ok { - cmd = bson.D{{name, 1}} + socket, err := db.Session.acquireSocket(true) + if err != nil { + return err } - return db.C("$cmd").Find(cmd).One(result) + defer socket.Release() + + // This is an optimized form of db.C("$cmd").Find(cmd).One(result). + return db.run(socket, cmd, result) } // Credential holds details to authenticate with a MongoDB server. 
@@ -2738,6 +2742,46 @@ func (q *Query) One(result interface{}) (err error) { return checkQueryError(op.collection, data) } +// run duplicates the behavior of collection.Find(query).One(&result) +// as performed by Database.Run, specializing the logic for running +// database commands on a given socket. +func (db *Database) run(socket *mongoSocket, cmd, result interface{}) (err error) { + // Database.Run: + if name, ok := cmd.(string); ok { + cmd = bson.D{{name, 1}} + } + + // Collection.Find: + session := db.Session + session.m.RLock() + op := session.queryConfig.op // Copy. + session.m.RUnlock() + op.query = cmd + op.collection = db.Name + ".$cmd" + + // Query.One: + op.flags |= session.slaveOkFlag() + op.limit = -1 + + data, err := socket.SimpleQuery(&op) + if err != nil { + return err + } + if data == nil { + return ErrNotFound + } + if result != nil { + err = bson.Unmarshal(data, result) + if err == nil { + debugf("Run command unmarshaled: %#v", op, result) + } else { + debugf("Run command unmarshaling failed: %#v", op, err) + return err + } + } + return checkQueryError(op.collection, data) +} + // The DBRef type implements support for the database reference MongoDB // convention as supported by multiple drivers. This convention enables // cross-referencing documents between collections and databases using From 5ee5ca25ba7457bfba557205b5e34d7e60811af7 Mon Sep 17 00:00:00 2001 From: Min-Young Wu Date: Fri, 20 Mar 2015 11:07:49 -0700 Subject: [PATCH 125/305] Adding support for $maxTimeMS on queries --- session.go | 16 ++++++++++++++++ socket.go | 1 + 2 files changed, 17 insertions(+) diff --git a/session.go b/session.go index 8ae582c2d..6ec5a0b14 100644 --- a/session.go +++ b/session.go @@ -2612,6 +2612,22 @@ func (q *Query) SetMaxScan(n int) *Query { return q } +// SetMaxTimeMS constrains the query to stop after running for the specified +// time. 
+// +// This modifier is better at stopping queries from running too long than a +// socket timeout since mongo will continue to finish the query even after +// the socket has been closed by the client. +// +// http://blog.mongodb.org/post/83621787773/maxtimems-and-query-optimizer-introspection-in +func (q *Query) SetMaxTimeMS(n int) *Query { + q.m.Lock() + q.op.options.MaxTimeMS = n + q.op.hasOptions = true + q.m.Unlock() + return q +} + // Snapshot will force the performed query to make use of an available // index on the _id field to prevent the same document from being returned // more than once in a single iteration. This might happen without this diff --git a/socket.go b/socket.go index 53848c7f5..eb468bcc8 100644 --- a/socket.go +++ b/socket.go @@ -87,6 +87,7 @@ type queryWrapper struct { Snapshot bool "$snapshot,omitempty" ReadPreference bson.D "$readPreference,omitempty" MaxScan int "$maxScan,omitempty" + MaxTimeMS int "$maxTimeMS,omitempty" } func (op *queryOp) finalQuery(socket *mongoSocket) interface{} { From 5bc2d3e95e64d61cf14dc06619b4929f6a059abe Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Thu, 26 Mar 2015 19:31:23 -0300 Subject: [PATCH 126/305] Use insert/update/delete commands on 2.6+ This is temporary, for testing the feature in the v2-unstable branch. The feature will most likely be enabled on 3.0+ only. 
--- auth_test.go | 2 +- bulk_test.go | 2 +- cluster_test.go | 2 - session.go | 171 +++++++++++++++++++++++++++++++++++----------- session_test.go | 58 ++++++++++++---- testdb/dropall.js | 8 ++- 6 files changed, 182 insertions(+), 61 deletions(-) diff --git a/auth_test.go b/auth_test.go index e0216abec..cb2777543 100644 --- a/auth_test.go +++ b/auth_test.go @@ -576,7 +576,7 @@ func (s *S) TestAuthLoginCachingWithNewSession(c *C) { coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"n": 1}) - c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized for .*") + c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*") } func (s *S) TestAuthLoginCachingAcrossPool(c *C) { diff --git a/bulk_test.go b/bulk_test.go index d35c42cad..f6092d612 100644 --- a/bulk_test.go +++ b/bulk_test.go @@ -58,7 +58,7 @@ func (s *S) TestBulkInsertError(c *C) { coll := session.DB("mydb").C("mycoll") bulk := coll.Bulk() - bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2}, M{"n": 3}) + bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2}, M{"_id": 3}) _, err = bulk.Run() c.Assert(err, ErrorMatches, ".*duplicate key.*") diff --git a/cluster_test.go b/cluster_test.go index cd1366957..8da3c3b98 100644 --- a/cluster_test.go +++ b/cluster_test.go @@ -1185,8 +1185,6 @@ func (s *S) TestRemovalOfClusterMember(c *C) { c.Logf("========== Removing slave: %s ==========", slaveAddr) master.Run(bson.D{{"$eval", `rs.remove("` + slaveAddr + `")`}}, nil) - err = master.Ping() - c.Assert(err, Equals, io.EOF) master.Refresh() diff --git a/session.go b/session.go index e3b21f94c..dccdbbe40 100644 --- a/session.go +++ b/session.go @@ -97,7 +97,7 @@ type query struct { } type getLastError struct { - CmdName int "getLastError" + CmdName int "getLastError,omitempty" W interface{} "w,omitempty" WTimeout int "wtimeout,omitempty" FSync bool "fsync,omitempty" @@ -2752,12 +2752,12 @@ func (db *Database) run(socket *mongoSocket, cmd, result interface{}) (err error } // Collection.Find: - 
session := db.Session - session.m.RLock() - op := session.queryConfig.op // Copy. - session.m.RUnlock() - op.query = cmd - op.collection = db.Name + ".$cmd" + session := db.Session + session.m.RLock() + op := session.queryConfig.op // Copy. + session.m.RUnlock() + op.query = cmd + op.collection = db.Name + ".$cmd" // Query.One: op.flags |= session.slaveOkFlag() @@ -2773,7 +2773,9 @@ func (db *Database) run(socket *mongoSocket, cmd, result interface{}) (err error if result != nil { err = bson.Unmarshal(data, result) if err == nil { - debugf("Run command unmarshaled: %#v", op, result) + var res bson.M + bson.Unmarshal(data, &res) + debugf("Run command unmarshaled: %#v, result: %#v", op, res) } else { debugf("Run command unmarshaling failed: %#v", op, err) return err @@ -3964,6 +3966,27 @@ func (iter *Iter) replyFunc() replyFunc { } } +type writeCmdResult struct { + Ok bool + N int + NModified int `bson:"nModified"` + Upserted []struct { + Index int + Id interface{} `_id` + } + Errors []struct { + Ok bool + Index int + Code int + N int + ErrMsg string + } `bson:"writeErrors"` + ConcernError struct { + Code int + ErrMsg string + } `bson:"writeConcernError"` +} + // writeQuery runs the given modifying operation, potentially followed up // by a getLastError command in case the session is in safe mode. The // LastError result is made available in lerr, and if lerr.Err is set it @@ -3981,44 +4004,112 @@ func (c *Collection) writeQuery(op interface{}) (lerr *LastError, err error) { safeOp := s.safeOp s.m.RUnlock() - if safeOp == nil { - return nil, socket.Query(op) - } else { - var mutex sync.Mutex - var replyData []byte - var replyErr error - mutex.Lock() - query := *safeOp // Copy the data. 
- query.collection = dbname + ".$cmd" - query.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) { - replyData = docData - replyErr = err - mutex.Unlock() - } - err = socket.Query(op, &query) - if err != nil { - return nil, err + if socket.ServerInfo().MaxWireVersion >= 2 { + // Servers with the write protocol >= 2 benefit from write commands. + + var writeConcern interface{} + if safeOp == nil { + writeConcern = bson.D{{"w", 0}} + } else { + writeConcern = s.safeOp.query.(*getLastError) + } + + var cmd bson.D + switch op := op.(type) { + case *insertOp: + // http://docs.mongodb.org/manual/reference/command/insert + cmd = bson.D{ + {"insert", c.Name}, + {"documents", op.documents}, + {"writeConcern", writeConcern}, + {"ordered", op.flags&1 == 0}, + } + case *updateOp: + // http://docs.mongodb.org/manual/reference/command/update + selector := op.selector + if selector == nil { + selector = bson.D{} + } + cmd = bson.D{ + {"update", c.Name}, + {"updates", []bson.D{{{"q", selector}, {"u", op.update}, {"upsert", op.flags&1 != 0}, {"multi", op.flags&2 != 0}}}}, + {"writeConcern", writeConcern}, + //{"ordered", }, + } + case *deleteOp: + // http://docs.mongodb.org/manual/reference/command/delete + selector := op.selector + if selector == nil { + selector = bson.D{} + } + cmd = bson.D{ + {"delete", c.Name}, + {"deletes", []bson.D{{{"q", selector}, {"limit", op.flags & 1}}}}, + {"writeConcern", writeConcern}, + //{"ordered", }, + } } - mutex.Lock() // Wait. - if replyErr != nil { - return nil, replyErr // XXX TESTME + var result writeCmdResult + err := c.Database.run(socket, cmd, &result) + debugf("Write command result: %#v (err=%v)", result, err) + // TODO Should lerr.N be result.NModified on updates? + lerr := &LastError{UpdatedExisting: result.NModified != 0, N: result.N} + if len(result.Upserted) > 0 { + lerr.UpsertedId = result.Upserted[0].Id } - if hasErrMsg(replyData) { - // Looks like getLastError itself failed. 
- err = checkQueryError(query.collection, replyData) - if err != nil { - return nil, err + if len(result.Errors) > 0 { + e := result.Errors[0] + if !e.Ok { + lerr.Code = e.Code + lerr.Err = e.ErrMsg + err = lerr } + } else if result.ConcernError.Code != 0 { + e := result.ConcernError + lerr.Code = e.Code + lerr.Err = e.ErrMsg + err = lerr } - result := &LastError{} - bson.Unmarshal(replyData, &result) - debugf("Result from writing query: %#v", result) - if result.Err != "" { - return result, result + return lerr, err + } + + if safeOp == nil { + return nil, socket.Query(op) + } + + var mutex sync.Mutex + var replyData []byte + var replyErr error + mutex.Lock() + query := *safeOp // Copy the data. + query.collection = dbname + ".$cmd" + query.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) { + replyData = docData + replyErr = err + mutex.Unlock() + } + err = socket.Query(op, &query) + if err != nil { + return nil, err + } + mutex.Lock() // Wait. + if replyErr != nil { + return nil, replyErr // XXX TESTME + } + if hasErrMsg(replyData) { + // Looks like getLastError itself failed. 
+ err = checkQueryError(query.collection, replyData) + if err != nil { + return nil, err } - return result, nil } - panic("unreachable") + result := &LastError{} + bson.Unmarshal(replyData, &result) + debugf("Result from writing query: %#v", result) + if result.Err != "" { + return result, result + } + return result, nil } func hasErrMsg(d []byte) bool { diff --git a/session_test.go b/session_test.go index c5dda5f8a..934b28234 100644 --- a/session_test.go +++ b/session_test.go @@ -425,7 +425,6 @@ func (s *S) TestUpdateNil(c *C) { err = coll.Find(M{"k": 45}).One(result) c.Assert(err, IsNil) c.Assert(result["n"], Equals, 46) - } func (s *S) TestUpsert(c *C) { @@ -632,6 +631,16 @@ func (s *S) TestRemoveAll(c *C) { err = coll.Find(M{"n": 44}).One(result) c.Assert(err, Equals, mgo.ErrNotFound) + + info, err = coll.RemoveAll(nil) + c.Assert(err, IsNil) + c.Assert(info.Updated, Equals, 0) + c.Assert(info.Removed, Equals, 3) + c.Assert(info.UpsertedId, IsNil) + + n, err := coll.Find(nil).Count() + c.Assert(err, IsNil) + c.Assert(n, Equals, 0) } func (s *S) TestDropDatabase(c *C) { @@ -1369,15 +1378,16 @@ func (s *S) TestFindIterLimitWithMore(c *C) { // Should amount to more than 4MB bson payload, // the default limit per result chunk. - const total = 4096 + const total = 5000 var d struct{ A [1024]byte } - docs := make([]interface{}, total) - for i := 0; i < total; i++ { + docs := make([]interface{}, 1000) + for i := 0; i < 1000; i++ { docs[i] = &d } - err = coll.Insert(docs...) - c.Assert(err, IsNil) - + for i := 0; i < total/1000; i++ { + err = coll.Insert(docs...) 
+ c.Assert(err, IsNil) + } n, err := coll.Count() c.Assert(err, IsNil) c.Assert(n, Equals, total) @@ -1589,7 +1599,11 @@ func (s *S) TestFindTailTimeoutWithSleep(c *C) { // 1*QUERY for nonce + 1*GET_MORE_OP on Next + 1*GET_MORE_OP on Next after sleep + // 1*INSERT_OP + 1*QUERY_OP for getLastError on insert of 47 stats := mgo.GetStats() - c.Assert(stats.SentOps, Equals, 5) + if s.versionAtLeast(2, 6) { + c.Assert(stats.SentOps, Equals, 4) + } else { + c.Assert(stats.SentOps, Equals, 5) + } c.Assert(stats.ReceivedOps, Equals, 4) // REPLY_OPs for 1*QUERY_OP for nonce + 2*GET_MORE_OPs + 1*QUERY_OP c.Assert(stats.ReceivedDocs, Equals, 3) // nonce + N=47 result + getLastError response @@ -1682,7 +1696,11 @@ func (s *S) TestFindTailTimeoutNoSleep(c *C) { // 1*QUERY_OP for nonce + 1*GET_MORE_OP on Next + // 1*INSERT_OP + 1*QUERY_OP for getLastError on insert of 47 stats := mgo.GetStats() - c.Assert(stats.SentOps, Equals, 4) + if s.versionAtLeast(2, 6) { + c.Assert(stats.SentOps, Equals, 3) + } else { + c.Assert(stats.SentOps, Equals, 4) + } c.Assert(stats.ReceivedOps, Equals, 3) // REPLY_OPs for 1*QUERY_OP for nonce + 1*GET_MORE_OPs and 1*QUERY_OP c.Assert(stats.ReceivedDocs, Equals, 3) // nonce + N=47 result + getLastError response @@ -1774,7 +1792,11 @@ func (s *S) TestFindTailNoTimeout(c *C) { // 1*QUERY_OP for nonce + 1*GET_MORE_OP on Next + // 1*INSERT_OP + 1*QUERY_OP for getLastError on insert of 47 stats := mgo.GetStats() - c.Assert(stats.SentOps, Equals, 4) + if s.versionAtLeast(2, 6) { + c.Assert(stats.SentOps, Equals, 3) + } else { + c.Assert(stats.SentOps, Equals, 4) + } c.Assert(stats.ReceivedOps, Equals, 3) // REPLY_OPs for 1*QUERY_OP for nonce + 1*GET_MORE_OPs and 1*QUERY_OP c.Assert(stats.ReceivedDocs, Equals, 3) // nonce + N=47 result + getLastError response @@ -2210,12 +2232,14 @@ func (s *S) TestPrefetching(c *C) { coll := session.DB("mydb").C("mycoll") + const total = 600 mgo.SetDebug(false) - docs := make([]interface{}, 800) - for i := 0; i != 600; i++ 
{ + docs := make([]interface{}, total) + for i := 0; i != total; i++ { docs[i] = bson.D{{"n", i}} } - coll.Insert(docs...) + err = coll.Insert(docs...) + c.Assert(err, IsNil) for testi := 0; testi < 5; testi++ { mgo.ResetStats() @@ -2392,7 +2416,11 @@ func (s *S) TestSafeInsert(c *C) { // It must have sent two operations (INSERT_OP + getLastError QUERY_OP) stats := mgo.GetStats() - c.Assert(stats.SentOps, Equals, 2) + if s.versionAtLeast(2, 6) { + c.Assert(stats.SentOps, Equals, 1) + } else { + c.Assert(stats.SentOps, Equals, 2) + } mgo.ResetStats() @@ -2416,7 +2444,7 @@ func (s *S) TestSafeParameters(c *C) { // Tweak the safety parameters to something unachievable. session.SetSafe(&mgo.Safe{W: 4, WTimeout: 100}) err = coll.Insert(M{"_id": 1}) - c.Assert(err, ErrorMatches, "timeout|timed out waiting for slaves|Not enough data-bearing nodes") + c.Assert(err, ErrorMatches, "timeout|timed out waiting for slaves|Not enough data-bearing nodes|waiting for replication timed out") // :-( if !s.versionAtLeast(2, 6) { // 2.6 turned it into a query error. 
c.Assert(err.(*mgo.LastError).WTimeout, Equals, true) diff --git a/testdb/dropall.js b/testdb/dropall.js index f52de61c9..2059349db 100644 --- a/testdb/dropall.js +++ b/testdb/dropall.js @@ -32,12 +32,12 @@ for (var i in ports) { } var result = admin.runCommand({"listDatabases": 1}) for (var j = 0; j != 100; j++) { - if (typeof result.databases != "undefined" || result.errmsg == "not master") { + if (typeof result.databases != "undefined" || notMaster(result)) { break } result = admin.runCommand({"listDatabases": 1}) } - if (result.errmsg == "not master") { + if (notMaster(result)) { continue } if (typeof result.databases == "undefined") { @@ -59,4 +59,8 @@ for (var i in ports) { } } +function notMaster(result) { + return typeof result.errmsg != "undefined" && result.errmsg.indexOf("not master") >= 0 +} + // vim:ts=4:sw=4:et From 9eef06de0353d5dd6a9e700214b52efdfbfbf355 Mon Sep 17 00:00:00 2001 From: Oleg Bulatov Date: Sat, 28 Mar 2015 20:21:08 +0300 Subject: [PATCH 127/305] Adjust stats for master connection only once. --- cluster.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/cluster.go b/cluster.go index 2db8a6d37..88195a2f3 100644 --- a/cluster.go +++ b/cluster.go @@ -209,9 +209,12 @@ func (cluster *mongoCluster) syncServer(server *mongoServer) (info *mongoServerI if result.IsMaster { debugf("SYNC %s is a master.", addr) - // Made an incorrect assumption above, so fix stats. - stats.conn(-1, false) - stats.conn(+1, true) + if !server.info.Master { + // Made an incorrect assumption above, so fix stats. + stats.conn(-1, false) + stats.conn(+1, true) + server.info.Master = true + } } else if result.Secondary { debugf("SYNC %s is a slave.", addr) } else if cluster.direct { From 647f28853eacb1f660eff027698f04425252bfdc Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 30 Mar 2015 17:56:41 -0300 Subject: [PATCH 128/305] Tune doc for Distinct. 
--- session.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/session.go b/session.go index dccdbbe40..5999c9268 100644 --- a/session.go +++ b/session.go @@ -3429,9 +3429,7 @@ type distinctCmd struct { Query interface{} ",omitempty" } -// Distinct returns a list of distinct values for the given key within -// the result set. The list of distinct values will be unmarshalled -// in the "values" key of the provided result parameter. +// Distinct unmarshals into result the list of distinct values for the given key. // // For example: // From 994ffac98abedf3f337829fac65ad1a6e7eff4d3 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 30 Mar 2015 18:04:40 -0300 Subject: [PATCH 129/305] Also check unknown command error codes. --- session.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/session.go b/session.go index 5999c9268..0ac3ca256 100644 --- a/session.go +++ b/session.go @@ -855,7 +855,7 @@ func (db *Database) UpsertUser(user *User) error { func isNoCmd(err error) bool { e, ok := err.(*QueryError) - return ok && strings.HasPrefix(e.Message, "no such cmd:") + return ok && (e.Code == 59 || e.Code == 13390 || strings.HasPrefix(e.Message, "no such cmd:")) } func isNotFound(err error) bool { From 7a4943433e00707e38099a2b2e904d96681d14bc Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 30 Mar 2015 18:23:59 -0300 Subject: [PATCH 130/305] Handle Setter and Getter interfaces in slice types. 
--- bson/bson_test.go | 34 ++++++++++++++++++++++++++++------ bson/decode.go | 19 ++++++++++++------- bson/encode.go | 18 +++++++++--------- 3 files changed, 49 insertions(+), 22 deletions(-) diff --git a/bson/bson_test.go b/bson/bson_test.go index 5116f639d..974a4d4c3 100644 --- a/bson/bson_test.go +++ b/bson/bson_test.go @@ -1056,6 +1056,25 @@ func (i *getterSetterInt) SetBSON(raw bson.Raw) error { return err } +type ifaceType interface { + Hello() +} + +type ifaceSlice []ifaceType + +func (s *ifaceSlice) SetBSON(raw bson.Raw) error { + var ns []int + if err := raw.Unmarshal(&ns); err != nil { + return err + } + *s = make(ifaceSlice, ns[0]) + return nil +} + +func (s ifaceSlice) GetBSON() (interface{}, error) { + return []int{len(s)}, nil +} + type ( MyString string MyBytes []byte @@ -1281,6 +1300,9 @@ var twoWayCrossItems = []crossTypeItem{ // bson.D <=> non-struct getter/setter {&bson.D{{"a", 1}}, &getterSetterD{{"a", 1}, {"suffix", true}}}, {&bson.D{{"a", 42}}, &gsintvar}, + + // Interface slice setter. + {&struct{ V ifaceSlice }{ifaceSlice{nil, nil, nil}}, bson.M{"v": []interface{}{3}}}, } // Same thing, but only one way (obj1 => obj2). 
@@ -1476,14 +1498,14 @@ var jsonIdTests = []struct { marshal: false, unmarshal: true, }, { - json: `{"Id":"4d88e15b60f486e428412dc9A"}`, - error: `Invalid ObjectId in JSON: "4d88e15b60f486e428412dc9A"`, - marshal: false, + json: `{"Id":"4d88e15b60f486e428412dc9A"}`, + error: `Invalid ObjectId in JSON: "4d88e15b60f486e428412dc9A"`, + marshal: false, unmarshal: true, }, { - json: `{"Id":"4d88e15b60f486e428412dcZ"}`, - error: `Invalid ObjectId in JSON: "4d88e15b60f486e428412dcZ" .*`, - marshal: false, + json: `{"Id":"4d88e15b60f486e428412dcZ"}`, + error: `Invalid ObjectId in JSON: "4d88e15b60f486e428412dcZ" .*`, + marshal: false, unmarshal: true, }} diff --git a/bson/decode.go b/bson/decode.go index 782e9338a..bdd2e0287 100644 --- a/bson/decode.go +++ b/bson/decode.go @@ -1,18 +1,18 @@ // BSON library for Go -// +// // Copyright (c) 2010-2012 - Gustavo Niemeyer -// +// // All rights reserved. // // Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// +// modification, are permitted provided that the following conditions are met: +// // 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. +// list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// +// and/or other materials provided with the distribution. +// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE @@ -474,6 +474,11 @@ func (d *decoder) readElemTo(out reflect.Value, kind byte) (good bool) { panic("Can't happen. 
Handled above.") case 0x04: // Array outt := out.Type() + if setterStyle(outt) != setterNone { + // Skip the value so its data is handed to the setter below. + d.dropElem(kind) + break + } for outt.Kind() == reflect.Ptr { outt = outt.Elem() } diff --git a/bson/encode.go b/bson/encode.go index 81a13add8..8599f037d 100644 --- a/bson/encode.go +++ b/bson/encode.go @@ -1,18 +1,18 @@ // BSON library for Go -// +// // Copyright (c) 2010-2012 - Gustavo Niemeyer -// +// // All rights reserved. // // Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// +// modification, are permitted provided that the following conditions are met: +// // 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. +// list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// +// and/or other materials provided with the distribution. +// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE @@ -212,7 +212,7 @@ func (e *encoder) addSlice(v reflect.Value) { return } l := v.Len() - et := v.Type().Elem() + et := v.Type().Elem() if et == typeDocElem { for i := 0; i < l; i++ { elem := v.Index(i).Interface().(DocElem) @@ -415,7 +415,7 @@ func (e *encoder) addElem(name string, v reflect.Value, minSize bool) { case time.Time: // MongoDB handles timestamps as milliseconds. 
e.addElemName('\x09', name) - e.addInt64(s.Unix() * 1000 + int64(s.Nanosecond() / 1e6)) + e.addInt64(s.Unix()*1000 + int64(s.Nanosecond()/1e6)) case url.URL: e.addElemName('\x02', name) From d7cb1267439d1fcb34ae331a1eb22a4e83a595b3 Mon Sep 17 00:00:00 2001 From: Deiwin Sarjas Date: Tue, 7 Apr 2015 01:09:23 +0300 Subject: [PATCH 131/305] add support for geoHaystack index As seen on: http://docs.mongodb.org/manual/tutorial/build-a-geohaystack-index/ the haystack index uses a property called bucketSize, which isn't currently available through this driver. This commit adds the property --- session.go | 21 ++++++++++++--------- session_test.go | 11 +++++++++++ 2 files changed, 23 insertions(+), 9 deletions(-) diff --git a/session.go b/session.go index 0ac3ca256..8a8bc5944 100644 --- a/session.go +++ b/session.go @@ -947,15 +947,16 @@ func (db *Database) RemoveUser(user string) error { type indexSpec struct { Name, NS string Key bson.D - Unique bool ",omitempty" - DropDups bool "dropDups,omitempty" - Background bool ",omitempty" - Sparse bool ",omitempty" - Bits, Min, Max int ",omitempty" - ExpireAfter int "expireAfterSeconds,omitempty" - Weights bson.D ",omitempty" - DefaultLanguage string "default_language,omitempty" - LanguageOverride string "language_override,omitempty" + Unique bool ",omitempty" + DropDups bool "dropDups,omitempty" + Background bool ",omitempty" + Sparse bool ",omitempty" + Bits, Min, Max int ",omitempty" + BucketSize float64 "bucketSize,omitempty" + ExpireAfter int "expireAfterSeconds,omitempty" + Weights bson.D ",omitempty" + DefaultLanguage string "default_language,omitempty" + LanguageOverride string "language_override,omitempty" } type Index struct { @@ -975,6 +976,7 @@ type Index struct { // Properties for spatial indexes. Bits, Min, Max int + BucketSize float64 // Properties for text indexes. 
DefaultLanguage string @@ -1170,6 +1172,7 @@ func (c *Collection) EnsureIndex(index Index) error { Bits: index.Bits, Min: index.Min, Max: index.Max, + BucketSize: index.BucketSize, ExpireAfter: int(index.ExpireAfter / time.Second), Weights: keyInfo.weights, DefaultLanguage: index.DefaultLanguage, diff --git a/session_test.go b/session_test.go index 934b28234..4cce448ac 100644 --- a/session_test.go +++ b/session_test.go @@ -2564,6 +2564,17 @@ var indexTests = []struct { "max": 500, "bits": 32, }, +}, { + mgo.Index{ + Key: []string{"$geoHaystack:loc", "type"}, + BucketSize: 1, + }, + M{ + "name": "loc_geoHaystack_type_1", + "key": M{"loc": "geoHaystack", "type": 1}, + "ns": "mydb.mycoll", + "bucketSize": 1, + }, }, { mgo.Index{ Key: []string{"$text:a", "$text:b"}, From 0257d5e83afc34c03d3751a26ae8bbe0ed68abd6 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Wed, 8 Apr 2015 23:12:59 -0300 Subject: [PATCH 132/305] Split bulk insert operations to stay under limit. --- session.go | 153 ++++++++++++++++++++++++++++-------------------- session_test.go | 33 ++++++++--- 2 files changed, 115 insertions(+), 71 deletions(-) diff --git a/session.go b/session.go index 0ac3ca256..ec91d5093 100644 --- a/session.go +++ b/session.go @@ -4003,72 +4003,25 @@ func (c *Collection) writeQuery(op interface{}) (lerr *LastError, err error) { s.m.RUnlock() if socket.ServerInfo().MaxWireVersion >= 2 { - // Servers with the write protocol >= 2 benefit from write commands. 
- - var writeConcern interface{} - if safeOp == nil { - writeConcern = bson.D{{"w", 0}} - } else { - writeConcern = s.safeOp.query.(*getLastError) - } - - var cmd bson.D - switch op := op.(type) { - case *insertOp: - // http://docs.mongodb.org/manual/reference/command/insert - cmd = bson.D{ - {"insert", c.Name}, - {"documents", op.documents}, - {"writeConcern", writeConcern}, - {"ordered", op.flags&1 == 0}, - } - case *updateOp: - // http://docs.mongodb.org/manual/reference/command/update - selector := op.selector - if selector == nil { - selector = bson.D{} - } - cmd = bson.D{ - {"update", c.Name}, - {"updates", []bson.D{{{"q", selector}, {"u", op.update}, {"upsert", op.flags&1 != 0}, {"multi", op.flags&2 != 0}}}}, - {"writeConcern", writeConcern}, - //{"ordered", }, - } - case *deleteOp: - // http://docs.mongodb.org/manual/reference/command/delete - selector := op.selector - if selector == nil { - selector = bson.D{} - } - cmd = bson.D{ - {"delete", c.Name}, - {"deletes", []bson.D{{{"q", selector}, {"limit", op.flags & 1}}}}, - {"writeConcern", writeConcern}, - //{"ordered", }, - } - } - var result writeCmdResult - err := c.Database.run(socket, cmd, &result) - debugf("Write command result: %#v (err=%v)", result, err) - // TODO Should lerr.N be result.NModified on updates? - lerr := &LastError{UpdatedExisting: result.NModified != 0, N: result.N} - if len(result.Upserted) > 0 { - lerr.UpsertedId = result.Upserted[0].Id - } - if len(result.Errors) > 0 { - e := result.Errors[0] - if !e.Ok { - lerr.Code = e.Code - lerr.Err = e.ErrMsg - err = lerr + // Servers with a more recent write protocol benefit from write commands. + if op, ok := op.(*insertOp); ok && len(op.documents) > 1000 { + // Maximum batch size is 1000. Must split out in separate operations for compatibility. 
+ all := op.documents + for i := 0; i < len(all); i += 1000 { + l := i+1000 + if l > len(all) { + l = len(all) + } + op.documents = all[i:l] + _, err := c.writeCommand(socket, safeOp, op) + if err != nil { + // TODO: Handle unordered. + return nil, err + } } - } else if result.ConcernError.Code != 0 { - e := result.ConcernError - lerr.Code = e.Code - lerr.Err = e.ErrMsg - err = lerr + return nil, nil } - return lerr, err + return c.writeCommand(socket, safeOp, op) } if safeOp == nil { @@ -4110,6 +4063,78 @@ func (c *Collection) writeQuery(op interface{}) (lerr *LastError, err error) { return result, nil } +func (c *Collection) writeCommand(socket *mongoSocket, safeOp *queryOp, op interface{}) (lerr *LastError, err error) { + var writeConcern interface{} + if safeOp == nil { + writeConcern = bson.D{{"w", 0}} + } else { + writeConcern = safeOp.query.(*getLastError) + } + + var cmd bson.D + switch op := op.(type) { + case *insertOp: + // http://docs.mongodb.org/manual/reference/command/insert + cmd = bson.D{ + {"insert", c.Name}, + {"documents", op.documents}, + {"writeConcern", writeConcern}, + {"ordered", op.flags&1 == 0}, + } + case *updateOp: + // http://docs.mongodb.org/manual/reference/command/update + selector := op.selector + if selector == nil { + selector = bson.D{} + } + cmd = bson.D{ + {"update", c.Name}, + {"updates", []bson.D{{{"q", selector}, {"u", op.update}, {"upsert", op.flags&1 != 0}, {"multi", op.flags&2 != 0}}}}, + {"writeConcern", writeConcern}, + //{"ordered", }, + } + case *deleteOp: + // http://docs.mongodb.org/manual/reference/command/delete + selector := op.selector + if selector == nil { + selector = bson.D{} + } + cmd = bson.D{ + {"delete", c.Name}, + {"deletes", []bson.D{{{"q", selector}, {"limit", op.flags & 1}}}}, + {"writeConcern", writeConcern}, + //{"ordered", }, + } + } + + var result writeCmdResult + err = c.Database.run(socket, cmd, &result) + debugf("Write command result: %#v (err=%v)", result, err) + // TODO Should lerr.N be 
result.NModified on updates? + lerr = &LastError{UpdatedExisting: result.NModified != 0, N: result.N} + if len(result.Upserted) > 0 { + lerr.UpsertedId = result.Upserted[0].Id + } + if len(result.Errors) > 0 { + e := result.Errors[0] + if !e.Ok { + lerr.Code = e.Code + lerr.Err = e.ErrMsg + err = lerr + } + } else if result.ConcernError.Code != 0 { + e := result.ConcernError + lerr.Code = e.Code + lerr.Err = e.ErrMsg + err = lerr + } + + if err == nil && safeOp == nil { + return nil, nil + } + return lerr, err +} + func hasErrMsg(d []byte) bool { l := len(d) for i := 0; i+8 < l; i++ { diff --git a/session_test.go b/session_test.go index 934b28234..0ca6d1605 100644 --- a/session_test.go +++ b/session_test.go @@ -602,6 +602,26 @@ func (s *S) TestRemoveId(c *C) { c.Assert(coll.FindId(42).One(nil), IsNil) } +func (s *S) TestRemoveUnsafe(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + session.SetSafe(nil) + + coll := session.DB("mydb").C("mycoll") + + err = coll.Insert(M{"_id": 40}, M{"_id": 41}, M{"_id": 42}) + c.Assert(err, IsNil) + + err = coll.RemoveId(41) + c.Assert(err, IsNil) + + c.Assert(coll.FindId(40).One(nil), IsNil) + c.Assert(coll.FindId(41).One(nil), Equals, mgo.ErrNotFound) + c.Assert(coll.FindId(42).One(nil), IsNil) +} + func (s *S) TestRemoveAll(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) @@ -1378,16 +1398,15 @@ func (s *S) TestFindIterLimitWithMore(c *C) { // Should amount to more than 4MB bson payload, // the default limit per result chunk. - const total = 5000 + const total = 4096 var d struct{ A [1024]byte } - docs := make([]interface{}, 1000) - for i := 0; i < 1000; i++ { + docs := make([]interface{}, total) + for i := 0; i < total; i++ { docs[i] = &d } - for i := 0; i < total/1000; i++ { - err = coll.Insert(docs...) - c.Assert(err, IsNil) - } + err = coll.Insert(docs...) 
+ c.Assert(err, IsNil) + n, err := coll.Count() c.Assert(err, IsNil) c.Assert(n, Equals, total) From 235365258247a4cca6e19ffe0e509a7f50557aab Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Fri, 8 May 2015 16:53:51 -0300 Subject: [PATCH 133/305] Handle unordered batch splitting with commands. --- bulk_test.go | 38 ++++++++++++++++++++++++++++++++++++++ session.go | 12 +++++++++--- 2 files changed, 47 insertions(+), 3 deletions(-) diff --git a/bulk_test.go b/bulk_test.go index f6092d612..e126f9d20 100644 --- a/bulk_test.go +++ b/bulk_test.go @@ -91,3 +91,41 @@ func (s *S) TestBulkInsertErrorUnordered(c *C) { c.Assert(err, IsNil) c.Assert(res, DeepEquals, []doc{{1}, {2}, {3}}) } + +func (s *S) TestBulkInsertErrorUnorderedSplitBatch(c *C) { + // The server has a batch limit of 1000 documents when using write commands. + // This artificial limit did not exist with the old wire protocol, so to + // avoid compatibility issues the implementation internally split batches + // into the proper size and delivers them one by one. This test ensures that + // the behavior of unordered (that is, continue on error) remains correct + // when errors happen and there are batches left. + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + bulk := coll.Bulk() + bulk.Unordered() + + const total = 4096 + type doc struct { + Id int `_id` + } + docs := make([]interface{}, total) + for i := 0; i < total; i++ { + docs[i] = doc{i} + } + docs[1] = doc{0} + bulk.Insert(docs...) 
+ _, err = bulk.Run() + c.Assert(err, ErrorMatches, ".*duplicate key.*") + + n, err := coll.Count() + c.Assert(err, IsNil) + c.Assert(n, Equals, total-1) + + var res doc + err = coll.FindId(1500).One(&res) + c.Assert(err, IsNil) + c.Assert(res.Id, Equals, 1500) +} diff --git a/session.go b/session.go index ec91d5093..f1f4bcd34 100644 --- a/session.go +++ b/session.go @@ -4005,6 +4005,7 @@ func (c *Collection) writeQuery(op interface{}) (lerr *LastError, err error) { if socket.ServerInfo().MaxWireVersion >= 2 { // Servers with a more recent write protocol benefit from write commands. if op, ok := op.(*insertOp); ok && len(op.documents) > 1000 { + var firstErr error // Maximum batch size is 1000. Must split out in separate operations for compatibility. all := op.documents for i := 0; i < len(all); i += 1000 { @@ -4015,11 +4016,16 @@ func (c *Collection) writeQuery(op interface{}) (lerr *LastError, err error) { op.documents = all[i:l] _, err := c.writeCommand(socket, safeOp, op) if err != nil { - // TODO: Handle unordered. - return nil, err + if op.flags&1 != 0 { + if firstErr == nil { + firstErr = err + } + } else { + return nil, err + } } } - return nil, nil + return nil, firstErr } return c.writeCommand(socket, safeOp, op) } From 00f3f637b2af8b7ce0985f9fce009f47a970ce2e Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 11 May 2015 15:48:48 -0300 Subject: [PATCH 134/305] Twist SetMaxTime to follow conventions. --- session.go | 38 ++++++++++++++++++++++++++++++-------- session_test.go | 25 ++++++++++++++++++++++++- 2 files changed, 54 insertions(+), 9 deletions(-) diff --git a/session.go b/session.go index f9ad6a18f..c5004a7b4 100644 --- a/session.go +++ b/session.go @@ -2616,17 +2616,39 @@ func (q *Query) SetMaxScan(n int) *Query { return q } -// SetMaxTimeMS constrains the query to stop after running for the specified -// time. +// SetMaxTime constrains the query to stop after running for the specified time. 
// -// This modifier is better at stopping queries from running too long than a -// socket timeout since mongo will continue to finish the query even after -// the socket has been closed by the client. +// When the time limit is reached MongoDB automatically cancels the query. +// This can be used to efficiently prevent and identify unexpectedly slow queries. // -// http://blog.mongodb.org/post/83621787773/maxtimems-and-query-optimizer-introspection-in -func (q *Query) SetMaxTimeMS(n int) *Query { +// A few important notes about the mechanism enforcing this limit: +// +// - Requests can block behind locking operations on the server, and that blocking +// time is not accounted for. In other words, the timer starts ticking only after +// the actual start of the query when it initially acquires the appropriate lock; +// +// - Operations are interrupted only at interrupt points where an operation can be +// safely aborted – the total execution time may exceed the specified value; +// +// - The limit can be applied to both CRUD operations and commands, but not all +// commands are interruptible; +// +// - While iterating over results, computing follow up batches is included in the +// total time and the iteration continues until the alloted time is over, but +// network roundtrips are not taken into account for the limit. +// +// - This limit does not override the inactive cursor timeout for idle cursors +// (default is 10 min). +// +// This mechanism was introduced in MongoDB 2.6. 
+// +// Relevant documentation: +// +// http://blog.mongodb.org/post/83621787773/maxtimems-and-query-optimizer-introspection-in +// +func (q *Query) SetMaxTime(d time.Duration) *Query { q.m.Lock() - q.op.options.MaxTimeMS = n + q.op.options.MaxTimeMS = int(d/time.Millisecond) q.op.hasOptions = true q.m.Unlock() return q diff --git a/session_test.go b/session_test.go index 0ca6d1605..f4c4ff756 100644 --- a/session_test.go +++ b/session_test.go @@ -1078,7 +1078,7 @@ func (s *S) TestQueryExplain(c *C) { c.Assert(n, Equals, 2) } -func (s *S) TestQueryMaxScan(c *C) { +func (s *S) TestQuerySetMaxScan(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() @@ -1097,6 +1097,29 @@ func (s *S) TestQueryMaxScan(c *C) { c.Assert(result, HasLen, 2) } +func (s *S) TestQuerySetMaxTime(c *C) { + if !s.versionAtLeast(2, 6) { + c.Skip("SetMaxTime only supported in 2.6+") + } + + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + coll := session.DB("mydb").C("mycoll") + + for i := 0; i < 1000; i++ { + err := coll.Insert(M{"n": i}) + c.Assert(err, IsNil) + } + + query := coll.Find(nil) + query.SetMaxTime(1*time.Millisecond) + query.Batch(2) + var result []M + err = query.All(&result) + c.Assert(err, ErrorMatches, "operation exceeded time limit") +} + func (s *S) TestQueryHint(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) From 95431c887c47e3b0cf8aec7e11d6db7140eb3b42 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 11 May 2015 16:41:19 -0300 Subject: [PATCH 135/305] Expose ParseURL. Addresses #85 and #97. --- session.go | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/session.go b/session.go index c5004a7b4..eafdbba8c 100644 --- a/session.go +++ b/session.go @@ -218,7 +218,18 @@ func Dial(url string) (*Session, error) { // // See SetSyncTimeout for customizing the timeout for the session. 
func DialWithTimeout(url string, timeout time.Duration) (*Session, error) { - uinfo, err := parseURL(url) + info, err := ParseURL(url) + if err != nil { + return nil, err + } + info.Timeout = timeout + return DialWithInfo(info) +} + +// ParseURL parses a MongoDB URL as accepted by the Dial function and returns +// a value suitable for providing into DialWithInfo. +func ParseURL(url string) (*DialInfo, error) { + uinfo, err := extractURL(url) if err != nil { return nil, err } @@ -259,7 +270,6 @@ func DialWithTimeout(url string, timeout time.Duration) (*Session, error) { info := DialInfo{ Addrs: uinfo.addrs, Direct: direct, - Timeout: timeout, Database: uinfo.db, Username: uinfo.user, Password: uinfo.pass, @@ -269,7 +279,7 @@ func DialWithTimeout(url string, timeout time.Duration) (*Session, error) { PoolLimit: poolLimit, ReplicaSetName: setName, } - return DialWithInfo(&info) + return &info, nil } // DialInfo holds options for establishing a session with a MongoDB cluster. @@ -428,7 +438,7 @@ type urlInfo struct { options map[string]string } -func parseURL(s string) (*urlInfo, error) { +func extractURL(s string) (*urlInfo, error) { if strings.HasPrefix(s, "mongodb://") { s = s[10:] } From b62503f99756978997918b1613843d179b452b4e Mon Sep 17 00:00:00 2001 From: Menno Smits Date: Mon, 11 May 2015 17:47:31 +1200 Subject: [PATCH 136/305] Check for iteration errors in PurgeMissing Not checking for iterator errors was hiding problems with pipeline result size limits being exceeded when PurgeMissing was being used to recover a production database. 
--- txn/txn.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/txn/txn.go b/txn/txn.go index d81c1f16f..5540c2eab 100644 --- a/txn/txn.go +++ b/txn/txn.go @@ -417,6 +417,9 @@ func (r *Runner) PurgeMissing(collections ...string) error { return fmt.Errorf("error purging missing transaction %s: %v", txnId.Hex(), err) } } + if err := iter.Close(); err != nil { + return fmt.Errorf("transaction queue iteration error for collection %s: %v", collection, err) + } colls[collection] = true } @@ -442,6 +445,9 @@ func (r *Runner) PurgeMissing(collections ...string) error { return fmt.Errorf("error purging missing transaction %s: %v", txnId.Hex(), err) } } + if err := iter.Close(); err != nil { + return fmt.Errorf("transaction stash iteration error: %v", err) + } return nil } From 9aad82c770ae9f427c710a92ccfb805ed7d1759b Mon Sep 17 00:00:00 2001 From: Menno Smits Date: Mon, 11 May 2015 21:36:30 +1200 Subject: [PATCH 137/305] Remove unused `coll` map from PurgeMissing --- txn/txn.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/txn/txn.go b/txn/txn.go index 5540c2eab..143e82a6f 100644 --- a/txn/txn.go +++ b/txn/txn.go @@ -395,7 +395,6 @@ func (r *Runner) PurgeMissing(collections ...string) error { } found := make(map[bson.ObjectId]bool) - colls := make(map[string]bool) sort.Strings(collections) for _, collection := range collections { @@ -420,7 +419,6 @@ func (r *Runner) PurgeMissing(collections ...string) error { if err := iter.Close(); err != nil { return fmt.Errorf("transaction queue iteration error for collection %s: %v", collection, err) } - colls[collection] = true } type StashTRef struct { From 541cb7bdea901d5273538e3cb3edd54d9eb91bfe Mon Sep 17 00:00:00 2001 From: Menno Smits Date: Mon, 11 May 2015 21:37:35 +1200 Subject: [PATCH 138/305] Fixed spelling error in TestPurgeMissing --- txn/txn_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/txn/txn_test.go b/txn/txn_test.go index e52b119d3..68d2c9a9e 100644 --- a/txn/txn_test.go +++ 
b/txn/txn_test.go @@ -534,7 +534,7 @@ func (s *S) TestPurgeMissing(c *C) { err = s.runner.Run(ops2, "", nil) c.Assert(err, ErrorMatches, "cannot find transaction .*") - c.Logf("---- Puring missing transactions") + c.Logf("---- Purging missing transactions") err = s.runner.PurgeMissing("accounts") c.Assert(err, IsNil) From e5d775ad02fa2dfc96d4a4cc91d2938d49bdd4c3 Mon Sep 17 00:00:00 2001 From: Menno Smits Date: Mon, 11 May 2015 22:07:50 +1200 Subject: [PATCH 139/305] Actually use `again` id in TestPurgeMissing --- txn/txn_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/txn/txn_test.go b/txn/txn_test.go index 68d2c9a9e..75acfe525 100644 --- a/txn/txn_test.go +++ b/txn/txn_test.go @@ -531,7 +531,7 @@ func (s *S) TestPurgeMissing(c *C) { again := bson.NewObjectId() c.Logf("---- Running ops2 again under transaction %q, to fail for missing transaction", again.Hex()) - err = s.runner.Run(ops2, "", nil) + err = s.runner.Run(ops2, again, nil) c.Assert(err, ErrorMatches, "cannot find transaction .*") c.Logf("---- Purging missing transactions") From 88e8c10c4f985c0258879c4d9a95f4e765e6a3a4 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Tue, 12 May 2015 15:26:33 -0300 Subject: [PATCH 140/305] Move sasl sub-package into internal/. 
--- {sasl => internal/sasl}/sasl.c | 0 {sasl => internal/sasl}/sasl.go | 0 {sasl => internal/sasl}/sasl_windows.c | 0 {sasl => internal/sasl}/sasl_windows.go | 0 {sasl => internal/sasl}/sasl_windows.h | 0 {sasl => internal/sasl}/sspi_windows.c | 0 {sasl => internal/sasl}/sspi_windows.h | 0 saslimpl.go | 2 +- 8 files changed, 1 insertion(+), 1 deletion(-) rename {sasl => internal/sasl}/sasl.c (100%) rename {sasl => internal/sasl}/sasl.go (100%) rename {sasl => internal/sasl}/sasl_windows.c (100%) rename {sasl => internal/sasl}/sasl_windows.go (100%) rename {sasl => internal/sasl}/sasl_windows.h (100%) rename {sasl => internal/sasl}/sspi_windows.c (100%) rename {sasl => internal/sasl}/sspi_windows.h (100%) diff --git a/sasl/sasl.c b/internal/sasl/sasl.c similarity index 100% rename from sasl/sasl.c rename to internal/sasl/sasl.c diff --git a/sasl/sasl.go b/internal/sasl/sasl.go similarity index 100% rename from sasl/sasl.go rename to internal/sasl/sasl.go diff --git a/sasl/sasl_windows.c b/internal/sasl/sasl_windows.c similarity index 100% rename from sasl/sasl_windows.c rename to internal/sasl/sasl_windows.c diff --git a/sasl/sasl_windows.go b/internal/sasl/sasl_windows.go similarity index 100% rename from sasl/sasl_windows.go rename to internal/sasl/sasl_windows.go diff --git a/sasl/sasl_windows.h b/internal/sasl/sasl_windows.h similarity index 100% rename from sasl/sasl_windows.h rename to internal/sasl/sasl_windows.h diff --git a/sasl/sspi_windows.c b/internal/sasl/sspi_windows.c similarity index 100% rename from sasl/sspi_windows.c rename to internal/sasl/sspi_windows.c diff --git a/sasl/sspi_windows.h b/internal/sasl/sspi_windows.h similarity index 100% rename from sasl/sspi_windows.h rename to internal/sasl/sspi_windows.h diff --git a/saslimpl.go b/saslimpl.go index 49c94091d..e6710dc3f 100644 --- a/saslimpl.go +++ b/saslimpl.go @@ -3,7 +3,7 @@ package mgo import ( - "gopkg.in/mgo.v2-unstable/sasl" + "gopkg.in/mgo.v2-unstable/internal/sasl" ) func saslNew(cred 
Credential, host string) (saslStepper, error) { From 4e47911f9842747e9f9e838ae94a268b8f2404bb Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Wed, 13 May 2015 11:50:20 -0300 Subject: [PATCH 141/305] Add support for $comment flag with Query.Comment. Fixes #82. --- session.go | 16 ++++++++++++++++ session_test.go | 32 ++++++++++++++++++++++++++++++++ socket.go | 1 + 3 files changed, 49 insertions(+) diff --git a/session.go b/session.go index 66611187d..46e18d280 100644 --- a/session.go +++ b/session.go @@ -2698,6 +2698,22 @@ func (q *Query) Snapshot() *Query { return q } +// Comment adds a comment to the query to identify it in the database profiler output. +// +// Relevant documentation: +// +// http://docs.mongodb.org/manual/reference/operator/meta/comment +// http://docs.mongodb.org/manual/reference/command/profile +// http://docs.mongodb.org/manual/administration/analyzing-mongodb-performance/#database-profiling +// +func (q *Query) Comment(comment string) *Query { + q.m.Lock() + q.op.options.Comment = comment + q.op.hasOptions = true + q.m.Unlock() + return q +} + // LogReplay enables an option that optimizes queries that are typically // made on the MongoDB oplog for replaying it. This is an internal // implementation aspect and most likely uninteresting for other uses. 
diff --git a/session_test.go b/session_test.go index ce8907db5..855accfff 100644 --- a/session_test.go +++ b/session_test.go @@ -1143,6 +1143,38 @@ func (s *S) TestQueryHint(c *C) { } } +func (s *S) TestQueryComment(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + db := session.DB("mydb") + coll := db.C("mycoll") + + err = db.Run(bson.M{"profile": 2}, nil) + c.Assert(err, IsNil) + + ns := []int{40, 41, 42} + for _, n := range ns { + err := coll.Insert(M{"n": n}) + c.Assert(err, IsNil) + } + + query := coll.Find(bson.M{"n": 41}) + query.Comment("some comment") + err = query.One(nil) + c.Assert(err, IsNil) + + query = coll.Find(bson.M{"n": 41}) + query.Comment("another comment") + err = query.One(nil) + c.Assert(err, IsNil) + + n, err := session.DB("mydb").C("system.profile").Find(bson.M{"query.$query.n": 41, "query.$comment": "some comment"}).Count() + c.Assert(err, IsNil) + c.Assert(n, Equals, 1) +} + func (s *S) TestFindOneNotFound(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) diff --git a/socket.go b/socket.go index eb468bcc8..0aa637cb3 100644 --- a/socket.go +++ b/socket.go @@ -88,6 +88,7 @@ type queryWrapper struct { ReadPreference bson.D "$readPreference,omitempty" MaxScan int "$maxScan,omitempty" MaxTimeMS int "$maxTimeMS,omitempty" + Comment string "$comment,omitempty" } func (op *queryOp) finalQuery(socket *mongoSocket) interface{} { From 446b41d43e268968b6242960e0bbdc7e99a8d66e Mon Sep 17 00:00:00 2001 From: William Reade Date: Fri, 22 May 2015 10:07:30 +0200 Subject: [PATCH 142/305] rescan now retries bad queue reads --- txn/flusher.go | 29 +++++++++++++++++++++++------ txn/txn_test.go | 45 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 68 insertions(+), 6 deletions(-) diff --git a/txn/flusher.go b/txn/flusher.go index 86e711037..964b43a31 100644 --- a/txn/flusher.go +++ b/txn/flusher.go @@ -395,12 +395,15 @@ func (f *flusher) rescan(t *transaction, force 
bool) (revnos []int64, err error) revno := make(map[docKey]int64) info := txnInfo{} for _, dkey := range dkeys { - retry := 0 + const retries = 3 + retry := -1 RetryDoc: + retry++ c := f.tc.Database.C(dkey.C) if err := c.FindId(dkey.Id).Select(txnFields).One(&info); err == mgo.ErrNotFound { // Document is missing. Look in stash. + chaos("") if err := f.sc.FindId(dkey).One(&info); err == mgo.ErrNotFound { // Stash also doesn't exist. Maybe someone applied it. if err := f.reload(t); err != nil { @@ -409,8 +412,7 @@ func (f *flusher) rescan(t *transaction, force bool) (revnos []int64, err error) return t.Revnos, err } // Not applying either. - retry++ - if retry < 3 { + if retry < retries { // Retry since there might be an insert/remove race. goto RetryDoc } @@ -451,13 +453,28 @@ func (f *flusher) rescan(t *transaction, force bool) (revnos []int64, err error) } f.queue[dkey] = info.Queue if !found { - // Previously set txn-queue was popped by someone. - // Transaction is being/has been applied elsewhere. + // Rescanned transaction id was not in the queue. This could mean one + // of three things: + // 1) The transaction was applied and popped by someone else. This is + // the common case. + // 2) We've read an out-of-date queue from the stash. This can happen + // when someone else was paused for a long while preparing another + // transaction for this document, and improperly upserted to the + // stash when unpaused (after someone else inserted the document). + // This is rare but possible. + // 3) There's an actual bug somewhere, or outside interference. Worst + // possible case. f.debugf("Rescanned document %v misses %s in queue: %v", dkey, tt, info.Queue) err := f.reload(t) if t.State == tpreparing || t.State == tprepared { - panic("rescanned document misses transaction in queue") + if retry < retries { + // Case 2. + goto RetryDoc + } + // Case 3. + return nil, fmt.Errorf("cannot find transaction %s in queue for document %v", t, dkey) } + // Case 1. 
return t.Revnos, err } } diff --git a/txn/txn_test.go b/txn/txn_test.go index 75acfe525..5548dcbb0 100644 --- a/txn/txn_test.go +++ b/txn/txn_test.go @@ -1,6 +1,7 @@ package txn_test import ( + "fmt" "sync" "testing" "time" @@ -563,6 +564,50 @@ func (s *S) TestPurgeMissing(c *C) { } } +func (s *S) TestTxnQueueStashStressTest(c *C) { + txn.SetChaos(txn.Chaos{ + SlowdownChance: 0.3, + Slowdown: 50 * time.Millisecond, + }) + defer txn.SetChaos(txn.Chaos{}) + + // So we can run more iterations of the test in less time. + txn.SetDebug(false) + + const runners = 10 + const inserts = 10 + const repeat = 100 + + for r := 0; r < repeat; r++ { + var wg sync.WaitGroup + wg.Add(runners) + for i := 0; i < runners; i++ { + go func(i, r int) { + defer wg.Done() + + session := s.session.New() + defer session.Close() + runner := txn.NewRunner(s.tc.With(session)) + + for j := 0; j < inserts; j++ { + ops := []txn.Op{{ + C: "accounts", + Id: fmt.Sprintf("insert-%d-%d", r, j), + Insert: bson.M{ + "added-by": i, + }, + }} + err := runner.Run(ops, "", nil) + if err != txn.ErrAborted { + c.Check(err, IsNil) + } + } + }(i, r) + } + wg.Wait() + } +} + func (s *S) TestTxnQueueStressTest(c *C) { txn.SetChaos(txn.Chaos{ SlowdownChance: 0.3, From 3d69d7d38beb26f461b87a81691fb7e289ce41bd Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Fri, 22 May 2015 17:31:50 -0300 Subject: [PATCH 143/305] Added testserver subpackage to help test suites. 
--- testserver/export_test.go | 12 +++ testserver/testserver.go | 196 ++++++++++++++++++++++++++++++++++ testserver/testserver_test.go | 108 +++++++++++++++++++ 3 files changed, 316 insertions(+) create mode 100644 testserver/export_test.go create mode 100644 testserver/testserver.go create mode 100644 testserver/testserver_test.go diff --git a/testserver/export_test.go b/testserver/export_test.go new file mode 100644 index 000000000..2b2e093fc --- /dev/null +++ b/testserver/export_test.go @@ -0,0 +1,12 @@ +package testserver + +import ( + "os" +) + +func (ts *TestServer) ProcessTest() *os.Process { + if ts.server == nil { + return nil + } + return ts.server.Process +} diff --git a/testserver/testserver.go b/testserver/testserver.go new file mode 100644 index 000000000..b369d88bc --- /dev/null +++ b/testserver/testserver.go @@ -0,0 +1,196 @@ +package testserver + +import ( + "bytes" + "fmt" + "net" + "os" + "os/exec" + "strconv" + "time" + + "gopkg.in/mgo.v2-unstable" + "gopkg.in/tomb.v2" +) + +// TestServer controls a MongoDB server process to be used within test suites. +// +// The test server is started when Session is called the first time and should +// remain running for the duration of all tests, with the Wipe method being +// called between tests (before each of them) to clear stored data. After all tests +// are done, the Stop method should be called to stop the test server. +// +// Before the TestServer is used the SetPath method must be called to define +// the location for the database files to be stored. +type TestServer struct { + session *mgo.Session + output bytes.Buffer + server *exec.Cmd + dbpath string + host string + tomb tomb.Tomb +} + +// SetPath defines the path to the directory where the database files will be +// stored if it is started. The directory path itself is not created or removed +// by the test helper. 
+func (ts *TestServer) SetPath(dbpath string) { + ts.dbpath = dbpath +} + +func (ts *TestServer) start() { + if ts.server != nil { + panic("TestServer already started") + } + if ts.dbpath == "" { + panic("TestServer.SetPath must be called before using the server") + } + mgo.SetStats(true) + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + panic("unable to listen on a local address: " + err.Error()) + } + addr := l.Addr().(*net.TCPAddr) + l.Close() + ts.host = addr.String() + + args := []string{ + "--dbpath", ts.dbpath, + "--bind_ip", "127.0.0.1", + "--port", strconv.Itoa(addr.Port), + "--nssize", "1", + "--noprealloc", + "--smallfiles", + "--nojournal", + } + ts.tomb = tomb.Tomb{} + ts.server = exec.Command("mongod", args...) + ts.server.Stdout = &ts.output + ts.server.Stderr = &ts.output + err = ts.server.Start() + if err != nil { + panic(err) + } + ts.tomb.Go(ts.monitor) + ts.Wipe() +} + +func (ts *TestServer) monitor() error { + ts.server.Process.Wait() + if ts.tomb.Alive() { + // Present some debugging information. + fmt.Fprintf(os.Stderr, "---- mongod process died unexpectedly:\n") + fmt.Fprintf(os.Stderr, "%s", ts.output.Bytes()) + fmt.Fprintf(os.Stderr, "---- mongod processes running right now:\n") + cmd := exec.Command("/bin/sh", "-c", "ps auxw | grep mongod") + cmd.Stdout = os.Stderr + cmd.Stderr = os.Stderr + cmd.Run() + fmt.Fprintf(os.Stderr, "----------------------------------------\n") + + panic("mongod process died unexpectedly") + } + return nil +} + +// Stop stops the test server process, if it is running. +// +// It's okay to call Stop multiple times. After the test server is +// stopped it cannot be restarted. +// +// All database sessions must be closed before or while the Stop method +// is running. Otherwise Stop will panic after a timeout informing that +// there is a session leak. 
+func (ts *TestServer) Stop() { + if ts.session != nil { + ts.checkSessions() + if ts.session != nil { + ts.session.Close() + ts.session = nil + } + } + if ts.server != nil { + ts.tomb.Kill(nil) + ts.server.Process.Kill() + select { + case <-ts.tomb.Dead(): + case <-time.After(5 * time.Second): + panic("timeout waiting for mongod process to die") + } + ts.server = nil + } +} + +// Session returns a new session to the server. The returned session +// must be closed after the test is done with it. +// +// The first Session obtained from a TestServer will start it. +func (ts *TestServer) Session() *mgo.Session { + if ts.server == nil { + ts.start() + } + if ts.session == nil { + mgo.ResetStats() + var err error + ts.session, err = mgo.Dial(ts.host + "/test") + if err != nil { + panic(err) + } + } + return ts.session.Copy() +} + +// checkSessions ensures all mgo sessions opened were properly closed. +// For slightly faster tests, it may be disabled setting the +// environmnet variable CHECK_SESSIONS to 0. +func (ts *TestServer) checkSessions() { + if check := os.Getenv("CHECK_SESSIONS"); check == "0" || ts.server == nil || ts.session == nil { + return + } + ts.session.Close() + ts.session = nil + for i := 0; i < 100; i++ { + stats := mgo.GetStats() + if stats.SocketsInUse == 0 && stats.SocketsAlive == 0 { + return + } + time.Sleep(100 * time.Millisecond) + } + panic("There are mgo sessions still alive.") +} + +// Wipe drops all created databases and their data. +// +// The MongoDB server remains running if it was prevoiusly running, +// or stopped if it was previously stopped. +// +// All database sessions must be closed before or while the Wipe method +// is running. Otherwise Wipe will panic after a timeout informing that +// there is a session leak. 
+func (ts *TestServer) Wipe() { + if ts.server == nil || ts.session == nil { + return + } + ts.checkSessions() + sessionUnset := ts.session == nil + session := ts.Session() + defer session.Close() + if sessionUnset { + ts.session.Close() + ts.session = nil + } + names, err := session.DatabaseNames() + if err != nil { + panic(err) + } + for _, name := range names { + switch name { + case "admin", "local", "config": + default: + err = session.DB(name).DropDatabase() + if err != nil { + panic(err) + } + } + } +} diff --git a/testserver/testserver_test.go b/testserver/testserver_test.go new file mode 100644 index 000000000..59dc5a6c6 --- /dev/null +++ b/testserver/testserver_test.go @@ -0,0 +1,108 @@ +package testserver_test + +import ( + "os" + "testing" + "time" + + . "gopkg.in/check.v1" + + "gopkg.in/mgo.v2-unstable" + "gopkg.in/mgo.v2-unstable/testserver" +) + +type M map[string]interface{} + +func TestAll(t *testing.T) { + TestingT(t) +} + +type S struct { + oldCheckSessions string +} + +var _ = Suite(&S{}) + +func (s *S) SetUpTest(c *C) { + s.oldCheckSessions = os.Getenv("CHECK_SESSIONS") + os.Setenv("CHECK_SESSIONS", "") +} + +func (s *S) TearDownTest(c *C) { + os.Setenv("CHECK_SESSIONS", s.oldCheckSessions) +} + +func (s *S) TestWipeData(c *C) { + var server testserver.TestServer + server.SetPath(c.MkDir()) + defer server.Stop() + + session := server.Session() + err := session.DB("mydb").C("mycoll").Insert(M{"a": 1}) + session.Close() + c.Assert(err, IsNil) + + server.Wipe() + + session = server.Session() + names, err := session.DatabaseNames() + session.Close() + c.Assert(err, IsNil) + for _, name := range names { + if name != "local" && name != "admin" { + c.Fatalf("Wipe should have removed this database: %s", name) + } + } +} + +func (s *S) TestStop(c *C) { + var server testserver.TestServer + server.SetPath(c.MkDir()) + defer server.Stop() + + // Server should not be running. 
+ process := server.ProcessTest() + c.Assert(process, IsNil) + + session := server.Session() + addr := session.LiveServers()[0] + session.Close() + + // Server should be running now. + process = server.ProcessTest() + p, err := os.FindProcess(process.Pid) + c.Assert(err, IsNil) + p.Release() + + server.Stop() + + // Server should not be running anymore. + session, err = mgo.DialWithTimeout(addr, 500 * time.Millisecond) + if session != nil { + session.Close() + c.Fatalf("Stop did not stop the server") + } +} + +func (s *S) TestCheckSessions(c *C) { + var server testserver.TestServer + server.SetPath(c.MkDir()) + defer server.Stop() + + session := server.Session() + defer session.Close() + c.Assert(server.Wipe, PanicMatches, "There are mgo sessions still alive.") +} + +func (s *S) TestCheckSessionsDisabled(c *C) { + var server testserver.TestServer + server.SetPath(c.MkDir()) + defer server.Stop() + + os.Setenv("CHECK_SESSIONS", "0") + + // Should not panic, although it looks to Wipe like this session will leak. + session := server.Session() + defer session.Close() + server.Wipe() +} From ebd6a8ef5787c477889d538148410f6b675a0b57 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 25 May 2015 15:22:29 -0300 Subject: [PATCH 144/305] Fix EnsureIndex test result: bucketSize int => f32 --- session_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/session_test.go b/session_test.go index 855accfff..4cb98ab28 100644 --- a/session_test.go +++ b/session_test.go @@ -2647,7 +2647,7 @@ var indexTests = []struct { "name": "loc_geoHaystack_type_1", "key": M{"loc": "geoHaystack", "type": 1}, "ns": "mydb.mycoll", - "bucketSize": 1, + "bucketSize": 1.0, }, }, { mgo.Index{ From a5d6cbfb62f0668d7615eda1d43762bce3d5637b Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 25 May 2015 15:24:33 -0300 Subject: [PATCH 145/305] Fix serious sync breakage sneaked in PR#83/b9cc139 This was never present in a stable release. 
--- cluster.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/cluster.go b/cluster.go index 88195a2f3..ad17458de 100644 --- a/cluster.go +++ b/cluster.go @@ -213,7 +213,6 @@ func (cluster *mongoCluster) syncServer(server *mongoServer) (info *mongoServerI // Made an incorrect assumption above, so fix stats. stats.conn(-1, false) stats.conn(+1, true) - server.info.Master = true } } else if result.Secondary { debugf("SYNC %s is a slave.", addr) @@ -221,8 +220,7 @@ func (cluster *mongoCluster) syncServer(server *mongoServer) (info *mongoServerI logf("SYNC %s in unknown state. Pretending it's a slave due to direct connection.", addr) } else { logf("SYNC %s is neither a master nor a slave.", addr) - // Made an incorrect assumption above, so fix stats. - stats.conn(-1, false) + // Let stats track it as whatever was known before. return nil, nil, errors.New(addr + " is not a master nor slave") } From 1b35c4c04642eb82cbb2190845ae0fe79fa3493d Mon Sep 17 00:00:00 2001 From: Tej Chajed Date: Tue, 26 May 2015 15:21:14 -0400 Subject: [PATCH 146/305] Support encoding byte arrays in structs Indexes each element of the array rather than allocating a slice to copy the array into as in #74. Fixes #73. 
--- bson/bson_test.go | 1 + bson/encode.go | 12 +++++++++++- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/bson/bson_test.go b/bson/bson_test.go index 974a4d4c3..594d1837d 100644 --- a/bson/bson_test.go +++ b/bson/bson_test.go @@ -1268,6 +1268,7 @@ var twoWayCrossItems = []crossTypeItem{ // arrays {&struct{ V [2]int }{[...]int{1, 2}}, map[string][2]int{"v": [2]int{1, 2}}}, + {&struct{ V [2]byte }{[...]byte{1, 2}}, map[string][2]byte{"v": [2]byte{1, 2}}}, // zero time {&struct{ V time.Time }{}, map[string]interface{}{"v": time.Time{}}}, diff --git a/bson/encode.go b/bson/encode.go index 8599f037d..e1015091b 100644 --- a/bson/encode.go +++ b/bson/encode.go @@ -365,7 +365,17 @@ func (e *encoder) addElem(name string, v reflect.Value, minSize bool) { et := v.Type().Elem() if et.Kind() == reflect.Uint8 { e.addElemName('\x05', name) - e.addBinary('\x00', v.Slice(0, v.Len()).Interface().([]byte)) + if v.CanAddr() { + e.addBinary('\x00', v.Slice(0, v.Len()).Interface().([]byte)) + } else { + n := v.Len() + e.addInt32(int32(n)) + e.addBytes('\x00') + for i := 0; i < n; i++ { + el := v.Index(i) + e.addBytes(byte(el.Uint())) + } + } } else { e.addElemName('\x04', name) e.addDoc(v) From e1e5be458d0d47269bbb6748c97faefe48091824 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Fri, 29 May 2015 09:14:29 -0300 Subject: [PATCH 147/305] Minor doc improvement. --- session.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/session.go b/session.go index 46e18d280..c866bcd7e 100644 --- a/session.go +++ b/session.go @@ -228,6 +228,8 @@ func DialWithTimeout(url string, timeout time.Duration) (*Session, error) { // ParseURL parses a MongoDB URL as accepted by the Dial function and returns // a value suitable for providing into DialWithInfo. +// +// See Dial for more details on the format of url. 
func ParseURL(url string) (*DialInfo, error) { uinfo, err := extractURL(url) if err != nil { From 25b7543f00cbd57bb76f80243724e0b58800d24b Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Fri, 29 May 2015 09:21:18 -0300 Subject: [PATCH 148/305] Only enable write commands on wire >= 3 for now. --- session.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/session.go b/session.go index c866bcd7e..6222fd766 100644 --- a/session.go +++ b/session.go @@ -4071,7 +4071,8 @@ func (c *Collection) writeQuery(op interface{}) (lerr *LastError, err error) { safeOp := s.safeOp s.m.RUnlock() - if socket.ServerInfo().MaxWireVersion >= 2 { + // TODO Enable this path for wire version 2 as well. + if socket.ServerInfo().MaxWireVersion >= 3 { // Servers with a more recent write protocol benefit from write commands. if op, ok := op.(*insertOp); ok && len(op.documents) > 1000 { var firstErr error From 90e509a57c0f54ceb8f2a763cd8bba5012bb0e0f Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Fri, 29 May 2015 09:36:52 -0300 Subject: [PATCH 149/305] Fix test expecting GLE not to be sent on 2.6. --- session.go | 8 ++++---- session_test.go | 14 ++++++++------ testserver/testserver_test.go | 2 +- 3 files changed, 13 insertions(+), 11 deletions(-) diff --git a/session.go b/session.go index 6222fd766..7063b7f3e 100644 --- a/session.go +++ b/session.go @@ -2637,7 +2637,7 @@ func (q *Query) SetMaxScan(n int) *Query { // This can be used to efficiently prevent and identify unexpectedly slow queries. // // A few important notes about the mechanism enforcing this limit: -// +// // - Requests can block behind locking operations on the server, and that blocking // time is not accounted for. 
In other words, the timer starts ticking only after // the actual start of the query when it initially acquires the appropriate lock; @@ -2654,7 +2654,7 @@ func (q *Query) SetMaxScan(n int) *Query { // // - This limit does not override the inactive cursor timeout for idle cursors // (default is 10 min). -// +// // This mechanism was introduced in MongoDB 2.6. // // Relevant documentation: @@ -2663,7 +2663,7 @@ func (q *Query) SetMaxScan(n int) *Query { // func (q *Query) SetMaxTime(d time.Duration) *Query { q.m.Lock() - q.op.options.MaxTimeMS = int(d/time.Millisecond) + q.op.options.MaxTimeMS = int(d / time.Millisecond) q.op.hasOptions = true q.m.Unlock() return q @@ -4079,7 +4079,7 @@ func (c *Collection) writeQuery(op interface{}) (lerr *LastError, err error) { // Maximum batch size is 1000. Must split out in separate operations for compatibility. all := op.documents for i := 0; i < len(all); i += 1000 { - l := i+1000 + l := i + 1000 if l > len(all) { l = len(all) } diff --git a/session_test.go b/session_test.go index 4cb98ab28..779c9e7e1 100644 --- a/session_test.go +++ b/session_test.go @@ -1113,7 +1113,7 @@ func (s *S) TestQuerySetMaxTime(c *C) { } query := coll.Find(nil) - query.SetMaxTime(1*time.Millisecond) + query.SetMaxTime(1 * time.Millisecond) query.Batch(2) var result []M err = query.All(&result) @@ -1673,7 +1673,7 @@ func (s *S) TestFindTailTimeoutWithSleep(c *C) { // 1*QUERY for nonce + 1*GET_MORE_OP on Next + 1*GET_MORE_OP on Next after sleep + // 1*INSERT_OP + 1*QUERY_OP for getLastError on insert of 47 stats := mgo.GetStats() - if s.versionAtLeast(2, 6) { + if s.versionAtLeast(3, 0) { // TODO Will be 2.6 when write commands are enabled for it. 
c.Assert(stats.SentOps, Equals, 4) } else { c.Assert(stats.SentOps, Equals, 5) @@ -1770,7 +1770,7 @@ func (s *S) TestFindTailTimeoutNoSleep(c *C) { // 1*QUERY_OP for nonce + 1*GET_MORE_OP on Next + // 1*INSERT_OP + 1*QUERY_OP for getLastError on insert of 47 stats := mgo.GetStats() - if s.versionAtLeast(2, 6) { + if s.versionAtLeast(3, 0) { // TODO Will be 2.6 when write commands are enabled for it. c.Assert(stats.SentOps, Equals, 3) } else { c.Assert(stats.SentOps, Equals, 4) @@ -1866,7 +1866,7 @@ func (s *S) TestFindTailNoTimeout(c *C) { // 1*QUERY_OP for nonce + 1*GET_MORE_OP on Next + // 1*INSERT_OP + 1*QUERY_OP for getLastError on insert of 47 stats := mgo.GetStats() - if s.versionAtLeast(2, 6) { + if s.versionAtLeast(3, 0) { // TODO Will be 2.6 when write commands are enabled for it. c.Assert(stats.SentOps, Equals, 3) } else { c.Assert(stats.SentOps, Equals, 4) @@ -2490,7 +2490,9 @@ func (s *S) TestSafeInsert(c *C) { // It must have sent two operations (INSERT_OP + getLastError QUERY_OP) stats := mgo.GetStats() - if s.versionAtLeast(2, 6) { + + // TODO Will be 2.6 when write commands are enabled for it. + if s.versionAtLeast(3, 0) { c.Assert(stats.SentOps, Equals, 1) } else { c.Assert(stats.SentOps, Equals, 2) @@ -2680,7 +2682,7 @@ var indexTests = []struct { }, }, { mgo.Index{ - Key: []string{"$text:$**"}, + Key: []string{"$text:$**"}, }, M{ "name": "$**_text", diff --git a/testserver/testserver_test.go b/testserver/testserver_test.go index 59dc5a6c6..4fb3f2837 100644 --- a/testserver/testserver_test.go +++ b/testserver/testserver_test.go @@ -77,7 +77,7 @@ func (s *S) TestStop(c *C) { server.Stop() // Server should not be running anymore. 
- session, err = mgo.DialWithTimeout(addr, 500 * time.Millisecond) + session, err = mgo.DialWithTimeout(addr, 500*time.Millisecond) if session != nil { session.Close() c.Fatalf("Stop did not stop the server") From e4a8adfd80b51f33b068f67872a00dddabe3236d Mon Sep 17 00:00:00 2001 From: Menno Smits Date: Mon, 11 May 2015 22:05:52 +1200 Subject: [PATCH 150/305] Avoid MongoDB pipeline size limit in PurgeMissing Under MongoDB 2.4 and earlier, aggregation pipeline results may be no bigger than 16MB, even when an iterator is used. It was possible for PurgeMissing to hit this limit when dealing with large txn-queue arrays, causing it to fail. This change refactors PurgeMissing so that it no longer uses an aggregation pipeline, working around the limitation. A regression test is included which triggered the failure mode for the previous version of PurgeMissing. The aggregation pipeline result size limitation was removed in MongoDB 2.6. See: http://docs.mongodb.org/manual/core/aggregation-pipeline-limits/ --- txn/txn.go | 80 ++++++++++++++++++++++++------------------------ txn/txn_test.go | 81 +++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 120 insertions(+), 41 deletions(-) diff --git a/txn/txn.go b/txn/txn.go index 143e82a6f..25013ec17 100644 --- a/txn/txn.go +++ b/txn/txn.go @@ -382,16 +382,10 @@ func (r *Runner) ChangeLog(logc *mgo.Collection) { func (r *Runner) PurgeMissing(collections ...string) error { type M map[string]interface{} type S []interface{} - pipeline := []M{ - {"$project": M{"_id": 1, "txn-queue": 1}}, - {"$unwind": "$txn-queue"}, - {"$sort": M{"_id": 1, "txn-queue": 1}}, - //{"$group": M{"_id": M{"$substr": S{"$txn-queue", 0, 24}}, "docids": M{"$push": "$_id"}}}, - } - type TRef struct { - DocId interface{} "_id" - TxnId string "txn-queue" + type TDoc struct { + Id interface{} "_id" + TxnQueue []string "txn-queue" } found := make(map[bson.ObjectId]bool) @@ -399,10 +393,40 @@ func (r *Runner) PurgeMissing(collections ...string) error { 
sort.Strings(collections) for _, collection := range collections { c := r.tc.Database.C(collection) - iter := c.Pipe(pipeline).Iter() - var tref TRef - for iter.Next(&tref) { - txnId := bson.ObjectIdHex(tref.TxnId[:24]) + iter := c.Find(nil).Select(bson.M{"_id": 1, "txn-queue": 1}).Iter() + var tdoc TDoc + for iter.Next(&tdoc) { + for _, txnToken := range tdoc.TxnQueue { + txnId := bson.ObjectIdHex(txnToken[:24]) + if found[txnId] { + continue + } + if r.tc.FindId(txnId).One(nil) == nil { + found[txnId] = true + continue + } + logf("WARNING: purging from document %s/%v the missing transaction id %s", collection, tdoc.Id, txnId) + err := c.UpdateId(tdoc.Id, M{"$pull": M{"txn-queue": M{"$regex": "^" + txnId.Hex() + "_*"}}}) + if err != nil { + return fmt.Errorf("error purging missing transaction %s: %v", txnId.Hex(), err) + } + } + } + if err := iter.Close(); err != nil { + return fmt.Errorf("transaction queue iteration error for %s: %v", collection, err) + } + } + + type StashTDoc struct { + Id docKey "_id" + TxnQueue []string "txn-queue" + } + + iter := r.sc.Find(nil).Select(bson.M{"_id": 1, "txn-queue": 1}).Iter() + var stdoc StashTDoc + for iter.Next(&stdoc) { + for _, txnToken := range stdoc.TxnQueue { + txnId := bson.ObjectIdHex(txnToken[:24]) if found[txnId] { continue } @@ -410,38 +434,12 @@ func (r *Runner) PurgeMissing(collections ...string) error { found[txnId] = true continue } - logf("WARNING: purging from document %s/%v the missing transaction id %s", collection, tref.DocId, txnId) - err := c.UpdateId(tref.DocId, M{"$pull": M{"txn-queue": M{"$regex": "^" + txnId.Hex() + "_*"}}}) + logf("WARNING: purging from stash document %s/%v the missing transaction id %s", stdoc.Id.C, stdoc.Id.Id, txnId) + err := r.sc.UpdateId(stdoc.Id, M{"$pull": M{"txn-queue": M{"$regex": "^" + txnId.Hex() + "_*"}}}) if err != nil { return fmt.Errorf("error purging missing transaction %s: %v", txnId.Hex(), err) } } - if err := iter.Close(); err != nil { - return 
fmt.Errorf("transaction queue iteration error for collection %s: %v", collection, err) - } - } - - type StashTRef struct { - Id docKey "_id" - TxnId string "txn-queue" - } - - iter := r.sc.Pipe(pipeline).Iter() - var stref StashTRef - for iter.Next(&stref) { - txnId := bson.ObjectIdHex(stref.TxnId[:24]) - if found[txnId] { - continue - } - if r.tc.FindId(txnId).One(nil) == nil { - found[txnId] = true - continue - } - logf("WARNING: purging from stash document %s/%v the missing transaction id %s", stref.Id.C, stref.Id.Id, txnId) - err := r.sc.UpdateId(stref.Id, M{"$pull": M{"txn-queue": M{"$regex": "^" + txnId.Hex() + "_*"}}}) - if err != nil { - return fmt.Errorf("error purging missing transaction %s: %v", txnId.Hex(), err) - } } if err := iter.Close(); err != nil { return fmt.Errorf("transaction stash iteration error: %v", err) diff --git a/txn/txn_test.go b/txn/txn_test.go index 5548dcbb0..f819f9e59 100644 --- a/txn/txn_test.go +++ b/txn/txn_test.go @@ -608,6 +608,87 @@ func (s *S) TestTxnQueueStashStressTest(c *C) { } } +func (s *S) TestPurgeMissingPipelineSizeLimit(c *C) { + // This test ensures that PurgeMissing can handle very large + // txn-queue fields. Previous iterations of PurgeMissing would + // trigger a 16MB aggregation pipeline result size limit when run + // against a documents or stashes with large numbers of txn-queue + // entries. PurgeMissing now no longer uses aggregation pipelines + // to work around this limit. + + // The pipeline result size limitation was removed from MongoDB in 2.6 so + // this test is only run for older MongoDB version. + build, err := s.session.BuildInfo() + c.Assert(err, IsNil) + if build.VersionAtLeast(2, 6) { + c.Skip("This tests a problem that can only happen with MongoDB < 2.6 ") + } + + // Insert a single document to work with. 
+ err = s.accounts.Insert(M{"_id": 0, "balance": 100}) + c.Assert(err, IsNil) + + ops := []txn.Op{{ + C: "accounts", + Id: 0, + Update: M{"$inc": M{"balance": 100}}, + }} + + // Generate one successful transaction. + good := bson.NewObjectId() + c.Logf("---- Running ops under transaction %q", good.Hex()) + err = s.runner.Run(ops, good, nil) + c.Assert(err, IsNil) + + // Generate another transaction which which will go missing. + missing := bson.NewObjectId() + c.Logf("---- Running ops under transaction %q (which will go missing)", missing.Hex()) + err = s.runner.Run(ops, missing, nil) + c.Assert(err, IsNil) + + err = s.tc.RemoveId(missing) + c.Assert(err, IsNil) + + // Generate a txn-queue on the test document that's large enough + // that it used to cause PurgeMissing to exceed MongoDB's pipeline + // result 16MB size limit (MongoDB 2.4 and older only). + // + // The contents of the txn-queue field doesn't matter, only that + // it's big enough to trigger the size limit. The required size + // can also be achieved by using multiple documents as long as the + // cumulative size of all the txn-queue fields exceeds the + // pipeline limit. A single document is easier to work with for + // this test however. + // + // The txn id of the successful transaction is used fill the + // txn-queue because this takes advantage of a short circuit in + // PurgeMissing, dramatically speeding up the test run time. + const fakeQueueLen = 250000 + fakeTxnQueue := make([]string, fakeQueueLen) + token := good.Hex() + "_12345678" // txn id + nonce + for i := 0; i < fakeQueueLen; i++ { + fakeTxnQueue[i] = token + } + + err = s.accounts.UpdateId(0, bson.M{ + "$set": bson.M{"txn-queue": fakeTxnQueue}, + }) + c.Assert(err, IsNil) + + // PurgeMissing could hit the same pipeline result size limit when + // processing the txn-queue fields of stash documents so insert + // the large txn-queue there too to ensure that no longer happens. 
+ err = s.sc.Insert( + bson.D{{"c", "accounts"}, {"id", 0}}, + bson.M{"txn-queue": fakeTxnQueue}, + ) + c.Assert(err, IsNil) + + c.Logf("---- Purging missing transactions") + err = s.runner.PurgeMissing("accounts") + c.Assert(err, IsNil) +} + func (s *S) TestTxnQueueStressTest(c *C) { txn.SetChaos(txn.Chaos{ SlowdownChance: 0.3, From 8466119628bf77a0b41257e945a4b61832307c39 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Wed, 3 Jun 2015 15:39:22 -0400 Subject: [PATCH 151/305] Fix LastError.UpdatedExisting on 3.0+. Fixes #113. --- session.go | 8 +++++--- session_test.go | 18 +++++++++++++++++- 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/session.go b/session.go index 7063b7f3e..1273cc4bf 100644 --- a/session.go +++ b/session.go @@ -3829,7 +3829,7 @@ func (q *Query) Apply(change Change, result interface{}) (info *ChangeInfo, err if doc.LastError.N == 0 { return nil, ErrNotFound } - if doc.Value.Kind != 0x0A { + if doc.Value.Kind != 0x0A && result != nil { err = doc.Value.Unmarshal(result) if err != nil { return nil, err @@ -4186,8 +4186,10 @@ func (c *Collection) writeCommand(socket *mongoSocket, safeOp *queryOp, op inter var result writeCmdResult err = c.Database.run(socket, cmd, &result) debugf("Write command result: %#v (err=%v)", result, err) - // TODO Should lerr.N be result.NModified on updates? - lerr = &LastError{UpdatedExisting: result.NModified != 0, N: result.N} + lerr = &LastError{ + UpdatedExisting: result.N > 0 && len(result.Upserted) == 0, + N: result.N, + } if len(result.Upserted) > 0 { lerr.UpsertedId = result.Upserted[0].Id } diff --git a/session_test.go b/session_test.go index 779c9e7e1..64e893642 100644 --- a/session_test.go +++ b/session_test.go @@ -354,6 +354,10 @@ func (s *S) TestUpdate(c *C) { c.Assert(err, IsNil) } + // No changes is a no-op and shouldn't return an error. 
+ err = coll.Update(M{"k": 42}, M{"$set": M{"n": 42}}) + c.Assert(err, IsNil) + err = coll.Update(M{"k": 42}, M{"$inc": M{"n": 1}}) c.Assert(err, IsNil) @@ -530,7 +534,12 @@ func (s *S) TestUpdateAll(c *C) { c.Assert(err, IsNil) } - info, err := coll.UpdateAll(M{"k": M{"$gt": 42}}, M{"$inc": M{"n": 1}}) + // Don't actually modify the documents. Should still report 4 matching updates. + info, err := coll.UpdateAll(M{"k": M{"$gt": 42}}, M{"$unset": M{"missing": 1}}) + c.Assert(err, IsNil) + c.Assert(info.Updated, Equals, 4) + + info, err = coll.UpdateAll(M{"k": M{"$gt": 42}}, M{"$inc": M{"n": 1}}) c.Assert(err, IsNil) c.Assert(info.Updated, Equals, 4) @@ -899,6 +908,13 @@ func (s *S) TestFindAndModify(c *C) { c.Assert(info.Removed, Equals, 0) c.Assert(info.UpsertedId, IsNil) + // A nil result parameter should be acceptable. + info, err = coll.Find(M{"n": 43}).Apply(mgo.Change{Update: M{"$unset": M{"missing": 1}}}, nil) + c.Assert(err, IsNil) + c.Assert(info.Updated, Equals, 1) + c.Assert(info.Removed, Equals, 0) + c.Assert(info.UpsertedId, IsNil) + result = M{} info, err = coll.Find(M{"n": 43}).Apply(mgo.Change{Update: M{"$inc": M{"n": 1}}, ReturnNew: true}, result) c.Assert(err, IsNil) From 5ef240212ee8031d74877463a60e7afa9b9fd87f Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Thu, 4 Jun 2015 11:22:54 -0400 Subject: [PATCH 152/305] Rename testserver as dbtest in compatible way. 
--- dbtest/dbserver.go | 196 +++++++++++++++++++++++++++++++++++++++ dbtest/dbserver_test.go | 108 +++++++++++++++++++++ dbtest/export_test.go | 12 +++ testserver/testserver.go | 42 ++------- 4 files changed, 323 insertions(+), 35 deletions(-) create mode 100644 dbtest/dbserver.go create mode 100644 dbtest/dbserver_test.go create mode 100644 dbtest/export_test.go diff --git a/dbtest/dbserver.go b/dbtest/dbserver.go new file mode 100644 index 000000000..85113969d --- /dev/null +++ b/dbtest/dbserver.go @@ -0,0 +1,196 @@ +package dbtest + +import ( + "bytes" + "fmt" + "net" + "os" + "os/exec" + "strconv" + "time" + + "gopkg.in/mgo.v2-unstable" + "gopkg.in/tomb.v2" +) + +// DBServer controls a MongoDB server process to be used within test suites. +// +// The test server is started when Session is called the first time and should +// remain running for the duration of all tests, with the Wipe method being +// called between tests (before each of them) to clear stored data. After all tests +// are done, the Stop method should be called to stop the test server. +// +// Before the DBServer is used the SetPath method must be called to define +// the location for the database files to be stored. +type DBServer struct { + session *mgo.Session + output bytes.Buffer + server *exec.Cmd + dbpath string + host string + tomb tomb.Tomb +} + +// SetPath defines the path to the directory where the database files will be +// stored if it is started. The directory path itself is not created or removed +// by the test helper. 
+func (dbs *DBServer) SetPath(dbpath string) { + dbs.dbpath = dbpath +} + +func (dbs *DBServer) start() { + if dbs.server != nil { + panic("DBServer already started") + } + if dbs.dbpath == "" { + panic("DBServer.SetPath must be called before using the server") + } + mgo.SetStats(true) + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + panic("unable to listen on a local address: " + err.Error()) + } + addr := l.Addr().(*net.TCPAddr) + l.Close() + dbs.host = addr.String() + + args := []string{ + "--dbpath", dbs.dbpath, + "--bind_ip", "127.0.0.1", + "--port", strconv.Itoa(addr.Port), + "--nssize", "1", + "--noprealloc", + "--smallfiles", + "--nojournal", + } + dbs.tomb = tomb.Tomb{} + dbs.server = exec.Command("mongod", args...) + dbs.server.Stdout = &dbs.output + dbs.server.Stderr = &dbs.output + err = dbs.server.Start() + if err != nil { + panic(err) + } + dbs.tomb.Go(dbs.monitor) + dbs.Wipe() +} + +func (dbs *DBServer) monitor() error { + dbs.server.Process.Wait() + if dbs.tomb.Alive() { + // Present some debugging information. + fmt.Fprintf(os.Stderr, "---- mongod process died unexpectedly:\n") + fmt.Fprintf(os.Stderr, "%s", dbs.output.Bytes()) + fmt.Fprintf(os.Stderr, "---- mongod processes running right now:\n") + cmd := exec.Command("/bin/sh", "-c", "ps auxw | grep mongod") + cmd.Stdout = os.Stderr + cmd.Stderr = os.Stderr + cmd.Run() + fmt.Fprintf(os.Stderr, "----------------------------------------\n") + + panic("mongod process died unexpectedly") + } + return nil +} + +// Stop stops the test server process, if it is running. +// +// It's okay to call Stop multiple times. After the test server is +// stopped it cannot be restarted. +// +// All database sessions must be closed before or while the Stop method +// is running. Otherwise Stop will panic after a timeout informing that +// there is a session leak. 
+func (dbs *DBServer) Stop() { + if dbs.session != nil { + dbs.checkSessions() + if dbs.session != nil { + dbs.session.Close() + dbs.session = nil + } + } + if dbs.server != nil { + dbs.tomb.Kill(nil) + dbs.server.Process.Kill() + select { + case <-dbs.tomb.Dead(): + case <-time.After(5 * time.Second): + panic("timeout waiting for mongod process to die") + } + dbs.server = nil + } +} + +// Session returns a new session to the server. The returned session +// must be closed after the test is done with it. +// +// The first Session obtained from a DBServer will start it. +func (dbs *DBServer) Session() *mgo.Session { + if dbs.server == nil { + dbs.start() + } + if dbs.session == nil { + mgo.ResetStats() + var err error + dbs.session, err = mgo.Dial(dbs.host + "/test") + if err != nil { + panic(err) + } + } + return dbs.session.Copy() +} + +// checkSessions ensures all mgo sessions opened were properly closed. +// For slightly faster tests, it may be disabled setting the +// environmnet variable CHECK_SESSIONS to 0. +func (dbs *DBServer) checkSessions() { + if check := os.Getenv("CHECK_SESSIONS"); check == "0" || dbs.server == nil || dbs.session == nil { + return + } + dbs.session.Close() + dbs.session = nil + for i := 0; i < 100; i++ { + stats := mgo.GetStats() + if stats.SocketsInUse == 0 && stats.SocketsAlive == 0 { + return + } + time.Sleep(100 * time.Millisecond) + } + panic("There are mgo sessions still alive.") +} + +// Wipe drops all created databases and their data. +// +// The MongoDB server remains running if it was prevoiusly running, +// or stopped if it was previously stopped. +// +// All database sessions must be closed before or while the Wipe method +// is running. Otherwise Wipe will panic after a timeout informing that +// there is a session leak. 
+func (dbs *DBServer) Wipe() { + if dbs.server == nil || dbs.session == nil { + return + } + dbs.checkSessions() + sessionUnset := dbs.session == nil + session := dbs.Session() + defer session.Close() + if sessionUnset { + dbs.session.Close() + dbs.session = nil + } + names, err := session.DatabaseNames() + if err != nil { + panic(err) + } + for _, name := range names { + switch name { + case "admin", "local", "config": + default: + err = session.DB(name).DropDatabase() + if err != nil { + panic(err) + } + } + } +} diff --git a/dbtest/dbserver_test.go b/dbtest/dbserver_test.go new file mode 100644 index 000000000..cce76cdd4 --- /dev/null +++ b/dbtest/dbserver_test.go @@ -0,0 +1,108 @@ +package dbtest_test + +import ( + "os" + "testing" + "time" + + . "gopkg.in/check.v1" + + "gopkg.in/mgo.v2-unstable" + "gopkg.in/mgo.v2-unstable/dbtest" +) + +type M map[string]interface{} + +func TestAll(t *testing.T) { + TestingT(t) +} + +type S struct { + oldCheckSessions string +} + +var _ = Suite(&S{}) + +func (s *S) SetUpTest(c *C) { + s.oldCheckSessions = os.Getenv("CHECK_SESSIONS") + os.Setenv("CHECK_SESSIONS", "") +} + +func (s *S) TearDownTest(c *C) { + os.Setenv("CHECK_SESSIONS", s.oldCheckSessions) +} + +func (s *S) TestWipeData(c *C) { + var server dbtest.DBServer + server.SetPath(c.MkDir()) + defer server.Stop() + + session := server.Session() + err := session.DB("mydb").C("mycoll").Insert(M{"a": 1}) + session.Close() + c.Assert(err, IsNil) + + server.Wipe() + + session = server.Session() + names, err := session.DatabaseNames() + session.Close() + c.Assert(err, IsNil) + for _, name := range names { + if name != "local" && name != "admin" { + c.Fatalf("Wipe should have removed this database: %s", name) + } + } +} + +func (s *S) TestStop(c *C) { + var server dbtest.DBServer + server.SetPath(c.MkDir()) + defer server.Stop() + + // Server should not be running. 
+ process := server.ProcessTest() + c.Assert(process, IsNil) + + session := server.Session() + addr := session.LiveServers()[0] + session.Close() + + // Server should be running now. + process = server.ProcessTest() + p, err := os.FindProcess(process.Pid) + c.Assert(err, IsNil) + p.Release() + + server.Stop() + + // Server should not be running anymore. + session, err = mgo.DialWithTimeout(addr, 500*time.Millisecond) + if session != nil { + session.Close() + c.Fatalf("Stop did not stop the server") + } +} + +func (s *S) TestCheckSessions(c *C) { + var server dbtest.DBServer + server.SetPath(c.MkDir()) + defer server.Stop() + + session := server.Session() + defer session.Close() + c.Assert(server.Wipe, PanicMatches, "There are mgo sessions still alive.") +} + +func (s *S) TestCheckSessionsDisabled(c *C) { + var server dbtest.DBServer + server.SetPath(c.MkDir()) + defer server.Stop() + + os.Setenv("CHECK_SESSIONS", "0") + + // Should not panic, although it looks to Wipe like this session will leak. + session := server.Session() + defer session.Close() + server.Wipe() +} diff --git a/dbtest/export_test.go b/dbtest/export_test.go new file mode 100644 index 000000000..65f1cb023 --- /dev/null +++ b/dbtest/export_test.go @@ -0,0 +1,12 @@ +package dbtest + +import ( + "os" +) + +func (dbs *DBServer) ProcessTest() *os.Process { + if dbs.server == nil { + return nil + } + return dbs.server.Process +} diff --git a/testserver/testserver.go b/testserver/testserver.go index b369d88bc..7c7b713e4 100644 --- a/testserver/testserver.go +++ b/testserver/testserver.go @@ -1,3 +1,4 @@ +// WARNING: This package was replaced by mgo.v2/dbtest. package testserver import ( @@ -13,15 +14,7 @@ import ( "gopkg.in/tomb.v2" ) -// TestServer controls a MongoDB server process to be used within test suites. 
-// -// The test server is started when Session is called the first time and should -// remain running for the duration of all tests, with the Wipe method being -// called between tests (before each of them) to clear stored data. After all tests -// are done, the Stop method should be called to stop the test server. -// -// Before the TestServer is used the SetPath method must be called to define -// the location for the database files to be stored. +// WARNING: This package was replaced by mgo.v2/dbtest. type TestServer struct { session *mgo.Session output bytes.Buffer @@ -31,9 +24,7 @@ type TestServer struct { tomb tomb.Tomb } -// SetPath defines the path to the directory where the database files will be -// stored if it is started. The directory path itself is not created or removed -// by the test helper. +// WARNING: This package was replaced by mgo.v2/dbtest. func (ts *TestServer) SetPath(dbpath string) { ts.dbpath = dbpath } @@ -93,14 +84,7 @@ func (ts *TestServer) monitor() error { return nil } -// Stop stops the test server process, if it is running. -// -// It's okay to call Stop multiple times. After the test server is -// stopped it cannot be restarted. -// -// All database sessions must be closed before or while the Stop method -// is running. Otherwise Stop will panic after a timeout informing that -// there is a session leak. +// WARNING: This package was replaced by mgo.v2/dbtest. func (ts *TestServer) Stop() { if ts.session != nil { ts.checkSessions() @@ -121,10 +105,7 @@ func (ts *TestServer) Stop() { } } -// Session returns a new session to the server. The returned session -// must be closed after the test is done with it. -// -// The first Session obtained from a TestServer will start it. +// WARNING: This package was replaced by mgo.v2/dbtest. 
func (ts *TestServer) Session() *mgo.Session { if ts.server == nil { ts.start() @@ -140,9 +121,7 @@ func (ts *TestServer) Session() *mgo.Session { return ts.session.Copy() } -// checkSessions ensures all mgo sessions opened were properly closed. -// For slightly faster tests, it may be disabled setting the -// environmnet variable CHECK_SESSIONS to 0. +// WARNING: This package was replaced by mgo.v2/dbtest. func (ts *TestServer) checkSessions() { if check := os.Getenv("CHECK_SESSIONS"); check == "0" || ts.server == nil || ts.session == nil { return @@ -159,14 +138,7 @@ func (ts *TestServer) checkSessions() { panic("There are mgo sessions still alive.") } -// Wipe drops all created databases and their data. -// -// The MongoDB server remains running if it was prevoiusly running, -// or stopped if it was previously stopped. -// -// All database sessions must be closed before or while the Wipe method -// is running. Otherwise Wipe will panic after a timeout informing that -// there is a session leak. +// WARNING: This package was replaced by mgo.v2/dbtest. func (ts *TestServer) Wipe() { if ts.server == nil || ts.session == nil { return From 3569c88678d88179dcbd68d02ab081cbca3cd4d0 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Thu, 4 Jun 2015 11:26:27 -0400 Subject: [PATCH 153/305] dbtest: mgo.v2-unstable => mgo.v2 --- dbtest/dbserver.go | 2 +- dbtest/dbserver_test.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/dbtest/dbserver.go b/dbtest/dbserver.go index 85113969d..dc9fe9e1d 100644 --- a/dbtest/dbserver.go +++ b/dbtest/dbserver.go @@ -9,7 +9,7 @@ import ( "strconv" "time" - "gopkg.in/mgo.v2-unstable" + "gopkg.in/mgo.v2" "gopkg.in/tomb.v2" ) diff --git a/dbtest/dbserver_test.go b/dbtest/dbserver_test.go index cce76cdd4..79812fde3 100644 --- a/dbtest/dbserver_test.go +++ b/dbtest/dbserver_test.go @@ -7,8 +7,8 @@ import ( . 
"gopkg.in/check.v1" - "gopkg.in/mgo.v2-unstable" - "gopkg.in/mgo.v2-unstable/dbtest" + "gopkg.in/mgo.v2" + "gopkg.in/mgo.v2/dbtest" ) type M map[string]interface{} From 19dcc4f7eaa7886d30ead5840e968ea9bdb0c5d3 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Fri, 12 Jun 2015 08:54:04 -0300 Subject: [PATCH 154/305] Report document corruption on negative lengths. The panic in those situations was a slice out of bounds error, which is safe but not ideal. Fixes #116. --- bson/bson_test.go | 3 +++ bson/decode.go | 5 ++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/bson/bson_test.go b/bson/bson_test.go index 594d1837d..5024b4432 100644 --- a/bson/bson_test.go +++ b/bson/bson_test.go @@ -704,6 +704,9 @@ var corruptedData = []string{ // String with corrupted end. wrapInDoc("\x02\x00\x03\x00\x00\x00yo\xFF"), + + // Random data causing slice out of bounds per issue #116. + "!\x00\x00\x00\f000000000000000\x00000\xdf0000000\x00", } func (s *S) TestUnmarshalMapDocumentTooShort(c *C) { diff --git a/bson/decode.go b/bson/decode.go index bdd2e0287..f63ced8f4 100644 --- a/bson/decode.go +++ b/bson/decode.go @@ -816,9 +816,12 @@ func (d *decoder) readByte() byte { } func (d *decoder) readBytes(length int32) []byte { + if length < 0 { + corrupted() + } start := d.i d.i += int(length) - if d.i > len(d.in) { + if d.i < start || d.i > len(d.in) { corrupted() } return d.in[start : start+int(length)] From 33508a2e10cdc4c90b1fbad5b83e475af29b101b Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Fri, 12 Jun 2015 09:32:10 -0300 Subject: [PATCH 155/305] Do not marshal broken empty Raw documents. Fixes #117. 
--- bson/bson_test.go | 8 ++++++-- bson/encode.go | 8 +++++++- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/bson/bson_test.go b/bson/bson_test.go index 5024b4432..61140c99e 100644 --- a/bson/bson_test.go +++ b/bson/bson_test.go @@ -582,8 +582,12 @@ var marshalErrorItems = []testItemType{ "Can't marshal complex128 in a BSON document"}, {&structWithDupKeys{}, "Duplicated key 'name' in struct bson_test.structWithDupKeys"}, - {bson.Raw{0x0A, []byte{}}, - "Attempted to unmarshal Raw kind 10 as a document"}, + {bson.Raw{0xA, []byte{}}, + "Attempted to marshal Raw kind 10 as a document"}, + {bson.Raw{0x3, []byte{}}, + "Attempted to marshal empty Raw document"}, + {bson.M{"w": bson.Raw{0x3, []byte{}}}, + "Attempted to marshal empty Raw document"}, {&inlineCantPtr{&struct{ A, B int }{1, 2}}, "Option ,inline needs a struct value or map field"}, {&inlineDupName{1, struct{ A, B int }{2, 3}}, diff --git a/bson/encode.go b/bson/encode.go index e1015091b..36eb29ce6 100644 --- a/bson/encode.go +++ b/bson/encode.go @@ -101,7 +101,10 @@ func (e *encoder) addDoc(v reflect.Value) { if v.Type() == typeRaw { raw := v.Interface().(Raw) if raw.Kind != 0x03 && raw.Kind != 0x00 { - panic("Attempted to unmarshal Raw kind " + strconv.Itoa(int(raw.Kind)) + " as a document") + panic("Attempted to marshal Raw kind " + strconv.Itoa(int(raw.Kind)) + " as a document") + } + if len(raw.Data) == 0 { + panic("Attempted to marshal empty Raw document") } e.addBytes(raw.Data...) return @@ -389,6 +392,9 @@ func (e *encoder) addElem(name string, v reflect.Value, minSize bool) { if kind == 0x00 { kind = 0x03 } + if len(s.Data) == 0 && kind != 0x06 && kind != 0x0A && kind != 0xFF && kind != 0x7F { + panic("Attempted to marshal empty Raw document") + } e.addElemName(kind, name) e.addBytes(s.Data...) 
From 7d0c3292d228c50afd01306aa6f20e37539b09d5 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Fri, 12 Jun 2015 09:37:50 -0300 Subject: [PATCH 156/305] Test proper omitempty handling for Raw. --- bson/bson_test.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/bson/bson_test.go b/bson/bson_test.go index 61140c99e..653288895 100644 --- a/bson/bson_test.go +++ b/bson/bson_test.go @@ -986,6 +986,9 @@ type condTime struct { type condStruct struct { V struct{ A []int } ",omitempty" } +type condRaw struct { + V bson.Raw ",omitempty" +} type shortInt struct { V int64 ",minsize" @@ -1239,6 +1242,9 @@ var twoWayCrossItems = []crossTypeItem{ {&condStruct{struct{ A []int }{[]int{1}}}, bson.M{"v": bson.M{"a": []interface{}{1}}}}, {&condStruct{struct{ A []int }{}}, bson.M{}}, + {&condRaw{bson.Raw{Kind: 0x0A, Data: []byte{}}}, bson.M{"v": nil}}, + {&condRaw{bson.Raw{Kind: 0x00}}, bson.M{}}, + {&namedCondStr{"yo"}, map[string]string{"myv": "yo"}}, {&namedCondStr{}, map[string]string{}}, From 89d0282b53a3021ebbec7895a513307db202e7dd Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Sat, 13 Jun 2015 11:38:20 -0300 Subject: [PATCH 157/305] Ignore BSON array if being unmarshaled into RawD. Fixes #120. --- bson/bson_test.go | 3 +++ bson/decode.go | 6 +++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/bson/bson_test.go b/bson/bson_test.go index 653288895..25dbe410e 100644 --- a/bson/bson_test.go +++ b/bson/bson_test.go @@ -1337,6 +1337,9 @@ var oneWayCrossItems = []crossTypeItem{ {&struct { V struct{ v time.Time } ",omitempty" }{}, map[string]interface{}{}}, + + // Attempt to marshal slice into RawD (issue #120). 
+ {bson.M{"x": []int{1, 2, 3}}, &struct{ X bson.RawD }{}}, } func testCrossPair(c *C, dump interface{}, load interface{}) { diff --git a/bson/decode.go b/bson/decode.go index f63ced8f4..7befd777b 100644 --- a/bson/decode.go +++ b/bson/decode.go @@ -325,6 +325,10 @@ func (d *decoder) readArrayDocTo(out reflect.Value) { func (d *decoder) readSliceDoc(t reflect.Type) interface{} { tmp := make([]reflect.Value, 0, 8) elemType := t.Elem() + if elemType == typeRawDocElem { + d.dropElem(0x04) + return reflect.Zero(t).Interface() + } end := int(d.readInt32()) end += d.i - 4 @@ -437,7 +441,7 @@ func (d *decoder) readElemTo(out reflect.Value, kind byte) (good bool) { start := d.i - if kind == '\x03' { + if kind == 0x03 { // Delegate unmarshaling of documents. outt := out.Type() outk := out.Kind() From 9e7209e40e2f40d55688f225f9204a6e8eb93192 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Sat, 13 Jun 2015 12:24:52 -0300 Subject: [PATCH 158/305] Cleaner test cases for negative string lengths. --- bson/bson_test.go | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/bson/bson_test.go b/bson/bson_test.go index 25dbe410e..3055da6bb 100644 --- a/bson/bson_test.go +++ b/bson/bson_test.go @@ -709,8 +709,15 @@ var corruptedData = []string{ // String with corrupted end. wrapInDoc("\x02\x00\x03\x00\x00\x00yo\xFF"), - // Random data causing slice out of bounds per issue #116. - "!\x00\x00\x00\f000000000000000\x00000\xdf0000000\x00", + // String with negative length (issue #116). + "\x0c\x00\x00\x00\x02x\x00\xff\xff\xff\xff\x00", + + // String with zero length (must include trailing '\x00') + "\x0c\x00\x00\x00\x02x\x00\x00\x00\x00\x00\x00", + + // Binary with negative length. 
+ "\r\x00\x00\x00\x05x\x00\xff\xff\xff\xff\x00\x00", + } func (s *S) TestUnmarshalMapDocumentTooShort(c *C) { From 85f82c52963e2601eb5d1a87e8c98cc7f4f1e27b Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Wed, 15 Jul 2015 10:38:43 -0300 Subject: [PATCH 159/305] Import BSON tests from Jeff's spec. --- bson/bson_test.go | 66 ++++++++++- bson/decode.go | 8 +- bson/specdata/update.sh | 27 +++++ bson/specdata_test.go | 241 ++++++++++++++++++++++++++++++++++++++++ 4 files changed, 338 insertions(+), 4 deletions(-) create mode 100755 bson/specdata/update.sh create mode 100644 bson/specdata_test.go diff --git a/bson/bson_test.go b/bson/bson_test.go index 3055da6bb..c95cb7d7b 100644 --- a/bson/bson_test.go +++ b/bson/bson_test.go @@ -29,15 +29,18 @@ package bson_test import ( "encoding/binary" + "encoding/hex" "encoding/json" "errors" "net/url" "reflect" + "strings" "testing" "time" . "gopkg.in/check.v1" "gopkg.in/mgo.v2-unstable/bson" + "gopkg.in/yaml.v2" ) func TestAll(t *testing.T) { @@ -639,6 +642,10 @@ var unmarshalErrorItems = []unmarshalErrorType{ {123, "\x10name\x00\x08\x00\x00\x00", "Unmarshal needs a map or a pointer to a struct."}, + + {nil, + "\x08\x62\x00\x02", + "encoded boolean must be 1 or 0, found 2"}, } func (s *S) TestUnmarshalErrorItems(c *C) { @@ -691,7 +698,7 @@ func (s *S) TestUnmarshalRawErrorItems(c *C) { } var corruptedData = []string{ - "\x04\x00\x00\x00\x00", // Shorter than minimum + "\x04\x00\x00\x00\x00", // Document shorter than minimum "\x06\x00\x00\x00\x00", // Not enough data "\x05\x00\x00", // Broken length "\x05\x00\x00\x00\xff", // Corrupted termination @@ -717,7 +724,6 @@ var corruptedData = []string{ // Binary with negative length. 
"\r\x00\x00\x00\x05x\x00\xff\xff\xff\xff\x00\x00", - } func (s *S) TestUnmarshalMapDocumentTooShort(c *C) { @@ -1558,6 +1564,62 @@ func (s *S) TestObjectIdJSONMarshaling(c *C) { } } +type specTest struct { + Description string + Documents []struct { + Decoded map[string]interface{} + Encoded string + DecodeOnly bool `yaml:"decodeOnly"` + Error interface{} + } +} + +func (s *S) TestSpecTests(c *C) { + for _, data := range specTests { + var test specTest + err := yaml.Unmarshal([]byte(data), &test) + c.Assert(err, IsNil) + + c.Logf("Running spec test set %q", test.Description) + + for _, doc := range test.Documents { + if doc.Error != nil { + continue + } + c.Logf("Ensuring %q decodes as %v", doc.Encoded, doc.Decoded) + var decoded map[string]interface{} + encoded, err := hex.DecodeString(doc.Encoded) + c.Assert(err, IsNil) + err = bson.Unmarshal(encoded, &decoded) + c.Assert(err, IsNil) + c.Assert(decoded, DeepEquals, doc.Decoded) + } + + for _, doc := range test.Documents { + if doc.DecodeOnly || doc.Error != nil { + continue + } + c.Logf("Ensuring %v encodes as %q", doc.Decoded, doc.Encoded) + encoded, err := bson.Marshal(doc.Decoded) + c.Assert(err, IsNil) + c.Assert(strings.ToUpper(hex.EncodeToString(encoded)), Equals, doc.Encoded) + } + + for _, doc := range test.Documents { + if doc.Error == nil { + continue + } + c.Logf("Ensuring %q errors when decoded: %s", doc.Encoded, doc.Error) + var decoded map[string]interface{} + encoded, err := hex.DecodeString(doc.Encoded) + c.Assert(err, IsNil) + err = bson.Unmarshal(encoded, &decoded) + c.Assert(err, NotNil) + c.Logf("Failed with: %v", err) + } + } +} + // -------------------------------------------------------------------------- // Some simple benchmarks. 
diff --git a/bson/decode.go b/bson/decode.go index 7befd777b..ba7a3dfa2 100644 --- a/bson/decode.go +++ b/bson/decode.go @@ -780,10 +780,14 @@ func (d *decoder) readCStr() string { } func (d *decoder) readBool() bool { - if d.readByte() == 1 { + b := d.readByte() + if b == 0 { + return false + } + if b == 1 { return true } - return false + panic(fmt.Sprintf("encoded boolean must be 1 or 0, found %d", b)) } func (d *decoder) readFloat64() float64 { diff --git a/bson/specdata/update.sh b/bson/specdata/update.sh new file mode 100755 index 000000000..1efd3d3b6 --- /dev/null +++ b/bson/specdata/update.sh @@ -0,0 +1,27 @@ +#!/bin/sh + +set -e + +if [ ! -d specifications ]; then + git clone -b bson git@github.com:jyemin/specifications +fi + +TESTFILE="../specdata_test.go" + +cat < $TESTFILE +package bson_test + +var specTests = []string{ +END + +for file in specifications/source/bson/tests/*.yml; do + ( + echo '`' + cat $file + echo -n '`,' + ) >> $TESTFILE +done + +echo '}' >> $TESTFILE + +gofmt -w $TESTFILE diff --git a/bson/specdata_test.go b/bson/specdata_test.go new file mode 100644 index 000000000..513f9b209 --- /dev/null +++ b/bson/specdata_test.go @@ -0,0 +1,241 @@ +package bson_test + +var specTests = []string{ + ` +--- +description: "Array type" +documents: + - + decoded: + a : [] + encoded: 0D000000046100050000000000 + - + decoded: + a: [10] + encoded: 140000000461000C0000001030000A0000000000 + - + # Decode an array that uses an empty string as the key + decodeOnly : true + decoded: + a: [10] + encoded: 130000000461000B00000010000A0000000000 + - + # Decode an array that uses a non-numeric string as the key + decodeOnly : true + decoded: + a: [10] + encoded: 150000000461000D000000106162000A0000000000 + + +`, ` +--- +description: "Boolean type" +documents: + - + encoded: "090000000862000100" + decoded: { "b" : true } + - + encoded: "090000000862000000" + decoded: { "b" : false } + + + `, ` +--- +description: "Corrupted BSON" +documents: + - + encoded: 
"09000000016600" + error: "truncated double" + - + encoded: "09000000026600" + error: "truncated string" + - + encoded: "09000000036600" + error: "truncated document" + - + encoded: "09000000046600" + error: "truncated array" + - + encoded: "09000000056600" + error: "truncated binary" + - + encoded: "09000000076600" + error: "truncated objectid" + - + encoded: "09000000086600" + error: "truncated boolean" + - + encoded: "09000000096600" + error: "truncated date" + - + encoded: "090000000b6600" + error: "truncated regex" + - + encoded: "090000000c6600" + error: "truncated db pointer" + - + encoded: "0C0000000d6600" + error: "truncated javascript" + - + encoded: "0C0000000e6600" + error: "truncated symbol" + - + encoded: "0C0000000f6600" + error: "truncated javascript with scope" + - + encoded: "0C000000106600" + error: "truncated int32" + - + encoded: "0C000000116600" + error: "truncated timestamp" + - + encoded: "0C000000126600" + error: "truncated int64" + - + encoded: "0400000000" + error: basic + - + encoded: "0500000001" + error: basic + - + encoded: "05000000" + error: basic + - + encoded: "0700000002610078563412" + error: basic + - + encoded: "090000001061000500" + error: basic + - + encoded: "00000000000000000000" + error: basic + - + encoded: "1300000002666f6f00040000006261720000" + error: "basic" + - + encoded: "1800000003666f6f000f0000001062617200ffffff7f0000" + error: basic + - + encoded: "1500000003666f6f000c0000000862617200010000" + error: basic + - + encoded: "1c00000003666f6f001200000002626172000500000062617a000000" + error: basic + - + encoded: "1000000002610004000000616263ff00" + error: string is not null-terminated + - + encoded: "0c0000000200000000000000" + error: bad_string_length + - + encoded: "120000000200ffffffff666f6f6261720000" + error: bad_string_length + - + encoded: "0c0000000e00000000000000" + error: bad_string_length + - + encoded: "120000000e00ffffffff666f6f6261720000" + error: bad_string_length + - + encoded: 
"180000000c00fa5bd841d6585d9900" + error: "" + - + encoded: "1e0000000c00ffffffff666f6f626172005259b56afa5bd841d6585d9900" + error: bad_string_length + - + encoded: "0c0000000d00000000000000" + error: bad_string_length + - + encoded: "0c0000000d00ffffffff0000" + error: bad_string_length + - + encoded: "1c0000000f001500000000000000000c000000020001000000000000" + error: bad_string_length + - + encoded: "1c0000000f0015000000ffffffff000c000000020001000000000000" + error: bad_string_length + - + encoded: "1c0000000f001500000001000000000c000000020000000000000000" + error: bad_string_length + - + encoded: "1c0000000f001500000001000000000c0000000200ffffffff000000" + error: bad_string_length + - + encoded: "0E00000008616263646566676869707172737475" + error: "Run-on CString" + - + encoded: "0100000000" + error: "An object size that's too small to even include the object size, but is correctly encoded, along with a correct EOO (and no data)" + - + encoded: "1a0000000e74657374000c00000068656c6c6f20776f726c6400000500000000" + error: "One object, but with object size listed smaller than it is in the data" + - + encoded: "05000000" + error: "One object, missing the EOO at the end" + - + encoded: "0500000001" + error: "One object, sized correctly, with a spot for an EOO, but the EOO is 0x01" + - + encoded: "05000000ff" + error: "One object, sized correctly, with a spot for an EOO, but the EOO is 0xff" + - + encoded: "0500000070" + error: "One object, sized correctly, with a spot for an EOO, but the EOO is 0x70" + - + encoded: "07000000000000" + error: "Invalid BSON type low range" + - + encoded: "07000000800000" + error: "Invalid BSON type high range" + - + encoded: "090000000862000200" + error: "Invalid boolean value of 2" + - + encoded: "09000000086200ff00" + error: "Invalid boolean value of -1" + `, ` +--- +description: "Int32 type" +documents: + - + decoded: + i: -2147483648 + encoded: 0C0000001069000000008000 + - + decoded: + i: 2147483647 + encoded: 0C000000106900FFFFFF7F00 
+ - + decoded: + i: -1 + encoded: 0C000000106900FFFFFFFF00 + - + decoded: + i: 0 + encoded: 0C0000001069000000000000 + - + decoded: + i: 1 + encoded: 0C0000001069000100000000 + +`, ` +--- +description: "String type" +documents: + - + decoded: + s : "" + encoded: 0D000000027300010000000000 + - + decoded: + s: "a" + encoded: 0E00000002730002000000610000 + - + decoded: + s: "This is a string" + encoded: 1D0000000273001100000054686973206973206120737472696E670000 + - + decoded: + s: "κόσμε" + encoded: 180000000273000C000000CEBAE1BDB9CF83CEBCCEB50000 +`} From 162b080019054e3df8de9ae8ca55540df26fb7ce Mon Sep 17 00:00:00 2001 From: Wisdom Omuya Date: Mon, 20 Jul 2015 10:33:49 -0400 Subject: [PATCH 160/305] MGO-64: GridFS completeWrite doesn't create unique index --- gridfs.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/gridfs.go b/gridfs.go index b67e3ad45..7a8127195 100644 --- a/gridfs.go +++ b/gridfs.go @@ -528,7 +528,13 @@ func (file *GridFile) completeWrite() { } file.doc.MD5 = hexsum file.err = file.gfs.Files.Insert(file.doc) - file.gfs.Chunks.EnsureIndexKey("files_id", "n") + if file.err == nil { + index := Index{ + Key: []string{"files_id", "n"}, + Unique: true, + } + file.err = file.gfs.Chunks.EnsureIndex(index) + } } if file.err != nil { file.gfs.Chunks.RemoveAll(bson.D{{"files_id", file.doc.Id}}) From 242fb0d7df48acc84b895e4568a0e316aafd4929 Mon Sep 17 00:00:00 2001 From: Ryan Chipman Date: Thu, 23 Jul 2015 13:13:54 -0400 Subject: [PATCH 161/305] Add missing documentation for Dial Two connection string options (connect=replicaSet and replicaSet=) were supported in the code but not documented alongside the other supported connection string options. --- session.go | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/session.go b/session.go index 1273cc4bf..b488b7b5a 100644 --- a/session.go +++ b/session.go @@ -174,6 +174,19 @@ const defaultPrefetch = 0.25 // must be relaxed to Monotonic or Eventual via SetMode. 
// // +// connect=replicaSet +// +// Equivalent to the default connection behavior, but is a valid +// connection option that will not cause an error to be thrown. +// +// +// replicaSet= +// +// Defines the set name for the topology being monitored, and informs the +// automatic server discovery logic that the topology being monitored is +// a replica set. +// +// // authSource= // // Informs the database used to establish credentials and privileges From b61a0f95d77be7df839751965dc07087c723c8d5 Mon Sep 17 00:00:00 2001 From: Ryan Chipman Date: Thu, 23 Jul 2015 13:10:27 -0400 Subject: [PATCH 162/305] Add newline for consistent spacing in Dial godoc --- session.go | 1 + 1 file changed, 1 insertion(+) diff --git a/session.go b/session.go index b488b7b5a..c58793eb6 100644 --- a/session.go +++ b/session.go @@ -205,6 +205,7 @@ const defaultPrefetch = 0.25 // Defines the service name to use when authenticating with the GSSAPI // mechanism. Defaults to "mongodb". // +// // maxPoolSize= // // Defines the per-server socket pool limit. Defaults to 4096. From 4c778842c2647d6196e0bfcc042985940a698b3f Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Thu, 30 Jul 2015 17:45:37 +0200 Subject: [PATCH 163/305] Reduce excessive timeout delay on initial dial. The pinger was being run synchronously to obtain an initial ping value on mongoServer creation, which meant a large delay when the server was not available. Now the server is initialized with a large ping value to penalize it until the background pinger has a chance to update the value, and the server is returned immediately. Reported by John Morales on https://jira.mongodb.org/browse/MGO-54 Fixes #142. 
--- cluster_test.go | 2 +- server.go | 7 +++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/cluster_test.go b/cluster_test.go index 8da3c3b98..b8ed98696 100644 --- a/cluster_test.go +++ b/cluster_test.go @@ -1460,7 +1460,7 @@ func (s *S) TestPrimaryShutdownOnAuthShard(c *C) { } func (s *S) TestNearestSecondary(c *C) { - defer mgo.HackPingDelay(3 * time.Second)() + defer mgo.HackPingDelay(300 * time.Millisecond)() rs1a := "127.0.0.1:40011" rs1b := "127.0.0.1:40012" diff --git a/server.go b/server.go index 54fe233eb..9007d7e68 100644 --- a/server.go +++ b/server.go @@ -84,9 +84,8 @@ func newServer(addr string, tcpaddr *net.TCPAddr, sync chan bool, dial dialer) * sync: sync, dial: dial, info: &defaultServerInfo, + pingValue: time.Hour, // Push it back before an actual ping. } - // Once so the server gets a ping value, then loop in background. - server.pinger(false) go server.pinger(true) return server } @@ -274,7 +273,7 @@ NextTagSet: return false } -var pingDelay = 5 * time.Second +var pingDelay = 15 * time.Second func (server *mongoServer) pinger(loop bool) { var delay time.Duration @@ -297,7 +296,7 @@ func (server *mongoServer) pinger(loop bool) { time.Sleep(delay) } op := op - socket, _, err := server.AcquireSocket(0, 3*delay) + socket, _, err := server.AcquireSocket(0, delay) if err == nil { start := time.Now() _, _ = socket.SimpleQuery(&op) From dcc66e8824f4bbc7dcc7c330a5d295eaed4aab14 Mon Sep 17 00:00:00 2001 From: Gabriel Russell Date: Mon, 17 Aug 2015 16:02:40 -0400 Subject: [PATCH 164/305] make the mode type public --- session.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/session.go b/session.go index 1273cc4bf..4dba916f1 100644 --- a/session.go +++ b/session.go @@ -44,12 +44,12 @@ import ( "gopkg.in/mgo.v2-unstable/bson" ) -type mode int +type Mode int const ( - Eventual mode = 0 - Monotonic mode = 1 - Strong mode = 2 + Eventual Mode = 0 + Monotonic Mode = 1 + Strong Mode = 2 ) // When changing the 
Session type, check if newSession and copySession @@ -61,7 +61,7 @@ type Session struct { slaveSocket *mongoSocket masterSocket *mongoSocket slaveOk bool - consistency mode + consistency Mode queryConfig query safeOp *queryOp syncTimeout time.Duration @@ -481,7 +481,7 @@ func extractURL(s string) (*urlInfo, error) { return info, nil } -func newSession(consistency mode, cluster *mongoCluster, timeout time.Duration) (session *Session) { +func newSession(consistency Mode, cluster *mongoCluster, timeout time.Duration) (session *Session) { cluster.Acquire() session = &Session{ cluster_: cluster, @@ -1489,7 +1489,7 @@ func (s *Session) Refresh() { // Shifting between Monotonic and Strong modes will keep a previously // reserved connection for the session unless refresh is true or the // connection is unsuitable (to a secondary server in a Strong session). -func (s *Session) SetMode(consistency mode, refresh bool) { +func (s *Session) SetMode(consistency Mode, refresh bool) { s.m.Lock() debugf("Session %p: setting mode %d with refresh=%v (master=%p, slave=%p)", s, consistency, refresh, s.masterSocket, s.slaveSocket) s.consistency = consistency @@ -1505,7 +1505,7 @@ func (s *Session) SetMode(consistency mode, refresh bool) { } // Mode returns the current consistency mode for the session. -func (s *Session) Mode() mode { +func (s *Session) Mode() Mode { s.m.RLock() mode := s.consistency s.m.RUnlock() From 75bbbc6bf54a239dacbba502035b7e90127ee8e3 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Wed, 19 Aug 2015 10:18:35 -0300 Subject: [PATCH 165/305] Initial support for bulk updates. Includes internal improvements to the bulk running. 
--- bulk.go | 115 +++++++++++++++++++++++++++++++++++++++++--- bulk_test.go | 133 ++++++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 240 insertions(+), 8 deletions(-) diff --git a/bulk.go b/bulk.go index 23f450853..9fb3a9181 100644 --- a/bulk.go +++ b/bulk.go @@ -3,8 +3,6 @@ package mgo // Bulk represents an operation that can be prepared with several // orthogonal changes before being delivered to the server. // -// WARNING: This API is still experimental. -// // Relevant documentation: // // http://blog.mongodb.org/post/84922794768/mongodbs-new-bulk-api @@ -12,7 +10,20 @@ package mgo type Bulk struct { c *Collection ordered bool - inserts []interface{} + actions []bulkAction +} + +type bulkOp int + +const ( + bulkInsert bulkOp = iota + 1 + bulkUpdate + bulkUpdateAll +) + +type bulkAction struct { + op bulkOp + docs []interface{} } // BulkError holds an error returned from running a Bulk operation. @@ -52,20 +63,110 @@ func (b *Bulk) Unordered() { b.ordered = false } +func (b *Bulk) action(op bulkOp) *bulkAction { + if len(b.actions) > 0 && b.actions[len(b.actions)-1].op == op { + return &b.actions[len(b.actions)-1] + } + if !b.ordered { + for i := range b.actions { + if b.actions[i].op == op { + return &b.actions[i] + } + } + } + b.actions = append(b.actions, bulkAction{op: op}) + return &b.actions[len(b.actions)-1] +} + + // Insert queues up the provided documents for insertion. func (b *Bulk) Insert(docs ...interface{}) { - b.inserts = append(b.inserts, docs...) + action := b.action(bulkInsert) + action.docs = append(action.docs, docs...) +} + +// Update queues up the provided pairs of updating instructions. +// The first element of each pair selects which documents must be +// updated, and the second element defines how to update it. +// Each pair matches exactly one document for updating at most. 
+func (b *Bulk) Update(pairs ...interface{}) { + if len(pairs)%2 != 0 { + panic("Bulk.Update requires an even number of parameters") + } + action := b.action(bulkUpdate) + action.docs = append(action.docs, pairs...) +} + +// UpdateAll queues up the provided pairs of updating instructions. +// The first element of each pair selects which documents must be +// updated, and the second element defines how to update it. +// Each pair updates all documents matching the selector. +func (b *Bulk) UpdateAll(pairs ...interface{}) { + if len(pairs)%2 != 0 { + panic("Bulk.UpdateAll requires an even number of parameters") + } + action := b.action(bulkUpdateAll) + action.docs = append(action.docs, pairs...) } // Run runs all the operations queued up. func (b *Bulk) Run() (*BulkResult, error) { - op := &insertOp{b.c.FullName, b.inserts, 0} + var result BulkResult + var berr bulkError + var failed bool + for i := range b.actions { + action := &b.actions[i] + var ok bool + switch action.op { + case bulkInsert: + ok = b.runInsert(action, &result, &berr) + case bulkUpdate: + ok = b.runUpdate(action, &result, &berr, 0) + case bulkUpdateAll: + ok = b.runUpdate(action, &result, &berr, 2) + default: + panic("unknown bulk operation") + } + if !ok { + failed = true + if b.ordered { + break + } + } + } + if failed { + return nil, &berr + } + return &result, nil +} + +func (b *Bulk) runInsert(action *bulkAction, result *BulkResult, berr *bulkError) bool { + op := &insertOp{b.c.FullName, action.docs, 0} if !b.ordered { op.flags = 1 // ContinueOnError } _, err := b.c.writeQuery(op) if err != nil { - return nil, &bulkError{err} + berr.err = err + return false + } + return true +} + +func (b *Bulk) runUpdate(action *bulkAction, result *BulkResult, berr *bulkError, flags uint32) bool { + ok := true + for i := 0; i < len(action.docs); i += 2 { + _, err := b.c.writeQuery(&updateOp{b.c.FullName, action.docs[i], action.docs[i+1], flags}) + if err != nil { + ok = false + berr.err = &bulkError{err} + 
if b.ordered { + break + } + } + // TODO Report number of updates into result. } - return &BulkResult{}, nil + return ok } + +// TODO Introduce IsNotFound that also works on bulk. diff --git a/bulk_test.go b/bulk_test.go index e126f9d20..32a52315e 100644 --- a/bulk_test.go +++ b/bulk_test.go @@ -1,6 +1,6 @@ // mgo - MongoDB driver for Go // -// Copyright (c) 2010-2014 - Gustavo Niemeyer +// Copyright (c) 2010-2015 - Gustavo Niemeyer // // All rights reserved. // @@ -61,6 +61,7 @@ func (s *S) TestBulkInsertError(c *C) { bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2}, M{"_id": 3}) _, err = bulk.Run() c.Assert(err, ErrorMatches, ".*duplicate key.*") + c.Assert(mgo.IsDup(err), Equals, true) type doc struct { N int `_id` @@ -129,3 +130,133 @@ func (s *S) TestBulkInsertErrorUnorderedSplitBatch(c *C) { c.Assert(err, IsNil) c.Assert(res.Id, Equals, 1500) } + +func (s *S) TestBulkUpdate(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3}) + c.Assert(err, IsNil) + + bulk := coll.Bulk() + bulk.Update(M{"n": 2}, M{"$set": M{"n": 20}}) + bulk.Update(M{"n": 1}, M{"$set": M{"n": 10}}, M{"n": 3}, M{"$set": M{"n": 30}}) + r, err := bulk.Run() + c.Assert(err, IsNil) + c.Assert(r, FitsTypeOf, &mgo.BulkResult{}) + + type doc struct{ N int } + var res []doc + err = coll.Find(nil).Sort("n").All(&res) + c.Assert(err, IsNil) + c.Assert(res, DeepEquals, []doc{{10}, {20}, {30}}) +} + +func (s *S) TestBulkUpdateError(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3}) + c.Assert(err, IsNil) + + bulk := coll.Bulk() + bulk.Update( + M{"n": 1}, M{"$set": M{"n": 10}}, + M{"n": 2}, M{"$set": M{"n": 20, "_id": 20}}, + M{"n": 3}, M{"$set": M{"n": 30}}, + ) + r, err := bulk.Run() + c.Assert(err, ErrorMatches, 
".*_id.*") + c.Assert(r, FitsTypeOf, &mgo.BulkResult{}) + + type doc struct{ N int } + var res []doc + err = coll.Find(nil).Sort("n").All(&res) + c.Assert(err, IsNil) + c.Assert(res, DeepEquals, []doc{{2}, {3}, {10}}) +} + +func (s *S) TestBulkUpdateErrorUnordered(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3}) + c.Assert(err, IsNil) + + bulk := coll.Bulk() + bulk.Unordered() + bulk.Update( + M{"n": 1}, M{"$set": M{"n": 10}}, + M{"n": 2}, M{"$set": M{"n": 20, "_id": 20}}, + M{"n": 3}, M{"$set": M{"n": 30}}, + ) + r, err := bulk.Run() + c.Assert(err, ErrorMatches, ".*_id.*") + c.Assert(r, FitsTypeOf, &mgo.BulkResult{}) + + type doc struct{ N int } + var res []doc + err = coll.Find(nil).Sort("n").All(&res) + c.Assert(err, IsNil) + c.Assert(res, DeepEquals, []doc{{2}, {10}, {30}}) +} + +func (s *S) TestBulkUpdateAll(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3}) + c.Assert(err, IsNil) + + bulk := coll.Bulk() + bulk.UpdateAll(M{"n": 1}, M{"$set": M{"n": 10}}) + bulk.UpdateAll(M{}, M{"$inc": M{"n": 1}}, M{"n": 11}, M{"$set": M{"n": 5}}) + r, err := bulk.Run() + c.Assert(err, IsNil) + c.Assert(r, FitsTypeOf, &mgo.BulkResult{}) + + type doc struct{ N int } + var res []doc + err = coll.Find(nil).Sort("n").All(&res) + c.Assert(err, IsNil) + c.Assert(res, DeepEquals, []doc{{3}, {4}, {5}}) +} + +func (s *S) TestBulkMixedUnordered(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + // Abuse undefined behavior to ensure the desired implementation is in place. 
+ bulk := coll.Bulk() + bulk.Unordered() + bulk.Insert(M{"n": 1}) + bulk.Update(M{"n": 2}, M{"$inc": M{"n": 1}}) + bulk.Insert(M{"n": 2}) + bulk.Update(M{"n": 3}, M{"$inc": M{"n": 1}}) + bulk.Update(M{"n": 1}, M{"$inc": M{"n": 1}}) + bulk.Insert(M{"n": 3}) + _, err = bulk.Run() + c.Assert(err, IsNil) + + type doc struct{ N int } + var res []doc + err = coll.Find(nil).Sort("n").All(&res) + c.Assert(err, IsNil) + c.Assert(res, DeepEquals, []doc{{2}, {3}, {4}}) +} + From c84ec505e3b6f7e926e26006a46baac8d1b4bab8 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Wed, 19 Aug 2015 10:34:05 -0300 Subject: [PATCH 166/305] IsDup now works on bulkError. --- session.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/session.go b/session.go index 1273cc4bf..72e6606b1 100644 --- a/session.go +++ b/session.go @@ -2204,6 +2204,8 @@ func IsDup(err error) bool { return e.Code == 11000 || e.Code == 11001 || e.Code == 12582 || e.Code == 16460 && strings.Contains(e.Err, " E11000 ") case *QueryError: return e.Code == 11000 || e.Code == 11001 || e.Code == 12582 + case *bulkError: + return IsDup(e.err) } return false } From 159b618c57fd7ebbf3f5b1d9647954ef6155fd7d Mon Sep 17 00:00:00 2001 From: Jamie Osler Date: Fri, 21 Aug 2015 09:55:40 +0100 Subject: [PATCH 167/305] update documentation to io.EOF instead of os.EOF --- gridfs.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gridfs.go b/gridfs.go index b67e3ad45..18ecbfb60 100644 --- a/gridfs.go +++ b/gridfs.go @@ -692,7 +692,7 @@ func (file *GridFile) Seek(offset int64, whence int) (pos int64, err error) { // Read reads into b the next available data from the file and // returns the number of bytes written and an error in case // something wrong happened. At the end of the file, n will -// be zero and err will be set to os.EOF. +// be zero and err will be set to io.EOF. // // The parameters and behavior of this function turn the file // into an io.Reader. 
From f4923a569136442e900b8cf5c1a706c0a8b0883c Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Fri, 21 Aug 2015 12:30:02 -0300 Subject: [PATCH 168/305] Resolve IPv4 addresses only for now. Makes Go 1.5 happy, and fixes #153. --- cluster.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster.go b/cluster.go index 55bb7bbc0..9ea0cb9c1 100644 --- a/cluster.go +++ b/cluster.go @@ -409,7 +409,7 @@ func (cluster *mongoCluster) server(addr string, tcpaddr *net.TCPAddr) *mongoSer func resolveAddr(addr string) (*net.TCPAddr, error) { // This hack allows having a timeout on resolution. - conn, err := net.DialTimeout("udp", addr, 10*time.Second) + conn, err := net.DialTimeout("udp4", addr, 10*time.Second) if err != nil { log("SYNC Failed to resolve server address: ", addr) return nil, errors.New("failed to resolve server address: " + addr) From d650c4e46b67e4e5556f8bf8bd47eb0415880f30 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Fri, 21 Aug 2015 12:33:03 -0300 Subject: [PATCH 169/305] Resolve IPv4 addresses only for now. Makes Go 1.5 happy, and fixes #153. --- cluster.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster.go b/cluster.go index ad17458de..90f5dd12a 100644 --- a/cluster.go +++ b/cluster.go @@ -409,7 +409,7 @@ func (cluster *mongoCluster) server(addr string, tcpaddr *net.TCPAddr) *mongoSer func resolveAddr(addr string) (*net.TCPAddr, error) { // This hack allows having a timeout on resolution. - conn, err := net.DialTimeout("udp", addr, 10*time.Second) + conn, err := net.DialTimeout("udp4", addr, 10*time.Second) if err != nil { log("SYNC Failed to resolve server address: ", addr) return nil, errors.New("failed to resolve server address: " + addr) From e6cf61da1875a67a0c47fc5de34ec1fd4f155d41 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Tue, 25 Aug 2015 11:50:14 -0300 Subject: [PATCH 170/305] Allow decoding plain BSON binary into Binary. 
Doing that is generally a mistake, since one may decode such data straight into a []byte value. But fixing for correctness anyway. Fixes #157. --- bson/bson_test.go | 3 +++ bson/decode.go | 6 ++++++ 2 files changed, 9 insertions(+) diff --git a/bson/bson_test.go b/bson/bson_test.go index c95cb7d7b..9b63f8ebb 100644 --- a/bson/bson_test.go +++ b/bson/bson_test.go @@ -1280,6 +1280,9 @@ var twoWayCrossItems = []crossTypeItem{ {&inlineMapInt{A: 1, M: nil}, map[string]int{"a": 1}}, {&inlineMapMyM{A: 1, M: MyM{"b": MyM{"c": 3}}}, map[string]interface{}{"a": 1, "b": map[string]interface{}{"c": 3}}}, + // []byte <=> Binary + {&struct{ B []byte }{[]byte("abc")}, map[string]bson.Binary{"b": bson.Binary{Data: []byte("abc")}}}, + // []byte <=> MyBytes {&struct{ B MyBytes }{[]byte("abc")}, map[string]string{"b": "abc"}}, {&struct{ B MyBytes }{[]byte{}}, map[string]string{"b": ""}}, diff --git a/bson/decode.go b/bson/decode.go index ba7a3dfa2..0ee8d22d9 100644 --- a/bson/decode.go +++ b/bson/decode.go @@ -727,6 +727,12 @@ func (d *decoder) readElemTo(out reflect.Value, kind byte) (good bool) { out.Set(reflect.ValueOf(u).Elem()) return true } + if outt == typeBinary { + if b, ok := in.([]byte); ok { + out.Set(reflect.ValueOf(Binary{Data: b})) + return true + } + } } return false From d1c150dbcbed7291355c872aa44779fc327bb95e Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Fri, 28 Aug 2015 18:46:55 -0300 Subject: [PATCH 171/305] Improvements in the bulk operations implementation. --- bulk.go | 46 ++++++++++++++++++++++------- session.go | 87 ++++++++++++++++++++++++++++++++++++++++++------------ socket.go | 22 +++++++------- 3 files changed, 116 insertions(+), 39 deletions(-) diff --git a/bulk.go b/bulk.go index 9fb3a9181..d08d51034 100644 --- a/bulk.go +++ b/bulk.go @@ -1,5 +1,9 @@ package mgo +import ( + "gopkg.in/mgo.v2-unstable/bson" +) + // Bulk represents an operation that can be prepared with several // orthogonal changes before being delivered to the server. 
// @@ -26,6 +30,8 @@ type bulkAction struct { docs []interface{} } +type bulkUpdateOp []interface{} + // BulkError holds an error returned from running a Bulk operation. // // TODO: This is private for the moment, until we understand exactly how @@ -94,7 +100,17 @@ func (b *Bulk) Update(pairs ...interface{}) { panic("Bulk.Update requires an even number of parameters") } action := b.action(bulkUpdate) - action.docs = append(action.docs, pairs...) + for i := 0; i < len(pairs); i += 2 { + selector := pairs[i] + if selector == nil { + selector = bson.D{} + } + action.docs = append(action.docs, &updateOp{ + Collection: b.c.FullName, + Selector: selector, + Update: pairs[i+1], + }) + } } // UpdateAll queues up the provided pairs of updating instructions. @@ -105,8 +121,20 @@ func (b *Bulk) UpdateAll(pairs ...interface{}) { if len(pairs)%2 != 0 { panic("Bulk.UpdateAll requires an even number of parameters") } - action := b.action(bulkUpdateAll) - action.docs = append(action.docs, pairs...) + action := b.action(bulkUpdate) + for i := 0; i < len(pairs); i += 2 { + selector := pairs[i] + if selector == nil { + selector = bson.D{} + } + action.docs = append(action.docs, &updateOp{ + Collection: b.c.FullName, + Selector: selector, + Update: pairs[i+1], + Flags: 2, + Multi: true, + }) + } } // Run runs all the operations queued up. 
@@ -121,9 +149,7 @@ func (b *Bulk) Run() (*BulkResult, error) { case bulkInsert: ok = b.runInsert(action, &result, &berr) case bulkUpdate: - ok = b.runUpdate(action, &result, &berr, 0) - case bulkUpdateAll: - ok = b.runUpdate(action, &result, &berr, 2) + ok = b.runUpdate(action, &result, &berr) default: panic("unknown bulk operation") } @@ -145,7 +171,7 @@ func (b *Bulk) runInsert(action *bulkAction, result *BulkResult, berr *bulkError if !b.ordered { op.flags = 1 // ContinueOnError } - _, err := b.c.writeQuery(op) + _, err := b.c.writeOp(op, b.ordered) if err != nil { berr.err = err return false @@ -153,10 +179,10 @@ func (b *Bulk) runInsert(action *bulkAction, result *BulkResult, berr *bulkError return true } -func (b *Bulk) runUpdate(action *bulkAction, result *BulkResult, berr *bulkError, flags uint32) bool { +func (b *Bulk) runUpdate(action *bulkAction, result *BulkResult, berr *bulkError) bool { ok := true - for i := 0; i < len(action.docs); i += 2 { - _, err := b.c.writeQuery(&updateOp{b.c.FullName, action.docs[i], action.docs[i+1], flags}) + for _, op := range action.docs { + _, err := b.c.writeOp(op, b.ordered) if err != nil { ok = false berr.err = &bulkError{err} diff --git a/session.go b/session.go index c8a12434c..0b3601b37 100644 --- a/session.go +++ b/session.go @@ -2215,7 +2215,7 @@ func IsDup(err error) bool { // happens while inserting the provided documents, the returned error will // be of type *LastError. 
func (c *Collection) Insert(docs ...interface{}) error { - _, err := c.writeQuery(&insertOp{c.FullName, docs, 0}) + _, err := c.writeOp(&insertOp{c.FullName, docs, 0}, true) return err } @@ -2231,7 +2231,15 @@ func (c *Collection) Insert(docs ...interface{}) error { // http://www.mongodb.org/display/DOCS/Atomic+Operations // func (c *Collection) Update(selector interface{}, update interface{}) error { - lerr, err := c.writeQuery(&updateOp{c.FullName, selector, update, 0}) + if selector == nil { + selector = bson.D{} + } + op := updateOp{ + Collection: c.FullName, + Selector: selector, + Update: update, + } + lerr, err := c.writeOp(&op, true) if err == nil && lerr != nil && !lerr.UpdatedExisting { return ErrNotFound } @@ -2267,7 +2275,17 @@ type ChangeInfo struct { // http://www.mongodb.org/display/DOCS/Atomic+Operations // func (c *Collection) UpdateAll(selector interface{}, update interface{}) (info *ChangeInfo, err error) { - lerr, err := c.writeQuery(&updateOp{c.FullName, selector, update, 2}) + if selector == nil { + selector = bson.D{} + } + op := updateOp{ + Collection: c.FullName, + Selector: selector, + Update: update, + Flags: 2, + Multi: true, + } + lerr, err := c.writeOp(&op, true) if err == nil && lerr != nil { info = &ChangeInfo{Updated: lerr.N} } @@ -2288,7 +2306,17 @@ func (c *Collection) UpdateAll(selector interface{}, update interface{}) (info * // http://www.mongodb.org/display/DOCS/Atomic+Operations // func (c *Collection) Upsert(selector interface{}, update interface{}) (info *ChangeInfo, err error) { - lerr, err := c.writeQuery(&updateOp{c.FullName, selector, update, 1}) + if selector == nil { + selector = bson.D{} + } + op := updateOp{ + Collection: c.FullName, + Selector: selector, + Update: update, + Flags: 1, + Upsert: true, + } + lerr, err := c.writeOp(&op, true) if err == nil && lerr != nil { info = &ChangeInfo{} if lerr.UpdatedExisting { @@ -2320,7 +2348,7 @@ func (c *Collection) UpsertId(id interface{}, update interface{}) (info 
*ChangeI // http://www.mongodb.org/display/DOCS/Removing // func (c *Collection) Remove(selector interface{}) error { - lerr, err := c.writeQuery(&deleteOp{c.FullName, selector, 1}) + lerr, err := c.writeOp(&deleteOp{c.FullName, selector, 1}, true) if err == nil && lerr != nil && lerr.N == 0 { return ErrNotFound } @@ -2346,7 +2374,7 @@ func (c *Collection) RemoveId(id interface{}) error { // http://www.mongodb.org/display/DOCS/Removing // func (c *Collection) RemoveAll(selector interface{}) (info *ChangeInfo, err error) { - lerr, err := c.writeQuery(&deleteOp{c.FullName, selector, 0}) + lerr, err := c.writeOp(&deleteOp{c.FullName, selector, 0}, true) if err == nil && lerr != nil { info = &ChangeInfo{Removed: lerr.N} } @@ -4056,14 +4084,13 @@ type writeCmdResult struct { } `bson:"writeConcernError"` } -// writeQuery runs the given modifying operation, potentially followed up +// writeOp runs the given modifying operation, potentially followed up // by a getLastError command in case the session is in safe mode. The // LastError result is made available in lerr, and if lerr.Err is set it // will also be returned as err. 
-func (c *Collection) writeQuery(op interface{}) (lerr *LastError, err error) { +func (c *Collection) writeOp(op interface{}, ordered bool) (lerr *LastError, err error) { s := c.Database.Session - dbname := c.Database.Name - socket, err := s.acquireSocket(dbname == "local") + socket, err := s.acquireSocket(c.Database.Name == "local") if err != nil { return nil, err } @@ -4086,7 +4113,7 @@ func (c *Collection) writeQuery(op interface{}) (lerr *LastError, err error) { l = len(all) } op.documents = all[i:l] - _, err := c.writeCommand(socket, safeOp, op) + _, err := c.writeOpCommand(socket, safeOp, op, ordered) if err != nil { if op.flags&1 != 0 { if firstErr == nil { @@ -4099,9 +4126,27 @@ func (c *Collection) writeQuery(op interface{}) (lerr *LastError, err error) { } return nil, firstErr } - return c.writeCommand(socket, safeOp, op) + return c.writeOpCommand(socket, safeOp, op, ordered) + } else if updateOps, ok := op.(bulkUpdateOp); ok { + var firstErr error + for _, updateOp := range updateOps { + _, err := c.writeOpQuery(socket, safeOp, updateOp, ordered) + if err != nil { + if !ordered { + if firstErr == nil { + firstErr = err + } + } else { + return nil, err + } + } + } + return nil, firstErr } + return c.writeOpQuery(socket, safeOp, op, ordered) +} +func (c *Collection) writeOpQuery(socket *mongoSocket, safeOp *queryOp, op interface{}, ordered bool) (lerr *LastError, err error) { if safeOp == nil { return nil, socket.Query(op) } @@ -4111,7 +4156,7 @@ func (c *Collection) writeQuery(op interface{}) (lerr *LastError, err error) { var replyErr error mutex.Lock() query := *safeOp // Copy the data. 
- query.collection = dbname + ".$cmd" + query.collection = c.Database.Name + ".$cmd" query.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) { replyData = docData replyErr = err @@ -4141,7 +4186,7 @@ func (c *Collection) writeQuery(op interface{}) (lerr *LastError, err error) { return result, nil } -func (c *Collection) writeCommand(socket *mongoSocket, safeOp *queryOp, op interface{}) (lerr *LastError, err error) { +func (c *Collection) writeOpCommand(socket *mongoSocket, safeOp *queryOp, op interface{}, ordered bool) (lerr *LastError, err error) { var writeConcern interface{} if safeOp == nil { writeConcern = bson.D{{"w", 0}} @@ -4161,15 +4206,19 @@ func (c *Collection) writeCommand(socket *mongoSocket, safeOp *queryOp, op inter } case *updateOp: // http://docs.mongodb.org/manual/reference/command/update - selector := op.selector - if selector == nil { - selector = bson.D{} + cmd = bson.D{ + {"update", c.Name}, + {"updates", []interface{}{op}}, + {"writeConcern", writeConcern}, + {"ordered", ordered}, } + case bulkUpdateOp: + // http://docs.mongodb.org/manual/reference/command/update cmd = bson.D{ {"update", c.Name}, - {"updates", []bson.D{{{"q", selector}, {"u", op.update}, {"upsert", op.flags&1 != 0}, {"multi", op.flags&2 != 0}}}}, + {"updates", op}, {"writeConcern", writeConcern}, - //{"ordered", }, + {"ordered", ordered}, } case *deleteOp: // http://docs.mongodb.org/manual/reference/command/delete diff --git a/socket.go b/socket.go index 0aa637cb3..725c37e94 100644 --- a/socket.go +++ b/socket.go @@ -130,10 +130,12 @@ type insertOp struct { } type updateOp struct { - collection string // "database.collection" - selector interface{} - update interface{} - flags uint32 + Collection string `bson:"-"` // "database.collection" + Selector interface{} `bson:"q"` + Update interface{} `bson:"u"` + Flags uint32 `bson:"-"` + Multi bool `bson:"multi,omitempty"` + Upsert bool `bson:"upsert,omitempty"` } type deleteOp struct { @@ -370,15 +372,15 @@ 
func (socket *mongoSocket) Query(ops ...interface{}) (err error) { case *updateOp: buf = addHeader(buf, 2001) buf = addInt32(buf, 0) // Reserved - buf = addCString(buf, op.collection) - buf = addInt32(buf, int32(op.flags)) - debugf("Socket %p to %s: serializing selector document: %#v", socket, socket.addr, op.selector) - buf, err = addBSON(buf, op.selector) + buf = addCString(buf, op.Collection) + buf = addInt32(buf, int32(op.Flags)) + debugf("Socket %p to %s: serializing selector document: %#v", socket, socket.addr, op.Selector) + buf, err = addBSON(buf, op.Selector) if err != nil { return err } - debugf("Socket %p to %s: serializing update document: %#v", socket, socket.addr, op.update) - buf, err = addBSON(buf, op.update) + debugf("Socket %p to %s: serializing update document: %#v", socket, socket.addr, op.Update) + buf, err = addBSON(buf, op.Update) if err != nil { return err } From 22287bab4379e1fbf6002fb4eb769888f3fb224c Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Wed, 2 Sep 2015 10:35:24 -0300 Subject: [PATCH 172/305] Support explicit IPv6 address dialing. Still need to implement full IPv6 resolution properly. --- cluster.go | 19 ++++++++++++++++++- session_test.go | 10 ++++++++++ testdb/supervisord.conf | 2 +- 3 files changed, 29 insertions(+), 2 deletions(-) diff --git a/cluster.go b/cluster.go index 9ea0cb9c1..bcebf4202 100644 --- a/cluster.go +++ b/cluster.go @@ -34,6 +34,8 @@ import ( "time" "gopkg.in/mgo.v2/bson" + "strconv" + "strings" ) // --------------------------------------------------------------------------- @@ -408,7 +410,22 @@ func (cluster *mongoCluster) server(addr string, tcpaddr *net.TCPAddr) *mongoSer } func resolveAddr(addr string) (*net.TCPAddr, error) { - // This hack allows having a timeout on resolution. + // Simple cases that do not need actual resolution. Works with IPv4 and v6. 
+ if host, port, err := net.SplitHostPort(addr); err == nil { + if port, _ := strconv.Atoi(port); port > 0 { + zone := "" + if i := strings.LastIndex(host, "%"); i >= 0 { + zone = host[i+1:] + host = host[:i] + } + ip := net.ParseIP(host) + if ip != nil { + return &net.TCPAddr{IP: ip, Port: port, Zone: zone}, nil + } + } + } + + // This unfortunate hack allows having a timeout on address resolution. conn, err := net.DialTimeout("udp4", addr, 10*time.Second) if err != nil { log("SYNC Failed to resolve server address: ", addr) diff --git a/session_test.go b/session_test.go index 3ec5550a4..8e2d7c948 100644 --- a/session_test.go +++ b/session_test.go @@ -84,6 +84,16 @@ func (s *S) TestPing(c *C) { c.Assert(stats.ReceivedOps, Equals, 1) } +func (s *S) TestDialIPAddress(c *C) { + session, err := mgo.Dial("127.0.0.1:40001") + c.Assert(err, IsNil) + defer session.Close() + + session, err = mgo.Dial("[::1%]:40001") + c.Assert(err, IsNil) + defer session.Close() +} + func (s *S) TestURLSingle(c *C) { session, err := mgo.Dial("mongodb://localhost:40001/") c.Assert(err, IsNil) diff --git a/testdb/supervisord.conf b/testdb/supervisord.conf index 1c2b859a2..a4d634ec6 100644 --- a/testdb/supervisord.conf +++ b/testdb/supervisord.conf @@ -14,7 +14,7 @@ serverurl = http://127.0.0.1:9001 supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface [program:db1] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --dbpath %(here)s/db1 --bind_ip=127.0.0.1 --port 40001 +command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --dbpath %(here)s/db1 --bind_ip=127.0.0.1,::1 --port 40001 --ipv6 [program:db2] command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --dbpath %(here)s/db2 --bind_ip=127.0.0.1 --port 40002 --auth From 93b17aa0924106cf7826c290c5cff8fada04146f Mon Sep 17 00:00:00 2001 From: Gustavo 
Niemeyer Date: Wed, 2 Sep 2015 11:01:06 -0300 Subject: [PATCH 173/305] Support explicit IPv6 address dialing. Still need to implement full IPv6 resolution properly. --- cluster.go | 19 ++++++++++++++++++- session_test.go | 10 ++++++++++ testdb/supervisord.conf | 2 +- 3 files changed, 29 insertions(+), 2 deletions(-) diff --git a/cluster.go b/cluster.go index 90f5dd12a..47e04a5f4 100644 --- a/cluster.go +++ b/cluster.go @@ -34,6 +34,8 @@ import ( "time" "gopkg.in/mgo.v2-unstable/bson" + "strconv" + "strings" ) // --------------------------------------------------------------------------- @@ -408,7 +410,22 @@ func (cluster *mongoCluster) server(addr string, tcpaddr *net.TCPAddr) *mongoSer } func resolveAddr(addr string) (*net.TCPAddr, error) { - // This hack allows having a timeout on resolution. + // Simple cases that do not need actual resolution. Works with IPv4 and v6. + if host, port, err := net.SplitHostPort(addr); err == nil { + if port, _ := strconv.Atoi(port); port > 0 { + zone := "" + if i := strings.LastIndex(host, "%"); i >= 0 { + zone = host[i+1:] + host = host[:i] + } + ip := net.ParseIP(host) + if ip != nil { + return &net.TCPAddr{IP: ip, Port: port, Zone: zone}, nil + } + } + } + + // This unfortunate hack allows having a timeout on address resolution. 
conn, err := net.DialTimeout("udp4", addr, 10*time.Second) if err != nil { log("SYNC Failed to resolve server address: ", addr) diff --git a/session_test.go b/session_test.go index 64e893642..e8290fb05 100644 --- a/session_test.go +++ b/session_test.go @@ -84,6 +84,16 @@ func (s *S) TestPing(c *C) { c.Assert(stats.ReceivedOps, Equals, 1) } +func (s *S) TestDialIPAddress(c *C) { + session, err := mgo.Dial("127.0.0.1:40001") + c.Assert(err, IsNil) + defer session.Close() + + session, err = mgo.Dial("[::1%]:40001") + c.Assert(err, IsNil) + defer session.Close() +} + func (s *S) TestURLSingle(c *C) { session, err := mgo.Dial("mongodb://localhost:40001/") c.Assert(err, IsNil) diff --git a/testdb/supervisord.conf b/testdb/supervisord.conf index 1c2b859a2..a4d634ec6 100644 --- a/testdb/supervisord.conf +++ b/testdb/supervisord.conf @@ -14,7 +14,7 @@ serverurl = http://127.0.0.1:9001 supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface [program:db1] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --dbpath %(here)s/db1 --bind_ip=127.0.0.1 --port 40001 +command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --dbpath %(here)s/db1 --bind_ip=127.0.0.1,::1 --port 40001 --ipv6 [program:db2] command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --dbpath %(here)s/db2 --bind_ip=127.0.0.1 --port 40002 --auth From 5df595399cd12bb7302f6cce6eb2f447d87d304d Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Tue, 8 Sep 2015 15:08:55 -0300 Subject: [PATCH 174/305] Add bulk upsert method. 
--- bulk.go | 24 ++++++++++++++++++++++++ bulk_test.go | 23 +++++++++++++++++++++++ 2 files changed, 47 insertions(+) diff --git a/bulk.go b/bulk.go index d08d51034..4c5f4c9b2 100644 --- a/bulk.go +++ b/bulk.go @@ -137,6 +137,30 @@ func (b *Bulk) UpdateAll(pairs ...interface{}) { } } +// Upsert queues up the provided pairs of upserting instructions. +// The first element of each pair selects which documents must be +// updated, and the second element defines how to update it. +// Each pair matches exactly one document for updating at most. +func (b *Bulk) Upsert(pairs ...interface{}) { + if len(pairs)%2 != 0 { + panic("Bulk.Update requires an even number of parameters") + } + action := b.action(bulkUpdate) + for i := 0; i < len(pairs); i += 2 { + selector := pairs[i] + if selector == nil { + selector = bson.D{} + } + action.docs = append(action.docs, &updateOp{ + Collection: b.c.FullName, + Selector: selector, + Update: pairs[i+1], + Flags: 1, + Upsert: true, + }) + } +} + // Run runs all the operations queued up. 
func (b *Bulk) Run() (*BulkResult, error) { var result BulkResult diff --git a/bulk_test.go b/bulk_test.go index 32a52315e..4c479d67c 100644 --- a/bulk_test.go +++ b/bulk_test.go @@ -260,3 +260,26 @@ func (s *S) TestBulkMixedUnordered(c *C) { c.Assert(res, DeepEquals, []doc{{2}, {3}, {4}}) } +func (s *S) TestBulkUpsert(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3}) + c.Assert(err, IsNil) + + bulk := coll.Bulk() + bulk.Upsert(M{"n": 2}, M{"$set": M{"n": 20}}) + bulk.Upsert(M{"n": 4}, M{"$set": M{"n": 40}}, M{"n": 3}, M{"$set": M{"n": 30}}) + r, err := bulk.Run() + c.Assert(err, IsNil) + c.Assert(r, FitsTypeOf, &mgo.BulkResult{}) + + type doc struct{ N int } + var res []doc + err = coll.Find(nil).Sort("n").All(&res) + c.Assert(err, IsNil) + c.Assert(res, DeepEquals, []doc{{1}, {20}, {30}, {40}}) +} From 2e755e422347df8f7bf035612e3397569a69d332 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Tue, 8 Sep 2015 16:05:57 -0300 Subject: [PATCH 175/305] Enable write commands on 2.6+ (wire version 2+). --- session.go | 3 +-- session_test.go | 9 ++++----- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/session.go b/session.go index 0b3601b37..9e567e3a6 100644 --- a/session.go +++ b/session.go @@ -4100,8 +4100,7 @@ func (c *Collection) writeOp(op interface{}, ordered bool) (lerr *LastError, err safeOp := s.safeOp s.m.RUnlock() - // TODO Enable this path for wire version 2 as well. - if socket.ServerInfo().MaxWireVersion >= 3 { + if socket.ServerInfo().MaxWireVersion >= 2 { // Servers with a more recent write protocol benefit from write commands. 
if op, ok := op.(*insertOp); ok && len(op.documents) > 1000 { var firstErr error diff --git a/session_test.go b/session_test.go index e8290fb05..2979068f0 100644 --- a/session_test.go +++ b/session_test.go @@ -1699,7 +1699,7 @@ func (s *S) TestFindTailTimeoutWithSleep(c *C) { // 1*QUERY for nonce + 1*GET_MORE_OP on Next + 1*GET_MORE_OP on Next after sleep + // 1*INSERT_OP + 1*QUERY_OP for getLastError on insert of 47 stats := mgo.GetStats() - if s.versionAtLeast(3, 0) { // TODO Will be 2.6 when write commands are enabled for it. + if s.versionAtLeast(2, 6) { c.Assert(stats.SentOps, Equals, 4) } else { c.Assert(stats.SentOps, Equals, 5) @@ -1796,7 +1796,7 @@ func (s *S) TestFindTailTimeoutNoSleep(c *C) { // 1*QUERY_OP for nonce + 1*GET_MORE_OP on Next + // 1*INSERT_OP + 1*QUERY_OP for getLastError on insert of 47 stats := mgo.GetStats() - if s.versionAtLeast(3, 0) { // TODO Will be 2.6 when write commands are enabled for it. + if s.versionAtLeast(2, 6) { c.Assert(stats.SentOps, Equals, 3) } else { c.Assert(stats.SentOps, Equals, 4) @@ -1892,7 +1892,7 @@ func (s *S) TestFindTailNoTimeout(c *C) { // 1*QUERY_OP for nonce + 1*GET_MORE_OP on Next + // 1*INSERT_OP + 1*QUERY_OP for getLastError on insert of 47 stats := mgo.GetStats() - if s.versionAtLeast(3, 0) { // TODO Will be 2.6 when write commands are enabled for it. + if s.versionAtLeast(2, 6) { c.Assert(stats.SentOps, Equals, 3) } else { c.Assert(stats.SentOps, Equals, 4) @@ -2517,8 +2517,7 @@ func (s *S) TestSafeInsert(c *C) { // It must have sent two operations (INSERT_OP + getLastError QUERY_OP) stats := mgo.GetStats() - // TODO Will be 2.6 when write commands are enabled for it. 
- if s.versionAtLeast(3, 0) { + if s.versionAtLeast(2, 6) { c.Assert(stats.SentOps, Equals, 1) } else { c.Assert(stats.SentOps, Equals, 2) From 8b93a9fd8de6b7a089bf90a9168c12023e6f2ab0 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Tue, 8 Sep 2015 18:59:51 -0300 Subject: [PATCH 176/305] Added Matched and Modified fields to BulkResult. --- bulk.go | 29 ++++++++++++++++------------- bulk_test.go | 20 +++++++++++++++++--- session.go | 25 ++++++++++++++----------- 3 files changed, 47 insertions(+), 27 deletions(-) diff --git a/bulk.go b/bulk.go index 4c5f4c9b2..57d82ddd0 100644 --- a/bulk.go +++ b/bulk.go @@ -42,6 +42,9 @@ type bulkError struct { // BulkResult holds the results for a bulk operation. type BulkResult struct { + Matched int + Modified int // Available only for MongoDB 2.6+ + // Be conservative while we understand exactly how to report these // results in a useful and convenient way, and also how to emulate // them with prior servers. @@ -84,7 +87,6 @@ func (b *Bulk) action(op bulkOp) *bulkAction { return &b.actions[len(b.actions)-1] } - // Insert queues up the provided documents for insertion. 
func (b *Bulk) Insert(docs ...interface{}) { action := b.action(bulkInsert) @@ -107,8 +109,8 @@ func (b *Bulk) Update(pairs ...interface{}) { } action.docs = append(action.docs, &updateOp{ Collection: b.c.FullName, - Selector: selector, - Update: pairs[i+1], + Selector: selector, + Update: pairs[i+1], }) } } @@ -129,10 +131,10 @@ func (b *Bulk) UpdateAll(pairs ...interface{}) { } action.docs = append(action.docs, &updateOp{ Collection: b.c.FullName, - Selector: selector, - Update: pairs[i+1], - Flags: 2, - Multi: true, + Selector: selector, + Update: pairs[i+1], + Flags: 2, + Multi: true, }) } } @@ -153,10 +155,10 @@ func (b *Bulk) Upsert(pairs ...interface{}) { } action.docs = append(action.docs, &updateOp{ Collection: b.c.FullName, - Selector: selector, - Update: pairs[i+1], - Flags: 1, - Upsert: true, + Selector: selector, + Update: pairs[i+1], + Flags: 1, + Upsert: true, }) } } @@ -206,7 +208,7 @@ func (b *Bulk) runInsert(action *bulkAction, result *BulkResult, berr *bulkError func (b *Bulk) runUpdate(action *bulkAction, result *BulkResult, berr *bulkError) bool { ok := true for _, op := range action.docs { - _, err := b.c.writeOp(op, b.ordered) + lerr, err := b.c.writeOp(op, b.ordered) if err != nil { ok = false berr.err = &bulkError{err} @@ -214,7 +216,8 @@ func (b *Bulk) runUpdate(action *bulkAction, result *BulkResult, berr *bulkError break } } - // TODO Report number of updates into result. + result.Matched += lerr.N + result.Modified += lerr.modified } return ok } diff --git a/bulk_test.go b/bulk_test.go index 4c479d67c..b269f4864 100644 --- a/bulk_test.go +++ b/bulk_test.go @@ -142,11 +142,16 @@ func (s *S) TestBulkUpdate(c *C) { c.Assert(err, IsNil) bulk := coll.Bulk() + bulk.Update(M{"n": 1}, M{"$set": M{"n": 1}}) bulk.Update(M{"n": 2}, M{"$set": M{"n": 20}}) + bulk.Update(M{"n": 5}, M{"$set": M{"n": 50}}) // Won't match. 
bulk.Update(M{"n": 1}, M{"$set": M{"n": 10}}, M{"n": 3}, M{"$set": M{"n": 30}}) r, err := bulk.Run() c.Assert(err, IsNil) - c.Assert(r, FitsTypeOf, &mgo.BulkResult{}) + c.Assert(r.Matched, Equals, 4) + if s.versionAtLeast(2, 6) { + c.Assert(r.Modified, Equals, 3) + } type doc struct{ N int } var res []doc @@ -222,10 +227,15 @@ func (s *S) TestBulkUpdateAll(c *C) { bulk := coll.Bulk() bulk.UpdateAll(M{"n": 1}, M{"$set": M{"n": 10}}) + bulk.UpdateAll(M{"n": 2}, M{"$set": M{"n": 2}}) + bulk.UpdateAll(M{"n": 5}, M{"$set": M{"n": 50}}) // Won't match. bulk.UpdateAll(M{}, M{"$inc": M{"n": 1}}, M{"n": 11}, M{"$set": M{"n": 5}}) r, err := bulk.Run() c.Assert(err, IsNil) - c.Assert(r, FitsTypeOf, &mgo.BulkResult{}) + c.Assert(r.Matched, Equals, 6) + if s.versionAtLeast(2, 6) { + c.Assert(r.Modified, Equals, 5) + } type doc struct{ N int } var res []doc @@ -250,8 +260,12 @@ func (s *S) TestBulkMixedUnordered(c *C) { bulk.Update(M{"n": 3}, M{"$inc": M{"n": 1}}) bulk.Update(M{"n": 1}, M{"$inc": M{"n": 1}}) bulk.Insert(M{"n": 3}) - _, err = bulk.Run() + r, err := bulk.Run() c.Assert(err, IsNil) + c.Assert(r.Matched, Equals, 3) + if s.versionAtLeast(2, 6) { + c.Assert(r.Modified, Equals, 3) + } type doc struct{ N int } var res []doc diff --git a/session.go b/session.go index 9e567e3a6..3de0eaf0e 100644 --- a/session.go +++ b/session.go @@ -2168,6 +2168,8 @@ type LastError struct { WTimeout bool UpdatedExisting bool `bson:"updatedExisting"` UpsertedId interface{} `bson:"upserted"` + + modified int } func (err *LastError) Error() string { @@ -2236,8 +2238,8 @@ func (c *Collection) Update(selector interface{}, update interface{}) error { } op := updateOp{ Collection: c.FullName, - Selector: selector, - Update: update, + Selector: selector, + Update: update, } lerr, err := c.writeOp(&op, true) if err == nil && lerr != nil && !lerr.UpdatedExisting { @@ -2280,10 +2282,10 @@ func (c *Collection) UpdateAll(selector interface{}, update interface{}) (info * } op := updateOp{ Collection: 
c.FullName, - Selector: selector, - Update: update, - Flags: 2, - Multi: true, + Selector: selector, + Update: update, + Flags: 2, + Multi: true, } lerr, err := c.writeOp(&op, true) if err == nil && lerr != nil { @@ -2311,10 +2313,10 @@ func (c *Collection) Upsert(selector interface{}, update interface{}) (info *Cha } op := updateOp{ Collection: c.FullName, - Selector: selector, - Update: update, - Flags: 1, - Upsert: true, + Selector: selector, + Update: update, + Flags: 1, + Upsert: true, } lerr, err := c.writeOp(&op, true) if err == nil && lerr != nil { @@ -4238,7 +4240,8 @@ func (c *Collection) writeOpCommand(socket *mongoSocket, safeOp *queryOp, op int debugf("Write command result: %#v (err=%v)", result, err) lerr = &LastError{ UpdatedExisting: result.N > 0 && len(result.Upserted) == 0, - N: result.N, + N: result.N, + modified: result.NModified, } if len(result.Upserted) > 0 { lerr.UpsertedId = result.Upserted[0].Id From f19dc444219c529b01ba84ccdd574380edcffe9b Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 14 Sep 2015 17:34:35 -0300 Subject: [PATCH 177/305] testdb: only rsets with primaries may be healthy. 
--- testdb/init.js | 8 +++++++- testdb/wait.js | 9 +++++++++ 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/testdb/init.js b/testdb/init.js index 8e5d80115..7f3a9f0ea 100644 --- a/testdb/init.js +++ b/testdb/init.js @@ -79,14 +79,21 @@ function configAuth() { function countHealthy(rs) { var status = rs.runCommand({replSetGetStatus: 1}) var count = 0 + var primary = 0 if (typeof status.members != "undefined") { for (var i = 0; i != status.members.length; i++) { var m = status.members[i] if (m.health == 1 && (m.state == 1 || m.state == 2)) { count += 1 + if (m.state == 1) { + primary = 1 + } } } } + if (primary == 0) { + count = 0 + } return count } @@ -96,7 +103,6 @@ for (var i = 0; i != 60; i++) { var count = countHealthy(rs1a) + countHealthy(rs2a) + countHealthy(rs3a) print("Replica sets have", count, "healthy nodes.") if (count == totalRSMembers) { - sleep(2000) configShards() configAuth() quit(0) diff --git a/testdb/wait.js b/testdb/wait.js index de0d66075..fbde0749f 100644 --- a/testdb/wait.js +++ b/testdb/wait.js @@ -32,14 +32,21 @@ for (var i = 0; i != 60; i++) { function countHealthy(rs) { var status = rs.runCommand({replSetGetStatus: 1}) var count = 0 + var primary = 0 if (typeof status.members != "undefined") { for (var i = 0; i != status.members.length; i++) { var m = status.members[i] if (m.health == 1 && (m.state == 1 || m.state == 2)) { count += 1 + if (m.state == 1) { + primary = 1 + } } } } + if (primary == 0) { + count = 0 + } return count } @@ -56,3 +63,5 @@ for (var i = 0; i != 60; i++) { print("Replica sets didn't sync up properly.") quit(12) + +// vim:ts=4:sw=4:et From c39bc0b04177768f133b6d17c4ae6ef32a883c32 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 14 Sep 2015 21:13:10 -0300 Subject: [PATCH 178/305] Support all the new MongoDB read preference modes. 
--- cluster.go | 18 ++-- cluster_test.go | 279 +++++++++++++++++++++++++++++++++++++++++++++--- server.go | 8 +- session.go | 37 ++++--- 4 files changed, 301 insertions(+), 41 deletions(-) diff --git a/cluster.go b/cluster.go index 47e04a5f4..dcb2e8009 100644 --- a/cluster.go +++ b/cluster.go @@ -529,8 +529,8 @@ func (cluster *mongoCluster) syncServersIteration(direct bool) { } cluster.Lock() - ml := cluster.masters.Len() - logf("SYNC Synchronization completed: %d master(s) and %d slave(s) alive.", ml, cluster.servers.Len()-ml) + mastersLen := cluster.masters.Len() + logf("SYNC Synchronization completed: %d master(s) and %d slave(s) alive.", mastersLen, cluster.servers.Len()-mastersLen) // Update dynamic seeds, but only if we have any good servers. Otherwise, // leave them alone for better chances of a successful sync in the future. @@ -548,17 +548,17 @@ func (cluster *mongoCluster) syncServersIteration(direct bool) { // AcquireSocket returns a socket to a server in the cluster. If slaveOk is // true, it will attempt to return a socket to a slave server. If it is // false, the socket will necessarily be to a master server. 
-func (cluster *mongoCluster) AcquireSocket(slaveOk bool, syncTimeout time.Duration, socketTimeout time.Duration, serverTags []bson.D, poolLimit int) (s *mongoSocket, err error) { +func (cluster *mongoCluster) AcquireSocket(mode Mode, slaveOk bool, syncTimeout time.Duration, socketTimeout time.Duration, serverTags []bson.D, poolLimit int) (s *mongoSocket, err error) { var started time.Time var syncCount uint warnedLimit := false for { cluster.RLock() for { - ml := cluster.masters.Len() - sl := cluster.servers.Len() - debugf("Cluster has %d known masters and %d known slaves.", ml, sl-ml) - if ml > 0 || slaveOk && sl > 0 { + mastersLen := cluster.masters.Len() + slavesLen := cluster.servers.Len() - mastersLen + debugf("Cluster has %d known masters and %d known slaves.", mastersLen, slavesLen) + if !(slaveOk && mode == Secondary) && mastersLen > 0 || slaveOk && slavesLen > 0 { break } if started.IsZero() { @@ -578,9 +578,9 @@ func (cluster *mongoCluster) AcquireSocket(slaveOk bool, syncTimeout time.Durati var server *mongoServer if slaveOk { - server = cluster.servers.BestFit(serverTags) + server = cluster.servers.BestFit(mode, serverTags) } else { - server = cluster.masters.BestFit(nil) + server = cluster.masters.BestFit(mode, nil) } cluster.RUnlock() diff --git a/cluster_test.go b/cluster_test.go index b8ed98696..dbad407ac 100644 --- a/cluster_test.go +++ b/cluster_test.go @@ -158,7 +158,7 @@ func (s *S) TestCloneSession(c *C) { c.Assert(stats.ReceivedDocs, Equals, 1) } -func (s *S) TestSetModeStrong(c *C) { +func (s *S) TestModeStrong(c *C) { session, err := mgo.Dial("localhost:40012") c.Assert(err, IsNil) defer session.Close() @@ -195,7 +195,7 @@ func (s *S) TestSetModeStrong(c *C) { c.Assert(stats.SocketsInUse, Equals, 0) } -func (s *S) TestSetModeMonotonic(c *C) { +func (s *S) TestModeMonotonic(c *C) { // Must necessarily connect to a slave, otherwise the // master connection will be available first. 
session, err := mgo.Dial("localhost:40012") @@ -206,20 +206,19 @@ func (s *S) TestSetModeMonotonic(c *C) { c.Assert(session.Mode(), Equals, mgo.Monotonic) - result := M{} + var result struct{ IsMaster bool } cmd := session.DB("admin").C("$cmd") err = cmd.Find(M{"ismaster": 1}).One(&result) c.Assert(err, IsNil) - c.Assert(result["ismaster"], Equals, false) + c.Assert(result.IsMaster, Equals, false) coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"a": 1}) c.Assert(err, IsNil) - result = M{} err = cmd.Find(M{"ismaster": 1}).One(&result) c.Assert(err, IsNil) - c.Assert(result["ismaster"], Equals, true) + c.Assert(result.IsMaster, Equals, true) // Wait since the sync also uses sockets. for len(session.LiveServers()) != 3 { @@ -238,7 +237,7 @@ func (s *S) TestSetModeMonotonic(c *C) { c.Assert(stats.SocketsInUse, Equals, 0) } -func (s *S) TestSetModeMonotonicAfterStrong(c *C) { +func (s *S) TestModeMonotonicAfterStrong(c *C) { // Test that a strong session shifting to a monotonic // one preserves the socket untouched. @@ -271,7 +270,7 @@ func (s *S) TestSetModeMonotonicAfterStrong(c *C) { c.Assert(result["ismaster"], Equals, true) } -func (s *S) TestSetModeStrongAfterMonotonic(c *C) { +func (s *S) TestModeStrongAfterMonotonic(c *C) { // Test that shifting from Monotonic to Strong while // using a slave socket will keep the socket reserved // until the master socket is necessary, so that no @@ -311,7 +310,7 @@ func (s *S) TestSetModeStrongAfterMonotonic(c *C) { c.Assert(result["ismaster"], Equals, true) } -func (s *S) TestSetModeMonotonicWriteOnIteration(c *C) { +func (s *S) TestModeMonotonicWriteOnIteration(c *C) { // Must necessarily connect to a slave, otherwise the // master connection will be available first. 
session, err := mgo.Dial("localhost:40012") @@ -356,7 +355,7 @@ func (s *S) TestSetModeMonotonicWriteOnIteration(c *C) { c.Assert(i, Equals, len(ns)) } -func (s *S) TestSetModeEventual(c *C) { +func (s *S) TestModeEventual(c *C) { // Must necessarily connect to a slave, otherwise the // master connection will be available first. session, err := mgo.Dial("localhost:40012") @@ -393,7 +392,7 @@ func (s *S) TestSetModeEventual(c *C) { c.Assert(stats.SocketsInUse, Equals, 0) } -func (s *S) TestSetModeEventualAfterStrong(c *C) { +func (s *S) TestModeEventualAfterStrong(c *C) { // Test that a strong session shifting to an eventual // one preserves the socket untouched. @@ -431,7 +430,7 @@ func (s *S) TestSetModeEventualAfterStrong(c *C) { c.Assert(stats.SocketsInUse, Equals, 0) } -func (s *S) TestPrimaryShutdownStrong(c *C) { +func (s *S) TestModeStrongFallover(c *C) { if *fast { c.Skip("-fast") } @@ -472,7 +471,7 @@ func (s *S) TestPrimaryShutdownStrong(c *C) { c.Assert(err, IsNil) } -func (s *S) TestPrimaryHiccup(c *C) { +func (s *S) TestModePrimaryHiccup(c *C) { if *fast { c.Skip("-fast") } @@ -523,7 +522,7 @@ func (s *S) TestPrimaryHiccup(c *C) { c.Assert(err, IsNil) } -func (s *S) TestPrimaryShutdownMonotonic(c *C) { +func (s *S) TestModeMonotonicFallover(c *C) { if *fast { c.Skip("-fast") } @@ -566,7 +565,7 @@ func (s *S) TestPrimaryShutdownMonotonic(c *C) { c.Assert(result.Host, Not(Equals), host) } -func (s *S) TestPrimaryShutdownMonotonicWithSlave(c *C) { +func (s *S) TestModeMonotonicWithSlaveFallover(c *C) { if *fast { c.Skip("-fast") } @@ -645,7 +644,7 @@ func (s *S) TestPrimaryShutdownMonotonicWithSlave(c *C) { c.Assert(ssresult.Host, Not(Equals), master) } -func (s *S) TestPrimaryShutdownEventual(c *C) { +func (s *S) TestModeEventualFallover(c *C) { if *fast { c.Skip("-fast") } @@ -682,6 +681,192 @@ func (s *S) TestPrimaryShutdownEventual(c *C) { c.Assert(result.Host, Not(Equals), master) } +func (s *S) TestModeSecondaryJustPrimary(c *C) { + if *fast { + 
c.Skip("-fast") + } + + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + session.SetMode(mgo.Secondary, true) + + err = session.Ping() + c.Assert(err, ErrorMatches, "no reachable servers") +} + +func (s *S) TestModeSecondaryPreferredJustPrimary(c *C) { + if *fast { + c.Skip("-fast") + } + + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + session.SetMode(mgo.SecondaryPreferred, true) + + result := &struct{ Host string }{} + err = session.Run("serverStatus", result) + c.Assert(err, IsNil) +} + +func (s *S) TestModeSecondaryPreferredFallover(c *C) { + if *fast { + c.Skip("-fast") + } + + session, err := mgo.Dial("localhost:40011") + c.Assert(err, IsNil) + defer session.Close() + + // Ensure secondaries are available for being picked up. + for len(session.LiveServers()) != 3 { + c.Log("Waiting for cluster sync to finish...") + time.Sleep(5e8) + } + + session.SetMode(mgo.SecondaryPreferred, true) + + result := &struct{ Host string }{} + err = session.Run("serverStatus", result) + c.Assert(err, IsNil) + c.Assert(supvName(result.Host), Not(Equals), "rs1a") + secondary := result.Host + + // Should connect to the primary when needed. + coll := session.DB("mydb").C("mycoll") + err = coll.Insert(M{"a": 1}) + c.Assert(err, IsNil) + + // Wait a bit for this to be synchronized to slaves. + time.Sleep(3 * time.Second) + + // Kill the primary. + s.Stop("localhost:40011") + + // It can still talk to the selected secondary. + err = session.Run("serverStatus", result) + c.Assert(err, IsNil) + c.Assert(result.Host, Equals, secondary) + + // But cannot speak to the primary until reset. + coll = session.DB("mydb").C("mycoll") + err = coll.Insert(M{"a": 1}) + c.Assert(err, Equals, io.EOF) + + session.Refresh() + + // Can still talk to a secondary. 
+ err = session.Run("serverStatus", result) + c.Assert(err, IsNil) + c.Assert(supvName(result.Host), Not(Equals), "rs1a") + + s.StartAll() + + // Should now be able to talk to the primary again. + coll = session.DB("mydb").C("mycoll") + err = coll.Insert(M{"a": 1}) + c.Assert(err, IsNil) +} + +func (s *S) TestModePrimaryPreferredFallover(c *C) { + if *fast { + c.Skip("-fast") + } + + session, err := mgo.Dial("localhost:40011") + c.Assert(err, IsNil) + defer session.Close() + + session.SetMode(mgo.PrimaryPreferred, true) + + result := &struct{ Host string }{} + err = session.Run("serverStatus", result) + c.Assert(err, IsNil) + c.Assert(supvName(result.Host), Equals, "rs1a") + + // Kill the primary. + s.Stop("localhost:40011") + + // Should now fail as there was a primary socket in use already. + err = session.Run("serverStatus", result) + c.Assert(err, Equals, io.EOF) + + // Refresh so the reserved primary socket goes away. + session.Refresh() + + // Should be able to talk to the secondary. + err = session.Run("serverStatus", result) + c.Assert(err, IsNil) + + s.StartAll() + + // Should wait for the new primary to become available. + coll := session.DB("mydb").C("mycoll") + err = coll.Insert(M{"a": 1}) + c.Assert(err, IsNil) + + // And should use the new primary in general, as it is preferred. + err = session.Run("serverStatus", result) + c.Assert(err, IsNil) + c.Assert(supvName(result.Host), Equals, "rs1a") +} + +func (s *S) TestModePrimaryFallover(c *C) { + if *fast { + c.Skip("-fast") + } + + session, err := mgo.Dial("localhost:40011") + c.Assert(err, IsNil) + defer session.Close() + + session.SetSyncTimeout(3 * time.Second) + + session.SetMode(mgo.Primary, true) + + result := &struct{ Host string }{} + err = session.Run("serverStatus", result) + c.Assert(err, IsNil) + c.Assert(supvName(result.Host), Equals, "rs1a") + + // Kill the primary. 
+ s.Stop("localhost:40011") + + session.Refresh() + + err = session.Ping() + c.Assert(err, ErrorMatches, "no reachable servers") +} + +func (s *S) TestModeSecondary(c *C) { + if *fast { + c.Skip("-fast") + } + + session, err := mgo.Dial("localhost:40011") + c.Assert(err, IsNil) + defer session.Close() + + session.SetMode(mgo.Secondary, true) + + result := &struct{ Host string }{} + err = session.Run("serverStatus", result) + c.Assert(err, IsNil) + c.Assert(supvName(result.Host), Not(Equals), "rs1a") + secondary := result.Host + + coll := session.DB("mydb").C("mycoll") + err = coll.Insert(M{"a": 1}) + c.Assert(err, IsNil) + + err = session.Run("serverStatus", result) + c.Assert(err, IsNil) + c.Assert(result.Host, Equals, secondary) +} + func (s *S) TestPreserveSocketCountOnSync(c *C) { if *fast { c.Skip("-fast") @@ -1522,6 +1707,68 @@ func (s *S) TestNearestSecondary(c *C) { } } +func (s *S) TestNearestServer(c *C) { + defer mgo.HackPingDelay(300 * time.Millisecond)() + + rs1a := "127.0.0.1:40011" + rs1b := "127.0.0.1:40012" + rs1c := "127.0.0.1:40013" + + session, err := mgo.Dial(rs1a) + c.Assert(err, IsNil) + defer session.Close() + + s.Freeze(rs1a) + s.Freeze(rs1b) + + // Extra delay to ensure the first two servers get penalized. + time.Sleep(500 * time.Millisecond) + + // Release them. + s.Thaw(rs1a) + s.Thaw(rs1b) + + // Wait for everyone to come up. + for len(session.LiveServers()) != 3 { + c.Log("Waiting for all servers to be alive...") + time.Sleep(100 * time.Millisecond) + } + + session.SetMode(mgo.Nearest, true) + var result struct{ Host string } + + // See which server picks the line, several times to avoid chance. + for i := 0; i < 10; i++ { + session.Refresh() + err = session.Run("serverStatus", &result) + c.Assert(err, IsNil) + c.Assert(hostPort(result.Host), Equals, hostPort(rs1c)) + } + + if *fast { + // Don't hold back for several seconds. + return + } + + // Now hold the two secondaries for long enough to penalize them. 
+ s.Freeze(rs1b) + s.Freeze(rs1c) + time.Sleep(5 * time.Second) + s.Thaw(rs1b) + s.Thaw(rs1c) + + // Wait for the ping to be processed. + time.Sleep(500 * time.Millisecond) + + // Repeating the test should now pick the primary server consistently. + for i := 0; i < 10; i++ { + session.Refresh() + err = session.Run("serverStatus", &result) + c.Assert(err, IsNil) + c.Assert(hostPort(result.Host), Equals, hostPort(rs1a)) + } +} + func (s *S) TestConnectCloseConcurrency(c *C) { restore := mgo.HackPingDelay(500 * time.Millisecond) defer restore() diff --git a/server.go b/server.go index 9007d7e68..1e36b995c 100644 --- a/server.go +++ b/server.go @@ -399,7 +399,7 @@ func (servers *mongoServers) Empty() bool { // BestFit returns the best guess of what would be the most interesting // server to perform operations on at this point in time. -func (servers *mongoServers) BestFit(serverTags []bson.D) *mongoServer { +func (servers *mongoServers) BestFit(mode Mode, serverTags []bson.D) *mongoServer { var best *mongoServer for _, next := range servers.slice { if best == nil { @@ -416,9 +416,9 @@ func (servers *mongoServers) BestFit(serverTags []bson.D) *mongoServer { switch { case serverTags != nil && !next.info.Mongos && !next.hasTags(serverTags): // Must have requested tags. - case next.info.Master != best.info.Master: - // Prefer slaves. - swap = best.info.Master + case next.info.Master != best.info.Master && mode != Nearest: + // Prefer slaves, unless the mode is PrimaryPreferred. + swap = (mode == PrimaryPreferred) != best.info.Master case absDuration(next.pingValue-best.pingValue) > 15*time.Millisecond: // Prefer nearest server. 
swap = next.pingValue < best.pingValue diff --git a/session.go b/session.go index 3de0eaf0e..4dc43f039 100644 --- a/session.go +++ b/session.go @@ -47,9 +47,20 @@ import ( type Mode int const ( - Eventual Mode = 0 - Monotonic Mode = 1 - Strong Mode = 2 + // Relevant documentation on read preference modes: + // + // http://docs.mongodb.org/manual/reference/read-preference/ + // + Primary Mode = 2 // Default mode. All operations read from the current replica set primary. + PrimaryPreferred Mode = 3 // Read from the primary if available. Read from the secondary otherwise. + Secondary Mode = 4 // Read from one of the nearest secondary members of the replica set. + SecondaryPreferred Mode = 5 // Read from one of the nearest secondaries if available. Read from primary otherwise. + Nearest Mode = 6 // Read from one of the nearest members, irrespective of it being primary or secondary. + + // Read preference modes are specific to mgo: + Eventual Mode = 0 // Same as Nearest, but may change servers between reads. + Monotonic Mode = 1 // Same as SecondaryPreferred before first write. Same as Primary after first write. + Strong Mode = 2 // Same as Primary. ) // When changing the Session type, check if newSession and copySession @@ -3941,14 +3952,16 @@ func (s *Session) acquireSocket(slaveOk bool) (*mongoSocket, error) { // Read-only lock to check for previously reserved socket. s.m.RLock() - if s.masterSocket != nil { - socket := s.masterSocket + // If there is a slave socket reserved and its use is acceptable, take it as long + // as there isn't a master socket which would be preferred by the read preference mode. 
+ if s.slaveSocket != nil && s.slaveOk && slaveOk && (s.masterSocket == nil || s.consistency != PrimaryPreferred && s.consistency != Monotonic) { + socket := s.slaveSocket socket.Acquire() s.m.RUnlock() return socket, nil } - if s.slaveSocket != nil && s.slaveOk && slaveOk { - socket := s.slaveSocket + if s.masterSocket != nil { + socket := s.masterSocket socket.Acquire() s.m.RUnlock() return socket, nil @@ -3960,17 +3973,17 @@ func (s *Session) acquireSocket(slaveOk bool) (*mongoSocket, error) { s.m.Lock() defer s.m.Unlock() + if s.slaveSocket != nil && s.slaveOk && slaveOk && (s.masterSocket == nil || s.consistency != PrimaryPreferred && s.consistency != Monotonic) { + s.slaveSocket.Acquire() + return s.slaveSocket, nil + } if s.masterSocket != nil { s.masterSocket.Acquire() return s.masterSocket, nil } - if s.slaveSocket != nil && s.slaveOk && slaveOk { - s.slaveSocket.Acquire() - return s.slaveSocket, nil - } // Still not good. We need a new socket. - sock, err := s.cluster().AcquireSocket(slaveOk && s.slaveOk, s.syncTimeout, s.sockTimeout, s.queryConfig.op.serverTags, s.poolLimit) + sock, err := s.cluster().AcquireSocket(s.consistency, slaveOk && s.slaveOk, s.syncTimeout, s.sockTimeout, s.queryConfig.op.serverTags, s.poolLimit) if err != nil { return nil, err } From 2f3a9b3f30bf9a18f5a0b42d843ee5919f762d19 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 21 Sep 2015 18:42:17 -0300 Subject: [PATCH 179/305] Resolve IPv4/v6 addresses concurrently. Prefer v4. Fixes #162. Updates #153. --- cluster.go | 40 +++++++++++++++++++++++++++++++++++----- 1 file changed, 35 insertions(+), 5 deletions(-) diff --git a/cluster.go b/cluster.go index dcb2e8009..f97320e8d 100644 --- a/cluster.go +++ b/cluster.go @@ -425,14 +425,44 @@ func resolveAddr(addr string) (*net.TCPAddr, error) { } } - // This unfortunate hack allows having a timeout on address resolution. 
- conn, err := net.DialTimeout("udp4", addr, 10*time.Second) - if err != nil { + // Attempt to resolve IPv4 and v6 concurrently. + addrChan := make(chan *net.TCPAddr, 2) + for _, network := range []string{"udp4", "udp6"} { + network := network + go func() { + // The unfortunate UDP dialing hack allows having a timeout on address resolution. + conn, err := net.DialTimeout(network, addr, 10*time.Second) + if err != nil { + addrChan <- nil + } else { + addrChan <- (*net.TCPAddr)(conn.RemoteAddr().(*net.UDPAddr)) + conn.Close() + } + }() + } + + // Wait for the result of IPv4 and v6 resolution. Use IPv4 if available. + tcpaddr := <-addrChan + if tcpaddr == nil || len(tcpaddr.IP) != 4 { + var timeout <-chan time.Time + if tcpaddr != nil { + // Don't wait too long if an IPv6 address is known. + timeout = time.After(50 * time.Millisecond) + } + select { + case <-timeout: + case tcpaddr2 := <-addrChan: + if tcpaddr == nil || tcpaddr2 != nil { + // It's an IPv4 address or the only known address. Use it. + tcpaddr = tcpaddr2 + } + } + } + + if tcpaddr == nil { log("SYNC Failed to resolve server address: ", addr) return nil, errors.New("failed to resolve server address: " + addr) } - tcpaddr := (*net.TCPAddr)(conn.RemoteAddr().(*net.UDPAddr)) - conn.Close() if tcpaddr.String() != addr { debug("SYNC Address ", addr, " resolved as ", tcpaddr.String()) } From 9be224d57f036cca92520e88c32e411682385365 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Tue, 29 Sep 2015 17:45:10 +0200 Subject: [PATCH 180/305] Index name support in EnsureIndex and DropIndexName EnsureIndex will now respect the Name field during creation if it is set, and the new DropIndexName method allows dropping an index by name. Fixes #111. 
--- session.go | 72 +++++++++++++++++++++++++++---- session_test.go | 112 +++++++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 169 insertions(+), 15 deletions(-) diff --git a/session.go b/session.go index 4dc43f039..804282389 100644 --- a/session.go +++ b/session.go @@ -993,8 +993,8 @@ type Index struct { // documents with indexed time.Time older than the provided delta. ExpireAfter time.Duration - // Name holds the stored index name. On creation this field is ignored and the index name - // is automatically computed by EnsureIndex based on the index key + // Name holds the stored index name. On creation if this field is unset it is + // computed by EnsureIndex based on the index key. Name string // Properties for spatial indexes. @@ -1202,6 +1202,10 @@ func (c *Collection) EnsureIndex(index Index) error { LanguageOverride: index.LanguageOverride, } + if index.Name != "" { + spec.Name = index.Name + } + NextField: for name, weight := range index.Weights { for i, elem := range spec.Weights { @@ -1231,17 +1235,15 @@ NextField: return err } -// DropIndex removes the index with key from the collection. +// DropIndex drops the index with the provided key from the c collection. // -// The key value determines which fields compose the index. The index ordering -// will be ascending by default. To obtain an index with a descending order, -// the field name should be prefixed by a dash (e.g. []string{"-time"}). +// See EnsureIndex for details on the accepted key variants. // // For example: // -// err := collection.DropIndex("lastname", "firstname") +// err1 := collection.DropIndex("firstField", "-secondField") +// err2 := collection.DropIndex("customIndexName") // -// See the EnsureIndex method for more details on indexes. 
func (c *Collection) DropIndex(key ...string) error { keyInfo, err := parseIndexKey(key) if err != nil { @@ -1271,6 +1273,60 @@ func (c *Collection) DropIndex(key ...string) error { return nil } +// DropIndexName removes the index with the provided index name. +// +// For example: +// +// err := collection.DropIndex("customIndexName") +// +func (c *Collection) DropIndexName(name string) error { + session := c.Database.Session + + session = session.Clone() + defer session.Close() + session.SetMode(Strong, false) + + c = c.With(session) + + indexes, err := c.Indexes() + if err != nil { + return err + } + + var index Index + for _, idx := range indexes { + if idx.Name == name { + index = idx + break + } + } + + if index.Name != "" { + keyInfo, err := parseIndexKey(index.Key) + if err != nil { + return err + } + + cacheKey := c.FullName + "\x00" + keyInfo.name + session.cluster().CacheIndex(cacheKey, false) + } + + + result := struct { + ErrMsg string + Ok bool + }{} + err = c.Database.Run(bson.D{{"dropIndexes", c.Name}, {"index", name}}, &result) + if err != nil { + return err + } + if !result.Ok { + return errors.New(result.ErrMsg) + } + return nil +} + + // Indexes returns a list of all indexes for the collection. 
// // For example, this snippet would drop all available indexes: diff --git a/session_test.go b/session_test.go index 2979068f0..67b11afd8 100644 --- a/session_test.go +++ b/session_test.go @@ -2718,6 +2718,16 @@ var indexTests = []struct { "language_override": "language", "textIndexVersion": 2, }, +}, { + mgo.Index{ + Key: []string{"cn"}, + Name: "CustomName", + }, + M{ + "name": "CustomName", + "key": M{"cn": 1}, + "ns": "mydb.mycoll", + }, }} func (s *S) TestEnsureIndex(c *C) { @@ -2737,8 +2747,13 @@ func (s *S) TestEnsureIndex(c *C) { err = coll.EnsureIndex(test.index) c.Assert(err, IsNil) + expectedName := test.index.Name + if expectedName == "" { + expectedName, _ = test.expected["name"].(string) + } + obtained := M{} - err = idxs.Find(M{"name": test.expected["name"]}).One(obtained) + err = idxs.Find(M{"name": expectedName}).One(obtained) c.Assert(err, IsNil) delete(obtained, "v") @@ -2750,8 +2765,13 @@ func (s *S) TestEnsureIndex(c *C) { c.Assert(obtained, DeepEquals, test.expected) - err = coll.DropIndex(test.index.Key...) - c.Assert(err, IsNil) + if test.index.Name == "" { + err = coll.DropIndex(test.index.Key...) 
+ c.Assert(err, IsNil) + } else { + err = coll.DropIndexName(test.index.Name) + c.Assert(err, IsNil) + } } } @@ -2851,24 +2871,57 @@ func (s *S) TestEnsureIndexDropIndex(c *C) { c.Assert(err, IsNil) sysidx := session.DB("mydb").C("system.indexes") - dummy := &struct{}{} - err = sysidx.Find(M{"name": "a_1"}).One(dummy) + err = sysidx.Find(M{"name": "a_1"}).One(nil) c.Assert(err, IsNil) - err = sysidx.Find(M{"name": "b_1"}).One(dummy) + err = sysidx.Find(M{"name": "b_1"}).One(nil) c.Assert(err, Equals, mgo.ErrNotFound) err = coll.DropIndex("a") c.Assert(err, IsNil) - err = sysidx.Find(M{"name": "a_1"}).One(dummy) + err = sysidx.Find(M{"name": "a_1"}).One(nil) c.Assert(err, Equals, mgo.ErrNotFound) err = coll.DropIndex("a") c.Assert(err, ErrorMatches, "index not found.*") } +func (s *S) TestEnsureIndexDropIndexName(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + err = coll.EnsureIndexKey("a") + c.Assert(err, IsNil) + + err = coll.EnsureIndex(mgo.Index{Key: []string{"b"}, Name: "a"}) + c.Assert(err, IsNil) + + err = coll.DropIndexName("a") + c.Assert(err, IsNil) + + sysidx := session.DB("mydb").C("system.indexes") + + err = sysidx.Find(M{"name": "a_1"}).One(nil) + c.Assert(err, IsNil) + + err = sysidx.Find(M{"name": "a"}).One(nil) + c.Assert(err, Equals, mgo.ErrNotFound) + + err = coll.DropIndexName("a_1") + c.Assert(err, IsNil) + + err = sysidx.Find(M{"name": "a_1"}).One(nil) + c.Assert(err, Equals, mgo.ErrNotFound) + + err = coll.DropIndexName("a_1") + c.Assert(err, ErrorMatches, "index not found.*") +} + func (s *S) TestEnsureIndexCaching(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) @@ -2947,6 +3000,51 @@ func (s *S) TestEnsureIndexGetIndexes(c *C) { c.Assert(indexes[4].Key, DeepEquals, []string{"$2d:d"}) } +func (s *S) TestEnsureIndexNameCaching(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer 
session.Close() + + coll := session.DB("mydb").C("mycoll") + + err = coll.EnsureIndex(mgo.Index{Key: []string{"a"}, Name: "custom"}) + c.Assert(err, IsNil) + + mgo.ResetStats() + + // Second EnsureIndex should be cached and do nothing. + err = coll.EnsureIndexKey("a") + c.Assert(err, IsNil) + + err = coll.EnsureIndex(mgo.Index{Key: []string{"a"}, Name: "custom"}) + c.Assert(err, IsNil) + + stats := mgo.GetStats() + c.Assert(stats.SentOps, Equals, 0) + + + // Resetting the cache should make it contact the server again. + session.ResetIndexCache() + + err = coll.EnsureIndex(mgo.Index{Key: []string{"a"}, Name: "custom"}) + c.Assert(err, IsNil) + + stats = mgo.GetStats() + c.Assert(stats.SentOps > 0, Equals, true) + + // Dropping the index should also drop the cached index key. + err = coll.DropIndexName("custom") + c.Assert(err, IsNil) + + mgo.ResetStats() + + err = coll.EnsureIndex(mgo.Index{Key: []string{"a"}, Name: "custom"}) + c.Assert(err, IsNil) + + stats = mgo.GetStats() + c.Assert(stats.SentOps > 0, Equals, true) +} + func (s *S) TestEnsureIndexEvalGetIndexes(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) From d27411252e221072810424f7030af398b11beabb Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Tue, 29 Sep 2015 23:08:18 +0200 Subject: [PATCH 181/305] Renew expired SSL auth test keys. 
--- testdb/client.pem | 84 +++++++++++++++++++++-------------------- testdb/newkey.txt | 7 ++++ testdb/server.pem | 79 +++++++++++++++++++++++--------------- testdb/supervisord.conf | 2 +- 4 files changed, 100 insertions(+), 72 deletions(-) create mode 100644 testdb/newkey.txt diff --git a/testdb/client.pem b/testdb/client.pem index cc57eec7a..50cb6f32d 100644 --- a/testdb/client.pem +++ b/testdb/client.pem @@ -1,44 +1,48 @@ -----BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEAwE2sl8YeTTSetwo9kykJ5mCZ/FtfPtn/0X4nOlTM2Qc/uWzA -sjSYoSV4UkuOiWjKQQH2EDeXaltshOo7F0oCY5ozVeQe+phe987iKTvLtf7NoXJD -KqNqR4Kb4ylbCrEky7+Xvw6yrrqw8qgWy+9VsrilR3q8LsETE9SBMtfp3BUaaNQp -peNm+iAhx3uZSv3mdzSLFSA/o61kAyG0scLExYDjo/7xyMNQoloLvNmx4Io160+y -lOz077/qqU620tmuDLRz1QdxK/bptmXTnsBCRxl+U8nzbwVZgWFENhXplbcN+SjN -LhdnvTiU2qFhgZmc7ZtCKdPIpx3W6pH9bx7kTwIDAQABAoIBAQCOQygyo8NY9FuS -J8ZDrvF+9+oS8fm1QorpDT2x/ngI+j7fSyAG9bgQRusLXpAVAWvWyb+iYa3nZbkT -X0DVys+XpcTifr+YPc7L3sYbIPxkKBsxm5kq2vfN7Uart7V9ZG1HOfblxdbUQpKT -AVzUA7vPWqATEC5VHEqjuerWlTqRr9YLZE/nkE7ICLISqdl4WDYfUYJwoXWfYkXQ -Lfl5Qh2leyri9S3urvDrhnURTQ1lM182IbTRA+9rUiFzsRW+9U4HPY7Ao2Itp8dr -GRP4rcq4TP+NcF0Ky64cNfKXCWmwqTBRFYAlTD6gwjN/s2BzvWD/2nlnc0DYAXrB -TgFCPk7xAoGBAOwuHICwwTxtzrdWjuRGU3RxL4eLEXedtL8yon/yPci3e+8eploX -1Fp0rEK2gIGDp/X8DiOtrKXih8XPusCwE/I3EvjHdI0RylLZXTPOp1Ei21dXRsiV -YxcF+d5s11q5tJtF+5ISUeIz2iSc9Z2LBnb8JDK1jcCRa5Q212q3ZWW5AoGBANBw -9CoMbxINLG1q0NvOOSwMKDk2OB+9JbQ5lwF4ijZl2I6qRoOCzQ3lBs0Qv/AeBjNR -SerDs2+eWnIBUbgSdiqcOKnXAI/Qbl1IkVFYV/2g9m6dgu1fNWNBv8NIYDHCLfDx -W3fpO5JMf+iE5XC4XqCfSBIME2yxPSGQjal6tB5HAoGAddYDzolhv/6hVoPPQ0F7 -PeuC5UOTcXSzy3k97kw0W0KAiStnoCengYIYuChKMVQ4ptgdTdvG+fTt/NnJuX2g -Vgb4ZjtNgVzQ70kX4VNH04lqmkcnP8iY6dHHexwezls9KwNdouGVDSEFw6K0QOgu -T4s5nDtNADkNzaMXE11xL7ECgYBoML3rstFmTY1ymB0Uck3jtaP5jR+axdpt7weL -Zax4qooILhcXL6++DUhMAt5ecTOaPTzci7xKw/Xj3MLzZs8IV5R/WQhf2sj/+gEh -jy5UijwEaNmEO74dAkWPoMLsvGpocMzO8JeldnXNTXi+0noCgfvtgXnIMAQlnfMh -z0LviwKBgQCg5KR9JC4iuKses7Kfv2YelcO8vOZkRzBu3NdRWMsiJQC+qfetgd57 
-RjRjlRWd1WCHJ5Kmx3hkUaZZOrX5knqfsRW3Nl0I74xgWl7Bli2eSJ9VWl59bcd6 -DqphhY7/gcW+QZlhXpnqbf0W8jB2gPhTYERyCBoS9LfhZWZu/11wuQ== +MIIEogIBAAKCAQEAtFIkIZk/h+CCKq5/EjBEg873Jd68CJsFKESB5Zl5KLwiGQm7 +wQidZwLul+cyDfPRDzzo3za4GetesD4FVf2BEF6fg+/o0wLBObPCXqUVxXXnEXrJ +r4f/tItg0riOEBbLslQDzNTtCAEORCoK9MHmWZrF+pYTw+LmHoVeA8QxNIv/GkwJ +Q6DYEQgCa2BTIWq0Uw3WO20M3e2WGm/6Sv9w0pjisZfwBSfBJ5nI/cNW7L8tH4AI +KBhAZwa7vND0RaRYqpO9kyZFzh8e83GBaXoLSj2wK3kwjKHWgp4z//37JAqeFya5 +Hx+ftNTXnl/69TnxG44BP8M88ZfDWlpzwpsTXwIDAQABAoIBADzCjOAxZkHfuZyu +La0wTHXpkEfXdJ6ltagq5WY7P6MlOYwcRoK152vlhgXzZl9jL6ely4YjRwec0swq +KdwezpV4fOGVPmuTuw45bx47HEnr/49ZQ4p9FgF9EYQPofbz53FQc/NaMACJcogv +bn+osniw+VMFrOVNmGLiZ5p3Smk8zfXE7GRHO8CL5hpWLWO/aK236yytbfWOjM2f +Pr76ICb26TPRNzYaYUEThU6DtgdLU8pLnJ6QKKaDsjn+zqQzRa+Nvc0c0K8gvWwA +Afq7t0325+uMSwfpLgCOFldcaZQ5uvteJ0CAVRq1MvStnSHBmMzPlgS+NzsDm6lp +QH5+rIkCgYEA5j3jrWsv7TueTNbk8Hr/Zwywc+fA2Ex0pBURBHlHyc6ahSXWSCqo +DtvRGX0GDoK1lCfaIf1qb/DLlGaoHpkEeqcNhXQ+hHs+bZAxfbfBY9+ikit5ZTtl +QN1tIlhaiyLDnwhkpi/hMw1tiouxJUf84Io61z0sCL4hyZSPCpjn0H0CgYEAyH6F +Mwl+bCD3VDL/Dr5WSoOr2B/M3bF5SfvdStwy2IPcDJ716je1Ud/2qFCnKGgqvWhJ ++HU15c7CjAWo7/pXq2/pEMD8fDKTYww4Hr4p6duEA7DpbOGkwcUX8u3eknxUWT9F +jOSbTCvAxuDOC1K3AElyMxVVTNUrFFe8M84R9gsCgYBXmb6RkdG3WlKde7m5gaLB +K4PLZabq5RQQBe/mmtpkfxYtiLrh1FEC7kG9h+MRDExX5V3KRugDVUOv3+shUSjy +HbM4ToUm1NloyE78PTj4bfMl2CKlEJcyucy3H5S7kWuKi5/31wnA6d/+sa2huKUP +Lai7kgu5+9VRJBPUfV7d5QKBgCnhk/13TDtWH5QtGu5/gBMMskbxTaA5xHZZ8H4E +xXJJCRxx0Dje7jduK145itF8AQGT2W/XPC0HJciOHh4TE2EyfWMMjTF8dyFHmimB +28uIGWmT+Q7Pi9UWUMxkOAwtgIksGGE4F+CvexOQPjpLSwL6VKqrGCh2lwsm0J+Z +ulLFAoGAKlC93c6XEj1A31c1+usdEhUe9BrmTqtSYLYpDNpeMLdZ3VctrAZuOQPZ +4A4gkkQkqqwZGBYYSEqwqiLU6MsBdHPPZ9u3JXLLOQuh1xGeaKylvHj7qx6iT0Xo +I+FkJ6/3JeMgOina/+wlzD4oyQpqR4Mnh+TuLkDfQTgY+Lg0WPk= -----END RSA PRIVATE KEY----- -----BEGIN CERTIFICATE----- -MIICyTCCAjKgAwIBAgIBATANBgkqhkiG9w0BAQUFADBcMQswCQYDVQQGEwJHTzEM -MAoGA1UECBMDTUdPMQwwCgYDVQQHEwNNR08xDDAKBgNVBAoTA01HTzEPMA0GA1UE 
-CxMGU2VydmVyMRIwEAYDVQQDEwlsb2NhbGhvc3QwHhcNMTQwOTI0MTQwMzUzWhcN -MTUwOTI0MTQwMzUzWjBcMQswCQYDVQQGEwJHTzEMMAoGA1UECBMDTUdPMQwwCgYD -VQQHEwNNR08xDDAKBgNVBAoTA01HTzEPMA0GA1UECxMGQ2xpZW50MRIwEAYDVQQD -Ewlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDATayX -xh5NNJ63Cj2TKQnmYJn8W18+2f/Rfic6VMzZBz+5bMCyNJihJXhSS46JaMpBAfYQ -N5dqW2yE6jsXSgJjmjNV5B76mF73zuIpO8u1/s2hckMqo2pHgpvjKVsKsSTLv5e/ -DrKuurDyqBbL71WyuKVHerwuwRMT1IEy1+ncFRpo1Cml42b6ICHHe5lK/eZ3NIsV -ID+jrWQDIbSxwsTFgOOj/vHIw1CiWgu82bHgijXrT7KU7PTvv+qpTrbS2a4MtHPV -B3Er9um2ZdOewEJHGX5TyfNvBVmBYUQ2FemVtw35KM0uF2e9OJTaoWGBmZztm0Ip -08inHdbqkf1vHuRPAgMBAAGjFzAVMBMGA1UdJQQMMAoGCCsGAQUFBwMCMA0GCSqG -SIb3DQEBBQUAA4GBAJZD7idSIRzhGlJYARPKWnX2CxD4VVB0F5cH5Mlc2YnoUSU/ -rKuPZFuOYND3awKqez6K3rNb3+tQmNitmoOT8ImmX1uJKBo5w9tuo4B2MmLQcPMk -3fhPePuQCjtlArSmKVrNTrYPkyB9NwKS6q0+FzseFTw9ZJUIKiO9sSjMe+HP +MIIDLjCCAhYCAQcwDQYJKoZIhvcNAQELBQAwXDELMAkGA1UEBhMCR08xDDAKBgNV +BAgMA01HTzEMMAoGA1UEBwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBlNl +cnZlcjESMBAGA1UEAwwJbG9jYWxob3N0MCAXDTE1MDkyOTA4NDAzMFoYDzIxMTUw +OTA1MDg0MDMwWjBcMQswCQYDVQQGEwJHTzEMMAoGA1UECAwDTUdPMQwwCgYDVQQH +DANNR08xDDAKBgNVBAoMA01HTzEPMA0GA1UECwwGQ2xpZW50MRIwEAYDVQQDDAls +b2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC0UiQhmT+H +4IIqrn8SMESDzvcl3rwImwUoRIHlmXkovCIZCbvBCJ1nAu6X5zIN89EPPOjfNrgZ +616wPgVV/YEQXp+D7+jTAsE5s8JepRXFdecResmvh/+0i2DSuI4QFsuyVAPM1O0I +AQ5EKgr0weZZmsX6lhPD4uYehV4DxDE0i/8aTAlDoNgRCAJrYFMharRTDdY7bQzd +7ZYab/pK/3DSmOKxl/AFJ8Enmcj9w1bsvy0fgAgoGEBnBru80PRFpFiqk72TJkXO +Hx7zcYFpegtKPbAreTCModaCnjP//fskCp4XJrkfH5+01NeeX/r1OfEbjgE/wzzx +l8NaWnPCmxNfAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAFwYpje3dCLDOIHYjd+5 +CpFOEb+bJsS4ryqm/NblTjIhCLo58hNpMsBqdJHRbHAFRCOE8fvY8yiWtdHeFZcW +DgVRAXfHONLtN7faZaZQnhy/YzOhLfC/8dUMB0gQA8KXhBCPZqQmexE28AfkEO47 +PwICAxIWINfjm5VnFMkA3b7bDNLHon/pev2m7HqVQ3pRUJQNK3XgFOdDgRrnuXpR +OKAfHORHVGTh1gf1DVwc0oM+0gnkSiJ1VG0n5pE3zhZ24fmZxu6JQ6X515W7APQI +/nKVH+f1Fo+ustyTNLt8Bwxi1XmwT7IXwnkVSE9Ff6VejppXRF01V0aaWsa3kU3r +z3A= -----END 
CERTIFICATE----- + diff --git a/testdb/newkey.txt b/testdb/newkey.txt new file mode 100644 index 000000000..e1727564a --- /dev/null +++ b/testdb/newkey.txt @@ -0,0 +1,7 @@ +openssl req -newkey rsa:2048 -new -x509 -days 36500 -nodes -out server.crt -keyout server.key +cat server.key server.crt > server.pem +openssl genrsa -out client.key 2048 +openssl req -key client.key -new -out client.req +openssl x509 -req -in client.req -CA server.crt -CAkey server.key -days 36500 -CAserial file.srl -out client.crt +cat client.key client.crt > client.pem +#openssl x509 -extensions usr_cert -req -in client.req -CA server.crt -CAkey server.key -days 36500 -CAserial file.srl -out client.pem diff --git a/testdb/server.pem b/testdb/server.pem index 16fbef16b..487b92d66 100644 --- a/testdb/server.pem +++ b/testdb/server.pem @@ -1,33 +1,50 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQD9PlbW9OwAX7aB +Nc/UkrKCMztP/YFceIlzoNEpWOWyFO09i4LeulN10Obp3zp3XstYSj5PZsJPgzNk +mFIYC6f2l4W96F0SVEyvnvGzuPlXVBiPBp0xMGQtC4ogCDpwhI3Uo9TOlRNQqxYi +xvH3uwDS3TCIQ+J9E5vud9IwhVCx3P9z0uVjZQ1gj7kaJTzyIMaDbCt2xrdT6XYb +YpLH/24TdzmIWSLpt16q4uJaYFnqvF+hot7iCTUg2OJ8qyw2yfaLe4niLhOavc9R +ziTHHSYwq5Yrcd2VCwyq2mr74dCYdK+w+tuByOX0fI8mIcOygn7g7ltu1wTnWhBs +uHVtkCFjAgMBAAECggEASRAfRc1L+Z+jrAu2doIMdnwJdL6S//bW0UFolyFKw+I9 +wC/sBg6D3c3zkS4SVDZJPKPO7mGbVg1oWnGH3eAfCYoV0ACmOY+QwGp/GXcYmRVu +MHWcDIEFpelaZHt7QNM9iEfsMd3YwMFblZUIYozVZADk66uKQMPTjS2Muur7qRSi +wuVfSmsVZ5afH3B1Tr96BbmPsHrXLjvNpjO44k2wrnnSPQjUL7+YiZPvtnNW8Fby +yuo2uoAyjg3+68PYZftOvvNneMsv1uyGlUs6Bk+DVWaqofIztWFdFZyXbHnK2PTk +eGQt5EsL+RwIck5eoqd5vSE+KyzhhydL0zcpngVQoQKBgQD/Yelvholbz5NQtSy3 +ZoiW1y7hL1BKzvVNHuAMKJ5WOnj5szhjhKxt/wZ+hk0qcAmlV9WAPbf4izbEwPRC +tnMBQzf1uBxqqbLL6WZ4YAyGrcX3UrT7GXsGfVT4zJjz7oYSw8aPircecw5V4exB +xa4NF+ki8IycXSkHwvW2R56fRwKBgQD92xpxXtte/rUnmENbQmr0aKg7JEfMoih6 +MdX+f6mfgjMmqj+L4jPTI8/ql8HEy13SQS1534aDSHO+nBqBK5aHUCRMIgSLnTP9 +Xyx9Ngg03SZIkPfykqxQmnZgWkTPMhYS+K1Ao9FGVs8W5jVi7veyAdhHptAcxhP3 
+IuxvrxVTBQKBgQCluMPiu0snaOwP04HRAZhhSgIB3tIbuXE1OnPpb/JPwmH+p25Q +Jig+uN9d+4jXoRyhTv4c2fAoOS6xPwVCxWKbzyLhMTg/fx+ncy4rryhxvRJaDDGl +QEO1Ul9xlFMs9/vI8YJIY5uxBrimwpStmbn4hSukoLSeQ1X802bfglpMwQKBgD8z +GTY4Y20XBIrDAaHquy32EEwJEEcF6AXj+l7N8bDgfVOW9xMgUb6zH8RL29Xeu5Do +4SWCXL66fvZpbr/R1jwB28eIgJExpgvicfUKSqi+lhVi4hfmJDg8/FOopZDf61b1 +ykxZfHSCkDQnRAtJaylKBEpyYUWImtfgPfTgJfLxAoGAc8A/Tl2h/DsdTA+cA5d7 +1e0l64m13ObruSWRczyru4hy8Yq6E/K2rOFw8cYCcFpy24NqNlk+2iXPLRpWm2zt +9R497zAPvhK/bfPXjvm0j/VjB44lvRTC9hby/RRMHy9UJk4o/UQaD+1IodxZovvk +SruEA1+5bfBRMW0P+h7Qfe4= +-----END PRIVATE KEY----- -----BEGIN CERTIFICATE----- -MIIC+DCCAmGgAwIBAgIJAJ5pBAq2HXAsMA0GCSqGSIb3DQEBBQUAMFwxCzAJBgNV -BAYTAkdPMQwwCgYDVQQIEwNNR08xDDAKBgNVBAcTA01HTzEMMAoGA1UEChMDTUdP -MQ8wDQYDVQQLEwZTZXJ2ZXIxEjAQBgNVBAMTCWxvY2FsaG9zdDAeFw0xNDA5MjQx -MzUxMTBaFw0xNTA5MjQxMzUxMTBaMFwxCzAJBgNVBAYTAkdPMQwwCgYDVQQIEwNN -R08xDDAKBgNVBAcTA01HTzEMMAoGA1UEChMDTUdPMQ8wDQYDVQQLEwZTZXJ2ZXIx -EjAQBgNVBAMTCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA -pQ5wO2L23xMI4PzpVt/Ftvez82IvA9amwr3fUd7RjlYwiFsFeMnG24a4CUoOeKF0 -fpQWc9rmCs0EeP5ofZ2otOsfxoVWXZAZWdgauuwlYB6EeFaAMH3fxVH3IiH+21RR -q2w9sH/s4fqh5stavUfyPdVmCcb8NW0jD8jlqniJL0kCAwEAAaOBwTCBvjAdBgNV -HQ4EFgQUjyVWGMHBrmPDGwCY5VusHsKIpzIwgY4GA1UdIwSBhjCBg4AUjyVWGMHB -rmPDGwCY5VusHsKIpzKhYKReMFwxCzAJBgNVBAYTAkdPMQwwCgYDVQQIEwNNR08x -DDAKBgNVBAcTA01HTzEMMAoGA1UEChMDTUdPMQ8wDQYDVQQLEwZTZXJ2ZXIxEjAQ -BgNVBAMTCWxvY2FsaG9zdIIJAJ5pBAq2HXAsMAwGA1UdEwQFMAMBAf8wDQYJKoZI -hvcNAQEFBQADgYEAa65TgDKp3SRUDNAILSuQOCEbenWh/DMPL4vTVgo/Dxd4emoO -7i8/4HMTa0XeYIVbAsxO+dqtxqt32IcV7DurmQozdUZ7q0ueJRXon6APnCN0IqPC -sF71w63xXfpmnvTAfQXi7x6TUAyAQ2nScHExAjzc000DF1dO/6+nIINqNQE= +MIIDjTCCAnWgAwIBAgIJAMW+wDfcdzC+MA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV +BAYTAkdPMQwwCgYDVQQIDANNR08xDDAKBgNVBAcMA01HTzEMMAoGA1UECgwDTUdP +MQ8wDQYDVQQLDAZTZXJ2ZXIxEjAQBgNVBAMMCWxvY2FsaG9zdDAgFw0xNTA5Mjkw +ODM0MTBaGA8yMTE1MDkwNTA4MzQxMFowXDELMAkGA1UEBhMCR08xDDAKBgNVBAgM +A01HTzEMMAoGA1UEBwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBlNlcnZl 
+cjESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA/T5W1vTsAF+2gTXP1JKygjM7T/2BXHiJc6DRKVjlshTtPYuC3rpTddDm +6d86d17LWEo+T2bCT4MzZJhSGAun9peFvehdElRMr57xs7j5V1QYjwadMTBkLQuK +IAg6cISN1KPUzpUTUKsWIsbx97sA0t0wiEPifROb7nfSMIVQsdz/c9LlY2UNYI+5 +GiU88iDGg2wrdsa3U+l2G2KSx/9uE3c5iFki6bdequLiWmBZ6rxfoaLe4gk1INji +fKssNsn2i3uJ4i4Tmr3PUc4kxx0mMKuWK3HdlQsMqtpq++HQmHSvsPrbgcjl9HyP +JiHDsoJ+4O5bbtcE51oQbLh1bZAhYwIDAQABo1AwTjAdBgNVHQ4EFgQUhku/u9Kd +OAc1L0OR649vCCuQT+0wHwYDVR0jBBgwFoAUhku/u9KdOAc1L0OR649vCCuQT+0w +DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAw7Bgw3hlWXWSZjLhnSOu +2mW/UJ2Sj31unHngmgtXwW/04cyzoULb+qmzPe/Z06QMgGIsku1jFBcu0JabQtUG +TyalpfW77tfnvz238CYdImYwE9ZcIGuZGfhs6ySFN9XpW43B8YM7R8wTNPvOcSPw +nfjqU6kueN4TTspQg9cKhDss5DcMTIdgJgLbITXhIsrCu6GlKOgtX3HrdMGpQX7s +UoMXtZVG8pK32vxKWGTZ6DPqESeKjjq74NbYnB3H5U/kDU2dt7LF90C/Umdr9y+C +W2OJb1WBrf6RTcbt8D6d7P9kOfLPOtyn/cbaA/pfXBMQMHqr7XNXzjnaNU+jB7hL +yQ== -----END CERTIFICATE----- ------BEGIN RSA PRIVATE KEY----- -MIICWwIBAAKBgQClDnA7YvbfEwjg/OlW38W297PzYi8D1qbCvd9R3tGOVjCIWwV4 -ycbbhrgJSg54oXR+lBZz2uYKzQR4/mh9nai06x/GhVZdkBlZ2Bq67CVgHoR4VoAw -fd/FUfciIf7bVFGrbD2wf+zh+qHmy1q9R/I91WYJxvw1bSMPyOWqeIkvSQIDAQAB -AoGABA9S22MXx2zkbwRJiQWAC3wURQxJM8L33xpkf9MHPIUKNJBolgwAhC3QIQpd -SMJP5z0lQDxGJEXesksvrsdN+vsgbleRfQsAIcY/rEhr9h8m6auM08f+69oIX32o -aTOWJJRofjbgzE5c/RijqhIaYGdq54a0EE9mAaODwZoa2/ECQQDRGrIRI5L3pdRA -yifDKNjvAFOk6TbdGe+J9zHFw4F7bA2In/b+rno9vrj+EanOevD8LRLzeFshzXrG -WQFzZ69/AkEAyhLSY7WNiQTeJWCwXawVnoSl5AMSRYFA/A2sEUokfORR5BS7gqvL -mmEKmvslnZp5qlMtM4AyrW2OaoGvE6sFNwJACB3xK5kl61cUli9Cu+CqCx0IIi6r -YonPMpvV4sdkD1ZycAtFmz1KoXr102b8IHfFQwS855aUcwt26Jwr4j70IQJAXv9+ -PTXq9hF9xiCwiTkPaNh/jLQM8PQU8uoSjIZIpRZJkWpVxNay/z7D15xeULuAmxxD -UcThDjtFCrkw75Qk/QJAFfcM+5r31R1RrBGM1QPKwDqkFTGsFKnMWuS/pXyLTTOv -I+In9ZJyA/R5zKeJZjM7xtZs0ANU9HpOpgespq6CvA== ------END RSA PRIVATE KEY----- diff --git a/testdb/supervisord.conf b/testdb/supervisord.conf index a4d634ec6..95b7f9e4b 100644 --- a/testdb/supervisord.conf +++ 
b/testdb/supervisord.conf @@ -20,7 +20,7 @@ command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssiz command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --dbpath %(here)s/db2 --bind_ip=127.0.0.1 --port 40002 --auth [program:db3] -command = mongod -nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --dbpath %(here)s/db3 --bind_ip=127.0.0.1 --port 40003 --auth --sslMode preferSSL --sslCAFile %(here)s/server.pem --sslPEMKeyFile %(here)s/server.pem +command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --dbpath %(here)s/db3 --bind_ip=127.0.0.1 --port 40003 --auth --sslMode preferSSL --sslCAFile %(here)s/server.pem --sslPEMKeyFile %(here)s/server.pem [program:rs1a] command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs1 --dbpath %(here)s/rs1a --bind_ip=127.0.0.1 --port 40011 From aaa62963e56e107faa677882ec3dd206ca074e95 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Wed, 30 Sep 2015 03:51:37 +0200 Subject: [PATCH 182/305] Move test SSL key instructions into the key itself. 
--- testdb/client.pem | 9 +++++++++ testdb/newkey.txt | 7 ------- 2 files changed, 9 insertions(+), 7 deletions(-) delete mode 100644 testdb/newkey.txt diff --git a/testdb/client.pem b/testdb/client.pem index 50cb6f32d..93aed3556 100644 --- a/testdb/client.pem +++ b/testdb/client.pem @@ -1,3 +1,12 @@ +To regenerate the key: + + openssl req -newkey rsa:2048 -new -x509 -days 36500 -nodes -out server.crt -keyout server.key + cat server.key server.crt > server.pem + openssl genrsa -out client.key 2048 + openssl req -key client.key -new -out client.req + openssl x509 -req -in client.req -CA server.crt -CAkey server.key -days 36500 -CAserial file.srl -out client.crt + cat client.key client.crt > client.pem + -----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAtFIkIZk/h+CCKq5/EjBEg873Jd68CJsFKESB5Zl5KLwiGQm7 wQidZwLul+cyDfPRDzzo3za4GetesD4FVf2BEF6fg+/o0wLBObPCXqUVxXXnEXrJ diff --git a/testdb/newkey.txt b/testdb/newkey.txt deleted file mode 100644 index e1727564a..000000000 --- a/testdb/newkey.txt +++ /dev/null @@ -1,7 +0,0 @@ -openssl req -newkey rsa:2048 -new -x509 -days 36500 -nodes -out server.crt -keyout server.key -cat server.key server.crt > server.pem -openssl genrsa -out client.key 2048 -openssl req -key client.key -new -out client.req -openssl x509 -req -in client.req -CA server.crt -CAkey server.key -days 36500 -CAserial file.srl -out client.crt -cat client.key client.crt > client.pem -#openssl x509 -extensions usr_cert -req -in client.req -CA server.crt -CAkey server.key -days 36500 -CAserial file.srl -out client.pem From 60e73f61ff121ae979096c48305c3436e8610904 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Wed, 30 Sep 2015 04:50:35 +0200 Subject: [PATCH 183/305] Better bulk error messages. Fixes #136. 
--- bulk.go | 51 ++++++++++++++++++++++++++++++++++++++++----------- bulk_test.go | 33 +++++++++++++++++++++++++++++++++ session.go | 46 ++++++++++++++++++++++++++-------------------- 3 files changed, 99 insertions(+), 31 deletions(-) diff --git a/bulk.go b/bulk.go index 57d82ddd0..d69f7b23f 100644 --- a/bulk.go +++ b/bulk.go @@ -1,6 +1,7 @@ package mgo import ( + "bytes" "gopkg.in/mgo.v2-unstable/bson" ) @@ -37,7 +38,7 @@ type bulkUpdateOp []interface{} // TODO: This is private for the moment, until we understand exactly how // to report these multi-errors in a useful and convenient way. type bulkError struct { - err error + errs []error } // BulkResult holds the results for a bulk operation. @@ -52,7 +53,29 @@ type BulkResult struct { } func (e *bulkError) Error() string { - return e.err.Error() + if len(e.errs) == 0{ + return "invalid bulkError instance: no errors" + } + if len(e.errs) == 1 { + return e.errs[0].Error() + } + msgs := make(map[string]bool) + for _, err := range e.errs { + msgs[err.Error()] = true + } + if len(msgs) == 1 { + for msg := range msgs { + return msg + } + } + var buf bytes.Buffer + buf.WriteString("multiple errors in bulk operation:\n") + for msg := range msgs { + buf.WriteString(" - ") + buf.WriteString(msg) + buf.WriteByte('\n') + } + return buf.String() } // Bulk returns a value to prepare the execution of a bulk operation. 
@@ -197,21 +220,16 @@ func (b *Bulk) runInsert(action *bulkAction, result *BulkResult, berr *bulkError if !b.ordered { op.flags = 1 // ContinueOnError } - _, err := b.c.writeOp(op, b.ordered) - if err != nil { - berr.err = err - return false - } - return true + lerr, err := b.c.writeOp(op, b.ordered) + return b.checkSuccess(berr, lerr, err) } func (b *Bulk) runUpdate(action *bulkAction, result *BulkResult, berr *bulkError) bool { ok := true for _, op := range action.docs { lerr, err := b.c.writeOp(op, b.ordered) - if err != nil { + if !b.checkSuccess(berr, lerr, err) { ok = false - berr.err = &bulkError{err} if b.ordered { break } @@ -222,4 +240,15 @@ func (b *Bulk) runUpdate(action *bulkAction, result *BulkResult, berr *bulkError return ok } -// TODO Introduce IsNotFound that also works on bulk. +func (b *Bulk) checkSuccess(berr *bulkError, lerr *LastError, err error) bool { + if lerr != nil && len(lerr.errors) > 0 { + for _, e := range lerr.errors { + berr.errs = append(berr.errs, &QueryError{Code: e.Code, Message: e.ErrMsg}) + } + return false + } else if err != nil { + berr.errs = append(berr.errs, err) + return false + } + return true +} diff --git a/bulk_test.go b/bulk_test.go index b269f4864..fbe1aa450 100644 --- a/bulk_test.go +++ b/bulk_test.go @@ -131,6 +131,39 @@ func (s *S) TestBulkInsertErrorUnorderedSplitBatch(c *C) { c.Assert(res.Id, Equals, 1500) } +func (s *S) TestBulkError(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + // If it's just the same string multiple times, join it into a single message. + bulk := coll.Bulk() + bulk.Unordered() + bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2}) + _, err = bulk.Run() + c.Assert(err, ErrorMatches, ".*duplicate key.*") + c.Assert(err, Not(ErrorMatches), ".*duplicate key.*duplicate key") + c.Assert(mgo.IsDup(err), Equals, true) + + // With matching errors but different messages, present them all. 
+ bulk = coll.Bulk() + bulk.Unordered() + bulk.Insert(M{"_id": "dupone"}, M{"_id": "dupone"}, M{"_id": "duptwo"}, M{"_id": "duptwo"}) + _, err = bulk.Run() + c.Assert(err, ErrorMatches, "multiple errors in bulk operation:\n - .*duplicate.*dupone.*\n - .*duplicate.*duptwo.*\n$") + c.Assert(mgo.IsDup(err), Equals, true) + + // With mixed errors, present them all. + bulk = coll.Bulk() + bulk.Unordered() + bulk.Insert(M{"_id": 1}, M{"_id": []int{2}}) + _, err = bulk.Run() + c.Assert(err, ErrorMatches, "multiple errors in bulk operation:\n - .*duplicate.*\n - .*array.*\n$") + c.Assert(mgo.IsDup(err), Equals, false) +} + func (s *S) TestBulkUpdate(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) diff --git a/session.go b/session.go index 804282389..e7d0edccf 100644 --- a/session.go +++ b/session.go @@ -1311,7 +1311,6 @@ func (c *Collection) DropIndexName(name string) error { session.cluster().CacheIndex(cacheKey, false) } - result := struct { ErrMsg string Ok bool @@ -1326,7 +1325,6 @@ func (c *Collection) DropIndexName(name string) error { return nil } - // Indexes returns a list of all indexes for the collection. 
// // For example, this snippet would drop all available indexes: @@ -2237,6 +2235,7 @@ type LastError struct { UpsertedId interface{} `bson:"upserted"` modified int + errors []writeCmdError } func (err *LastError) Error() string { @@ -2274,7 +2273,12 @@ func IsDup(err error) bool { case *QueryError: return e.Code == 11000 || e.Code == 11001 || e.Code == 12582 case *bulkError: - return IsDup(e.err) + for _, ee := range e.errs { + if !IsDup(ee) { + return false + } + } + return true } return false } @@ -4142,17 +4146,19 @@ type writeCmdResult struct { Index int Id interface{} `_id` } - Errors []struct { - Ok bool - Index int - Code int - N int - ErrMsg string - } `bson:"writeErrors"` - ConcernError struct { - Code int - ErrMsg string - } `bson:"writeConcernError"` + ConcernError writeConcernError `bson:"writeConcernError"` + Errors []writeCmdError `bson:"writeErrors"` +} + +type writeConcernError struct { + Code int + ErrMsg string +} + +type writeCmdError struct { + Index int + Code int + ErrMsg string } // writeOp runs the given modifying operation, potentially followed up @@ -4310,18 +4316,18 @@ func (c *Collection) writeOpCommand(socket *mongoSocket, safeOp *queryOp, op int lerr = &LastError{ UpdatedExisting: result.N > 0 && len(result.Upserted) == 0, N: result.N, - modified: result.NModified, + + modified: result.NModified, + errors: result.Errors, } if len(result.Upserted) > 0 { lerr.UpsertedId = result.Upserted[0].Id } if len(result.Errors) > 0 { e := result.Errors[0] - if !e.Ok { - lerr.Code = e.Code - lerr.Err = e.ErrMsg - err = lerr - } + lerr.Code = e.Code + lerr.Err = e.ErrMsg + err = lerr } else if result.ConcernError.Code != 0 { e := result.ConcernError lerr.Code = e.Code From c279dec060a5b79312bd12c92f01476259e94138 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Wed, 30 Sep 2015 04:56:18 +0200 Subject: [PATCH 184/305] Fix SelectServers example. Fixes #160. 
--- session.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/session.go b/session.go index e7d0edccf..967bcfc14 100644 --- a/session.go +++ b/session.go @@ -1865,7 +1865,7 @@ func (s *Session) Run(cmd interface{}, result interface{}) error { // used for reading operations to those with both tag "disk" set to // "ssd" and tag "rack" set to 1: // -// session.SelectSlaves(bson.D{{"disk", "ssd"}, {"rack", 1}}) +// session.SelectServers(bson.D{{"disk", "ssd"}, {"rack", 1}}) // // Multiple sets of tags may be provided, in which case the used server // must match all tags within any one set. From 42f11439ab05d7163a882fd09d682b81d89b6121 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Wed, 30 Sep 2015 05:27:47 +0200 Subject: [PATCH 185/305] Document that Session is concurrency safe. Fixes #156. --- doc.go | 2 +- session.go | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/doc.go b/doc.go index 9316c5554..859fd9b8d 100644 --- a/doc.go +++ b/doc.go @@ -20,7 +20,7 @@ // // New sessions are typically created by calling session.Copy on the // initial session obtained at dial time. These new sessions will share -// the same cluster information and connection cache, and may be easily +// the same cluster information and connection pool, and may be easily // handed into other methods and functions for organizing logic. // Every session created must have its Close method called at the end // of its life time, so its resources may be put back in the pool or diff --git a/session.go b/session.go index 967bcfc14..b9cb44839 100644 --- a/session.go +++ b/session.go @@ -66,6 +66,12 @@ const ( // When changing the Session type, check if newSession and copySession // need to be updated too. +// Session represents a communication session with the database. +// +// All Session methods are concurrency-safe and may be called from multiple +// goroutines. 
In all session modes but Eventual, using the session from +// multiple goroutines will cause them to share the same underlying socket. +// See the documentation on Session.SetMode for more details. type Session struct { m sync.RWMutex cluster_ *mongoCluster From 51fd6ff0c58da9a71382ac371ee43d144d5a123f Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Wed, 30 Sep 2015 05:53:12 +0200 Subject: [PATCH 186/305] Document serialization order for struct fields. Fixes #132. --- bson/bson.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/bson/bson.go b/bson/bson.go index 41816b874..42744d4c3 100644 --- a/bson/bson.go +++ b/bson/bson.go @@ -421,7 +421,8 @@ func handleErr(err *error) { } // Marshal serializes the in value, which may be a map or a struct value. -// In the case of struct values, only exported fields will be serialized. +// In the case of struct values, only exported fields will be serialized, +// and the order of serialized fields will match that of the struct itself. // The lowercased field name is used as the key for each exported field, // but this behavior may be changed using the respective field tag. // The tag may also contain flags to tweak the marshalling behavior for From f0a7f763a126483b7f9605668cd3f9868f55218a Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Wed, 30 Sep 2015 06:10:51 +0200 Subject: [PATCH 187/305] Initialize NewObjectId counter from random seed. Fixes #173. --- bson/bson.go | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/bson/bson.go b/bson/bson.go index 42744d4c3..f1f9ab747 100644 --- a/bson/bson.go +++ b/bson/bson.go @@ -189,15 +189,25 @@ func IsObjectIdHex(s string) bool { // objectIdCounter is atomically incremented when generating a new ObjectId // using NewObjectId() function. It's used as a counter part of an id. -var objectIdCounter uint32 = 0 +var objectIdCounter uint32 = readRandomUint32() + +// readRandomUint32 returns a random objectIdCounter. 
+func readRandomUint32() uint32 { + var b [4]byte + _, err := io.ReadFull(rand.Reader, b[:]) + if err != nil { + panic(fmt.Errorf("cannot read random object id: %v", err)) + } + return uint32((uint32(b[0]) << 0) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)) +} + // machineId stores machine id generated once and used in subsequent calls // to NewObjectId function. var machineId = readMachineId() -// readMachineId generates machine id and puts it into the machineId global -// variable. If this function fails to get the hostname, it will cause -// a runtime error. +// readMachineId generates and returns a machine id. +// If this function fails to get the hostname it will cause a runtime error. func readMachineId() []byte { var sum [3]byte id := sum[:] From 297592656914854e5685ea9194b958f38daa3697 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Wed, 30 Sep 2015 20:19:29 +0200 Subject: [PATCH 188/305] Add Index.Minf/Maxf to support float64 values. The correct fix would be to change the type of Min and Max, but that's not possible without breaking existing applications. Add the alternative fields and fix the situation in v3. 
--- bulk.go | 2 +- session.go | 24 +++++++++++++++++++----- session_test.go | 32 ++++++++++++++++++++++++-------- 3 files changed, 44 insertions(+), 14 deletions(-) diff --git a/bulk.go b/bulk.go index d69f7b23f..c19aae2d5 100644 --- a/bulk.go +++ b/bulk.go @@ -53,7 +53,7 @@ type BulkResult struct { } func (e *bulkError) Error() string { - if len(e.errs) == 0{ + if len(e.errs) == 0 { return "invalid bulkError instance: no errors" } if len(e.errs) == 1 { diff --git a/session.go b/session.go index b9cb44839..5fa05febc 100644 --- a/session.go +++ b/session.go @@ -980,7 +980,8 @@ type indexSpec struct { DropDups bool "dropDups,omitempty" Background bool ",omitempty" Sparse bool ",omitempty" - Bits, Min, Max int ",omitempty" + Bits int ",omitempty" + Min, Max float64 ",omitempty" BucketSize float64 "bucketSize,omitempty" ExpireAfter int "expireAfterSeconds,omitempty" Weights bson.D ",omitempty" @@ -1004,8 +1005,16 @@ type Index struct { Name string // Properties for spatial indexes. - Bits, Min, Max int - BucketSize float64 + // + // Min and Max were improperly typed as int when they should have been + // floats. To preserve backwards compatibility they are still typed as + // int and the following two fields enable reading and writing the same + // fields as float numbers. In mgo.v3, these fields will be dropped and + // Min/Max will become floats. + Min, Max int + Minf, Maxf float64 + BucketSize float64 + Bits int // Properties for text indexes. 
DefaultLanguage string @@ -1199,8 +1208,8 @@ func (c *Collection) EnsureIndex(index Index) error { Background: index.Background, Sparse: index.Sparse, Bits: index.Bits, - Min: index.Min, - Max: index.Max, + Min: index.Minf, + Max: index.Maxf, BucketSize: index.BucketSize, ExpireAfter: int(index.ExpireAfter / time.Second), Weights: keyInfo.weights, @@ -1208,6 +1217,11 @@ func (c *Collection) EnsureIndex(index Index) error { LanguageOverride: index.LanguageOverride, } + if spec.Min == 0 && spec.Max == 0 { + spec.Min = float64(index.Min) + spec.Max = float64(index.Max) + } + if index.Name != "" { spec.Name = index.Name } diff --git a/session_test.go b/session_test.go index 67b11afd8..a80c1f118 100644 --- a/session_test.go +++ b/session_test.go @@ -2646,8 +2646,8 @@ var indexTests = []struct { "name": "loc_old_2d", "key": M{"loc_old": "2d"}, "ns": "mydb.mycoll", - "min": -500, - "max": 500, + "min": -500.0, + "max": 500.0, "bits": 32, }, }, { @@ -2661,8 +2661,25 @@ var indexTests = []struct { "name": "loc_2d", "key": M{"loc": "2d"}, "ns": "mydb.mycoll", - "min": -500, - "max": 500, + "min": -500.0, + "max": 500.0, + "bits": 32, + }, +}, { + mgo.Index{ + Key: []string{"$2d:loc"}, + Minf: -500.1, + Maxf: 500.1, + Min: 1, // Should be ignored + Max: 2, + Bits: 32, + }, + M{ + "name": "loc_2d", + "key": M{"loc": "2d"}, + "ns": "mydb.mycoll", + "min": -500.1, + "max": 500.1, "bits": 32, }, }, { @@ -2724,9 +2741,9 @@ var indexTests = []struct { Name: "CustomName", }, M{ - "name": "CustomName", - "key": M{"cn": 1}, - "ns": "mydb.mycoll", + "name": "CustomName", + "key": M{"cn": 1}, + "ns": "mydb.mycoll", }, }} @@ -3022,7 +3039,6 @@ func (s *S) TestEnsureIndexNameCaching(c *C) { stats := mgo.GetStats() c.Assert(stats.SentOps, Equals, 0) - // Resetting the cache should make it contact the server again. 
session.ResetIndexCache() From 6227a67df475d5c8442d2b87fcdeb25469e68f48 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Thu, 1 Oct 2015 03:40:29 +0200 Subject: [PATCH 189/305] Fixed Indexes to match closely EnsureIndex input. --- session.go | 38 ++++++++++++++++++++++++++++++-------- session_test.go | 39 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 69 insertions(+), 8 deletions(-) diff --git a/session.go b/session.go index 5fa05febc..9c3edb449 100644 --- a/session.go +++ b/session.go @@ -987,6 +987,7 @@ type indexSpec struct { Weights bson.D ",omitempty" DefaultLanguage string "default_language,omitempty" LanguageOverride string "language_override,omitempty" + TextIndexVersion int "textIndexVersion,omitempty" } type Index struct { @@ -1414,15 +1415,36 @@ func (c *Collection) Indexes() (indexes []Index, err error) { } func indexFromSpec(spec indexSpec) Index { - return Index{ - Name: spec.Name, - Key: simpleIndexKey(spec.Key), - Unique: spec.Unique, - DropDups: spec.DropDups, - Background: spec.Background, - Sparse: spec.Sparse, - ExpireAfter: time.Duration(spec.ExpireAfter) * time.Second, + index := Index{ + Name: spec.Name, + Key: simpleIndexKey(spec.Key), + Unique: spec.Unique, + DropDups: spec.DropDups, + Background: spec.Background, + Sparse: spec.Sparse, + Minf: spec.Min, + Maxf: spec.Max, + Bits: spec.Bits, + BucketSize: spec.BucketSize, + DefaultLanguage: spec.DefaultLanguage, + LanguageOverride: spec.LanguageOverride, + ExpireAfter: time.Duration(spec.ExpireAfter) * time.Second, + } + if float64(int(spec.Min)) == spec.Min && float64(int(spec.Max)) == spec.Max { + index.Min = int(spec.Min) + index.Max = int(spec.Max) + } + if spec.TextIndexVersion > 0 { + index.Key = make([]string, len(spec.Weights)) + index.Weights = make(map[string]int) + for i, elem := range spec.Weights { + index.Key[i] = "$text:" + elem.Name + if w, ok := elem.Value.(int); ok { + index.Weights[elem.Name] = w + } + } } + return index } type indexSlice []Index diff --git 
a/session_test.go b/session_test.go index a80c1f118..dabc74811 100644 --- a/session_test.go +++ b/session_test.go @@ -2782,6 +2782,45 @@ func (s *S) TestEnsureIndex(c *C) { c.Assert(obtained, DeepEquals, test.expected) + // The result of Indexes must match closely what was used to create the index. + indexes, err := coll.Indexes() + c.Assert(err, IsNil) + c.Assert(indexes, HasLen, 2) + gotIndex := indexes[0] + if gotIndex.Name == "_id_" { + gotIndex = indexes[1] + } + wantIndex := test.index + if wantIndex.Name == "" { + wantIndex.Name = gotIndex.Name + } + if strings.HasPrefix(wantIndex.Key[0], "@") { + wantIndex.Key[0] = "$2d:" + wantIndex.Key[0][1:] + } + if wantIndex.Minf == 0 && wantIndex.Maxf == 0 { + wantIndex.Minf = float64(wantIndex.Min) + wantIndex.Maxf = float64(wantIndex.Max) + } else { + wantIndex.Min = gotIndex.Min + wantIndex.Max = gotIndex.Max + } + if wantIndex.DefaultLanguage == "" { + wantIndex.DefaultLanguage = gotIndex.DefaultLanguage + } + if wantIndex.LanguageOverride == "" { + wantIndex.LanguageOverride = gotIndex.LanguageOverride + } + for name, _ := range gotIndex.Weights { + if _, ok := wantIndex.Weights[name]; !ok { + if wantIndex.Weights == nil { + wantIndex.Weights = make(map[string]int) + } + wantIndex.Weights[name] = 1 + } + } + c.Assert(gotIndex, DeepEquals, wantIndex) + + // Drop created index by key or by name if a custom name was used. if test.index.Name == "" { err = coll.DropIndex(test.index.Key...) c.Assert(err, IsNil) From 89612db29991b3e833702606e85932a0c74768c6 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 5 Oct 2015 09:09:27 -0700 Subject: [PATCH 190/305] Fixed bulk error improvements for MongoDB <2.6. 
--- bulk.go | 9 ++++++--- bulk_test.go | 16 ++++++++++++++-- session.go | 48 ++++++++++++++++++++++++++++-------------------- 3 files changed, 48 insertions(+), 25 deletions(-) diff --git a/bulk.go b/bulk.go index c19aae2d5..704b4fa1d 100644 --- a/bulk.go +++ b/bulk.go @@ -187,6 +187,11 @@ func (b *Bulk) Upsert(pairs ...interface{}) { } // Run runs all the operations queued up. +// +// If an error is reported on an unordered bulk operation, the error value may +// be an aggregation of all issues observed. As an exception to that, Insert +// operations running on MongoDB versions prior to 2.6 will report the last +// error only due to a limitation in the wire protocol. func (b *Bulk) Run() (*BulkResult, error) { var result BulkResult var berr bulkError @@ -242,9 +247,7 @@ func (b *Bulk) runUpdate(action *bulkAction, result *BulkResult, berr *bulkError func (b *Bulk) checkSuccess(berr *bulkError, lerr *LastError, err error) bool { if lerr != nil && len(lerr.errors) > 0 { - for _, e := range lerr.errors { - berr.errs = append(berr.errs, &QueryError{Code: e.Code, Message: e.ErrMsg}) - } + berr.errs = append(berr.errs, lerr.errors...) return false } else if err != nil { berr.errs = append(berr.errs, err) diff --git a/bulk_test.go b/bulk_test.go index fbe1aa450..b39f50421 100644 --- a/bulk_test.go +++ b/bulk_test.go @@ -152,7 +152,14 @@ func (s *S) TestBulkError(c *C) { bulk.Unordered() bulk.Insert(M{"_id": "dupone"}, M{"_id": "dupone"}, M{"_id": "duptwo"}, M{"_id": "duptwo"}) _, err = bulk.Run() - c.Assert(err, ErrorMatches, "multiple errors in bulk operation:\n - .*duplicate.*dupone.*\n - .*duplicate.*duptwo.*\n$") + if s.versionAtLeast(2, 6) { + c.Assert(err, ErrorMatches, "multiple errors in bulk operation:\n( - .*duplicate.*\n){2}$") + c.Assert(err, ErrorMatches, "(?s).*dupone.*") + c.Assert(err, ErrorMatches, "(?s).*duptwo.*") + } else { + // Wire protocol query doesn't return all errors. 
+ c.Assert(err, ErrorMatches, ".*duplicate.*") + } c.Assert(mgo.IsDup(err), Equals, true) // With mixed errors, present them all. @@ -160,7 +167,12 @@ func (s *S) TestBulkError(c *C) { bulk.Unordered() bulk.Insert(M{"_id": 1}, M{"_id": []int{2}}) _, err = bulk.Run() - c.Assert(err, ErrorMatches, "multiple errors in bulk operation:\n - .*duplicate.*\n - .*array.*\n$") + if s.versionAtLeast(2, 6) { + c.Assert(err, ErrorMatches, "multiple errors in bulk operation:\n - .*duplicate.*\n - .*array.*\n$") + } else { + // Wire protocol query doesn't return all errors. + c.Assert(err, ErrorMatches, ".*array.*") + } c.Assert(mgo.IsDup(err), Equals, false) } diff --git a/session.go b/session.go index 9c3edb449..16e256722 100644 --- a/session.go +++ b/session.go @@ -2277,7 +2277,7 @@ type LastError struct { UpsertedId interface{} `bson:"upserted"` modified int - errors []writeCmdError + errors []error } func (err *LastError) Error() string { @@ -4203,6 +4203,14 @@ type writeCmdError struct { ErrMsg string } +func (r *writeCmdResult) QueryErrors() []error { + var errs []error + for _, err := range r.Errors { + errs = append(errs, &QueryError{Code: err.Code, Message: err.ErrMsg}) + } + return errs +} + // writeOp runs the given modifying operation, potentially followed up // by a getLastError command in case the session is in safe mode. The // LastError result is made available in lerr, and if lerr.Err is set it @@ -4222,7 +4230,7 @@ func (c *Collection) writeOp(op interface{}, ordered bool) (lerr *LastError, err if socket.ServerInfo().MaxWireVersion >= 2 { // Servers with a more recent write protocol benefit from write commands. if op, ok := op.(*insertOp); ok && len(op.documents) > 1000 { - var firstErr error + var errors []error // Maximum batch size is 1000. Must split out in separate operations for compatibility. 
all := op.documents for i := 0; i < len(all); i += 1000 { @@ -4231,35 +4239,35 @@ func (c *Collection) writeOp(op interface{}, ordered bool) (lerr *LastError, err l = len(all) } op.documents = all[i:l] - _, err := c.writeOpCommand(socket, safeOp, op, ordered) + lerr, err := c.writeOpCommand(socket, safeOp, op, ordered) if err != nil { - if op.flags&1 != 0 { - if firstErr == nil { - firstErr = err - } - } else { - return nil, err + errors = append(errors, lerr.errors...) + if op.flags&1 == 0 { + return &LastError{errors: errors}, err } } } - return nil, firstErr + if len(errors) == 0 { + return nil, nil + } + return &LastError{errors: errors}, errors[0] } return c.writeOpCommand(socket, safeOp, op, ordered) } else if updateOps, ok := op.(bulkUpdateOp); ok { - var firstErr error + var errors []error for _, updateOp := range updateOps { - _, err := c.writeOpQuery(socket, safeOp, updateOp, ordered) + lerr, err := c.writeOpQuery(socket, safeOp, updateOp, ordered) if err != nil { - if !ordered { - if firstErr == nil { - firstErr = err - } - } else { - return nil, err + errors = append(errors, lerr.errors...) + if ordered { + return &LastError{errors: errors}, err } } } - return nil, firstErr + if len(errors) == 0 { + return nil, nil + } + return &LastError{errors: errors}, errors[0] } return c.writeOpQuery(socket, safeOp, op, ordered) } @@ -4360,7 +4368,7 @@ func (c *Collection) writeOpCommand(socket *mongoSocket, safeOp *queryOp, op int N: result.N, modified: result.NModified, - errors: result.Errors, + errors: result.QueryErrors(), } if len(result.Upserted) > 0 { lerr.UpsertedId = result.Upserted[0].Id From 753e934768c70f0ae0010dc6f90339f28c8fd995 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 5 Oct 2015 09:16:48 -0700 Subject: [PATCH 191/305] Add a few notes about future mgo.v3 changes. 
--- session.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/session.go b/session.go index 16e256722..b003fe169 100644 --- a/session.go +++ b/session.go @@ -371,6 +371,8 @@ type DialInfo struct { Dial func(addr net.Addr) (net.Conn, error) } +// mgo.v3: Drop DialInfo.Dial. + // ServerAddr represents the address for establishing a connection to an // individual MongoDB server. type ServerAddr struct { @@ -1028,6 +1030,8 @@ type Index struct { Weights map[string]int } +// mgo.v3: Drop Minf and Maxf and transform Min and Max to floats. + type indexKeyInfo struct { name string key bson.D @@ -2268,6 +2272,8 @@ func (p *Pipe) Batch(n int) *Pipe { return p } +// mgo.v3: Use a single user-visible error type. + type LastError struct { Err string Code, N, Waited int From 672cfa7824f6ae5ca31b035ce5bb599a5295fb99 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 5 Oct 2015 09:24:40 -0700 Subject: [PATCH 192/305] Fix test instability. --- cluster_test.go | 11 +++++++---- session_test.go | 42 ++++++++++++++++++++++++++---------------- suite_test.go | 17 +++++++++++------ testdb/dropall.js | 2 +- 4 files changed, 45 insertions(+), 27 deletions(-) diff --git a/cluster_test.go b/cluster_test.go index dbad407ac..bf17f0dfe 100644 --- a/cluster_test.go +++ b/cluster_test.go @@ -1308,10 +1308,8 @@ func (s *S) TestMonotonicSlaveOkFlagWithMongos(c *C) { opc23b, err := getOpCounters("localhost:40023") c.Assert(err, IsNil) - masterPort := master[strings.Index(master, ":")+1:] - var masterDelta, slaveDelta int - switch masterPort { + switch hostPort(master) { case "40021": masterDelta = opc21b.Query - opc21a.Query slaveDelta = (opc22b.Query - opc22a.Query) + (opc23b.Query - opc23a.Query) @@ -1361,8 +1359,13 @@ func (s *S) TestRemovalOfClusterMember(c *C) { slaveAddr := result.Me defer func() { + config := map[string]string{ + "40021": `{_id: 1, host: "127.0.0.1:40021", priority: 1, tags: {rs2: "a"}}`, + "40022": `{_id: 2, host: "127.0.0.1:40022", priority: 0, tags: {rs2: 
"b"}}`, + "40023": `{_id: 3, host: "127.0.0.1:40023", priority: 0, tags: {rs2: "c"}}`, + } master.Refresh() - master.Run(bson.D{{"$eval", `rs.add("` + slaveAddr + `")`}}, nil) + master.Run(bson.D{{"$eval", `rs.add(` + config[hostPort(slaveAddr)] + `)`}}, nil) master.Close() slave.Close() }() diff --git a/session_test.go b/session_test.go index dabc74811..f644193e7 100644 --- a/session_test.go +++ b/session_test.go @@ -30,7 +30,6 @@ import ( "flag" "fmt" "math" - "reflect" "runtime" "sort" "strconv" @@ -287,10 +286,7 @@ func (s *S) TestDatabaseAndCollectionNames(c *C) { names, err := session.DatabaseNames() c.Assert(err, IsNil) - if !reflect.DeepEqual(names, []string{"db1", "db2"}) { - // 2.4+ has "local" as well. - c.Assert(names, DeepEquals, []string{"db1", "db2", "local"}) - } + c.Assert(filterDBs(names), DeepEquals, []string{"db1", "db2"}) // Try to exercise cursor logic. 2.8.0-rc3 still ignores this. session.SetBatch(2) @@ -698,20 +694,30 @@ func (s *S) TestDropDatabase(c *C) { names, err := session.DatabaseNames() c.Assert(err, IsNil) - if !reflect.DeepEqual(names, []string{"db2"}) { - // 2.4+ has "local" as well. - c.Assert(names, DeepEquals, []string{"db2", "local"}) - } + c.Assert(filterDBs(names), DeepEquals, []string{"db2"}) err = db2.DropDatabase() c.Assert(err, IsNil) names, err = session.DatabaseNames() c.Assert(err, IsNil) - if !reflect.DeepEqual(names, []string(nil)) { - // 2.4+ has "local" as well. 
- c.Assert(names, DeepEquals, []string{"local"}) + c.Assert(filterDBs(names), DeepEquals, []string{}) +} + +func filterDBs(dbs []string) []string { + var i int + for _, name := range dbs { + switch name { + case "admin", "local": + default: + dbs[i] = name + i++ + } } + if len(dbs) == 0 { + return []string{} + } + return dbs[:i] } func (s *S) TestDropCollection(c *C) { @@ -2283,6 +2289,10 @@ func (s *S) TestSortScoreText(c *C) { err = coll.EnsureIndex(mgo.Index{ Key: []string{"$text:a", "$text:b"}, }) + msg := "text search not enabled" + if err != nil && strings.Contains(err.Error(), msg) { + c.Skip(msg) + } c.Assert(err, IsNil) err = coll.Insert(M{ @@ -2756,12 +2766,11 @@ func (s *S) TestEnsureIndex(c *C) { idxs := session.DB("mydb").C("system.indexes") for _, test := range indexTests { - if !s.versionAtLeast(2, 4) && test.expected["weights"] != nil { - // No text indexes until 2.4. + err = coll.EnsureIndex(test.index) + msg := "text search not enabled" + if err != nil && strings.Contains(err.Error(), msg) { continue } - - err = coll.EnsureIndex(test.index) c.Assert(err, IsNil) expectedName := test.index.Name @@ -2778,6 +2787,7 @@ func (s *S) TestEnsureIndex(c *C) { if s.versionAtLeast(2, 7) { // Was deprecated in 2.6, and not being reported by 2.7+. delete(test.expected, "dropDups") + test.index.DropDups = false } c.Assert(obtained, DeepEquals, test.expected) diff --git a/suite_test.go b/suite_test.go index 3f488a364..cf4b6ca84 100644 --- a/suite_test.go +++ b/suite_test.go @@ -103,6 +103,9 @@ func (s *S) SetUpTest(c *C) { func (s *S) TearDownTest(c *C) { if s.stopped { + s.Stop(":40201") + s.Stop(":40202") + s.Stop(":40203") s.StartAll() } for _, host := range s.frozen { @@ -180,13 +183,15 @@ func (s *S) Thaw(host string) { } func (s *S) StartAll() { - // Restart any stopped nodes. - run("cd _testdb && supervisorctl start all") - err := run("cd testdb && mongo --nodb wait.js") - if err != nil { - panic(err) + if s.stopped { + // Restart any stopped nodes. 
+ run("cd _testdb && supervisorctl start all") + err := run("cd testdb && mongo --nodb wait.js") + if err != nil { + panic(err) + } + s.stopped = false } - s.stopped = false } func run(command string) error { diff --git a/testdb/dropall.js b/testdb/dropall.js index 2059349db..7fa39d112 100644 --- a/testdb/dropall.js +++ b/testdb/dropall.js @@ -60,7 +60,7 @@ for (var i in ports) { } function notMaster(result) { - return typeof result.errmsg != "undefined" && result.errmsg.indexOf("not master") >= 0 + return typeof result.errmsg != "undefined" && (result.errmsg.indexOf("not master") >= 0 || result.errmsg.indexOf("no master found")) } // vim:ts=4:sw=4:et From 2a23d74e06b5659db00d638597137a4844d25928 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 5 Oct 2015 09:24:40 -0700 Subject: [PATCH 193/305] Another test stability tweak. --- cluster_test.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/cluster_test.go b/cluster_test.go index bf17f0dfe..5f5fda108 100644 --- a/cluster_test.go +++ b/cluster_test.go @@ -1368,6 +1368,14 @@ func (s *S) TestRemovalOfClusterMember(c *C) { master.Run(bson.D{{"$eval", `rs.add(` + config[hostPort(slaveAddr)] + `)`}}, nil) master.Close() slave.Close() + + // Ensure suite syncs up with the changes before next test. + s.Stop(":40201") + s.StartAll() + time.Sleep(8 * time.Second) + // TODO Find a better way to find out when mongos is fully aware that all + // servers are up. Without that follow up tests that depend on mongos will + // break due to their expectation of things being in a working state. }() c.Logf("========== Removing slave: %s ==========", slaveAddr) From 7adfcd100c2bfcfe8c92be12321f77954e7dba54 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 5 Oct 2015 18:02:44 -0700 Subject: [PATCH 194/305] Properly provide read preference in use to mongos. 
--- session.go | 15 +++++++++------ socket.go | 31 +++++++++++++++++++++++++++++-- 2 files changed, 38 insertions(+), 8 deletions(-) diff --git a/session.go b/session.go index b003fe169..6843254ef 100644 --- a/session.go +++ b/session.go @@ -2940,7 +2940,7 @@ func (q *Query) One(result interface{}) (err error) { } defer socket.Release() - op.flags |= session.slaveOkFlag() + session.prepareQuery(&op) op.limit = -1 data, err := socket.SimpleQuery(&op) @@ -2980,7 +2980,7 @@ func (db *Database) run(socket *mongoSocket, cmd, result interface{}) (err error op.collection = db.Name + ".$cmd" // Query.One: - op.flags |= session.slaveOkFlag() + session.prepareQuery(&op) op.limit = -1 data, err := socket.SimpleQuery(&op) @@ -3173,8 +3173,9 @@ func (q *Query) Iter() *Iter { iter.op.limit = op.limit iter.op.replyFunc = iter.replyFunc() iter.docsToReceive++ + + session.prepareQuery(&op) op.replyFunc = iter.op.replyFunc - op.flags |= session.slaveOkFlag() socket, err := session.acquireSocket(true) if err != nil { @@ -3254,8 +3255,9 @@ func (q *Query) Tail(timeout time.Duration) *Iter { iter.op.limit = op.limit iter.op.replyFunc = iter.replyFunc() iter.docsToReceive++ + session.prepareQuery(&op) op.replyFunc = iter.op.replyFunc - op.flags |= flagTailable | flagAwaitData | session.slaveOkFlag() + op.flags |= flagTailable | flagAwaitData socket, err := session.acquireSocket(true) if err != nil { @@ -3274,10 +3276,11 @@ func (q *Query) Tail(timeout time.Duration) *Iter { return iter } -func (s *Session) slaveOkFlag() (flag queryOpFlags) { +func (s *Session) prepareQuery(op *queryOp) { s.m.RLock() + op.mode = s.consistency if s.slaveOk { - flag = flagSlaveOk + op.flags |= flagSlaveOk } s.m.RUnlock() return diff --git a/socket.go b/socket.go index 725c37e94..ac98229ed 100644 --- a/socket.go +++ b/socket.go @@ -28,6 +28,7 @@ package mgo import ( "errors" + "fmt" "net" "sync" "time" @@ -74,6 +75,7 @@ type queryOp struct { flags queryOpFlags replyFunc replyFunc + mode Mode options 
queryWrapper hasOptions bool serverTags []bson.D @@ -92,9 +94,34 @@ type queryWrapper struct { } func (op *queryOp) finalQuery(socket *mongoSocket) interface{} { - if op.flags&flagSlaveOk != 0 && len(op.serverTags) > 0 && socket.ServerInfo().Mongos { + if socket.ServerInfo().Mongos { + var modeName string + if op.flags&flagSlaveOk == 0 { + modeName = "primary" + } else { + switch op.mode { + case Strong: + modeName = "primary" + case Monotonic, Eventual: + modeName = "secondaryPreferred" + case PrimaryPreferred: + modeName = "primaryPreferred" + case Secondary: + modeName = "secondary" + case SecondaryPreferred: + modeName = "secondaryPreferred" + case Nearest: + modeName = "nearest" + default: + panic(fmt.Sprintf("unsupported read mode: %d", op.mode)) + } + } op.hasOptions = true - op.options.ReadPreference = bson.D{{"mode", "secondaryPreferred"}, {"tags", op.serverTags}} + op.options.ReadPreference = make(bson.D, 0, 2) + op.options.ReadPreference = append(op.options.ReadPreference, bson.DocElem{"mode", modeName}) + if len(op.serverTags) > 0 { + op.options.ReadPreference = append(op.options.ReadPreference, bson.DocElem{"tags", op.serverTags}) + } } if op.hasOptions { if op.query == nil { From c89feac5a7f21bc7e3c6ebe91dd464b8b027de9b Mon Sep 17 00:00:00 2001 From: HaijunWang Date: Wed, 14 Oct 2015 12:32:57 +0800 Subject: [PATCH 195/305] dbs.server.Process.Signal(os.Interrupt) instead of kill , kill causes mongod exit unexpected dbs.server.Process.Signal(os.Interrupt) instead of kill , kill causes mongod exit unexpected --- dbtest/dbserver.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dbtest/dbserver.go b/dbtest/dbserver.go index 85113969d..fe730e860 100644 --- a/dbtest/dbserver.go +++ b/dbtest/dbserver.go @@ -111,7 +111,7 @@ func (dbs *DBServer) Stop() { } if dbs.server != nil { dbs.tomb.Kill(nil) - dbs.server.Process.Kill() + dbs.server.Process.Signal(os.Interrupt) select { case <-dbs.tomb.Dead(): case <-time.After(5 * time.Second): From 
5c58cd274a191c01bdb2fc17cf0c247827627816 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Wed, 14 Oct 2015 16:34:59 -0300 Subject: [PATCH 196/305] Improve Safe.J/FSync docs, more mgo.v3 notes. --- session.go | 27 +++++++++++++++++++-------- 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/session.go b/session.go index 6843254ef..89973c374 100644 --- a/session.go +++ b/session.go @@ -63,6 +63,8 @@ const ( Strong Mode = 2 // Same as Primary. ) +// mgo.v3: Drop Strong mode, suffix all modes with "Mode". + // When changing the Session type, check if newSession and copySession // need to be updated too. @@ -315,7 +317,7 @@ type DialInfo struct { // Timeout is the amount of time to wait for a server to respond when // first connecting and on follow up operations in the session. If // timeout is zero, the call may block forever waiting for a connection - // to be established. + // to be established. Timeout does not affect logic in DialServer. Timeout time.Duration // FailFast will cause connection and query attempts to fail faster when @@ -1031,6 +1033,7 @@ type Index struct { } // mgo.v3: Drop Minf and Maxf and transform Min and Max to floats. +// mgo.v3: Drop DropDups as it's unsupported past 2.8. type indexKeyInfo struct { name string @@ -1716,8 +1719,8 @@ type Safe struct { W int // Min # of servers to ack before success WMode string // Write mode for MongoDB 2.0+ (e.g. "majority") WTimeout int // Milliseconds to wait for W before timing out - FSync bool // Should servers sync to disk before returning success - J bool // Wait for next group commit if journaling; no effect otherwise + FSync bool // Sync via the journal if present, or via data files sync otherwise + J bool // Sync via the journal if present } // Safe returns the current safety mode for the session. @@ -1761,11 +1764,19 @@ func (s *Session) Safe() (safe *Safe) { // the links below for more details (note that MongoDB internally reuses the // "w" field name for WMode). 
// -// If safe.FSync is true and journaling is disabled, the servers will be -// forced to sync all files to disk immediately before returning. If the -// same option is true but journaling is enabled, the server will instead -// await for the next group commit before returning. -// +// If safe.J is true, servers will block until write operations have been +// committed to the journal. Cannot be used in combination with FSync. Prior +// to MongoDB 2.6 this option was ignored if the server was running without +// journaling. Starting with MongoDB 2.6 write operations will fail with an +// exception if this option is used when the server is running without +// journaling. +// +// If safe.FSync is true and the server is running without journaling, blocks +// until the server has synced all data files to disk. If the server is running +// with journaling, this acts the same as the J option, blocking until write +// operations have been committed to the journal. Cannot be used in +// combination with J. +// // Since MongoDB 2.0.0, the safe.J option can also be used instead of FSync // to force the server to wait for a group commit in case journaling is // enabled. The option has no effect if the server has journaling disabled. From 4d04138ffef2791c479c0c8bbffc30b34081b8d9 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 26 Oct 2015 14:34:53 -0200 Subject: [PATCH 197/305] Only send read pref if slaveOk is set This reverts the regression introduced in the latest stable and notified by Louisa Berger via MGO-94. 
--- socket.go | 34 +++++++++++++++------------------- 1 file changed, 15 insertions(+), 19 deletions(-) diff --git a/socket.go b/socket.go index 84ca1f191..19ea28124 100644 --- a/socket.go +++ b/socket.go @@ -94,27 +94,23 @@ type queryWrapper struct { } func (op *queryOp) finalQuery(socket *mongoSocket) interface{} { - if socket.ServerInfo().Mongos { + if op.flags&flagSlaveOk != 0 && socket.ServerInfo().Mongos { var modeName string - if op.flags&flagSlaveOk == 0 { + switch op.mode { + case Strong: modeName = "primary" - } else { - switch op.mode { - case Strong: - modeName = "primary" - case Monotonic, Eventual: - modeName = "secondaryPreferred" - case PrimaryPreferred: - modeName = "primaryPreferred" - case Secondary: - modeName = "secondary" - case SecondaryPreferred: - modeName = "secondaryPreferred" - case Nearest: - modeName = "nearest" - default: - panic(fmt.Sprintf("unsupported read mode: %d", op.mode)) - } + case Monotonic, Eventual: + modeName = "secondaryPreferred" + case PrimaryPreferred: + modeName = "primaryPreferred" + case Secondary: + modeName = "secondary" + case SecondaryPreferred: + modeName = "secondaryPreferred" + case Nearest: + modeName = "nearest" + default: + panic(fmt.Sprintf("unsupported read mode: %d", op.mode)) } op.hasOptions = true op.options.ReadPreference = make(bson.D, 0, 2) From 6eb62ba6b21e292ca4dca50d1e32f26978ddc3e0 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Tue, 27 Oct 2015 18:00:59 -0200 Subject: [PATCH 198/305] Only send read pref if slaveOk is set This reverts the regression introduced in the latest stable and notified by Louisa Berger via MGO-94. 
--- socket.go | 34 +++++++++++++++------------------- 1 file changed, 15 insertions(+), 19 deletions(-) diff --git a/socket.go b/socket.go index ac98229ed..aefc32d0b 100644 --- a/socket.go +++ b/socket.go @@ -94,27 +94,23 @@ type queryWrapper struct { } func (op *queryOp) finalQuery(socket *mongoSocket) interface{} { - if socket.ServerInfo().Mongos { + if op.flags&flagSlaveOk != 0 && socket.ServerInfo().Mongos { var modeName string - if op.flags&flagSlaveOk == 0 { + switch op.mode { + case Strong: modeName = "primary" - } else { - switch op.mode { - case Strong: - modeName = "primary" - case Monotonic, Eventual: - modeName = "secondaryPreferred" - case PrimaryPreferred: - modeName = "primaryPreferred" - case Secondary: - modeName = "secondary" - case SecondaryPreferred: - modeName = "secondaryPreferred" - case Nearest: - modeName = "nearest" - default: - panic(fmt.Sprintf("unsupported read mode: %d", op.mode)) - } + case Monotonic, Eventual: + modeName = "secondaryPreferred" + case PrimaryPreferred: + modeName = "primaryPreferred" + case Secondary: + modeName = "secondary" + case SecondaryPreferred: + modeName = "secondaryPreferred" + case Nearest: + modeName = "nearest" + default: + panic(fmt.Sprintf("unsupported read mode: %d", op.mode)) } op.hasOptions = true op.options.ReadPreference = make(bson.D, 0, 2) From 769d6445be6d9a0e3a933f07742b1c8ac2d28a9d Mon Sep 17 00:00:00 2001 From: Alexandre Cesaro Date: Thu, 29 Oct 2015 12:25:11 +0100 Subject: [PATCH 199/305] Remove the errors when a struct tag contains a slash. --- bson/bson.go | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/bson/bson.go b/bson/bson.go index f1f9ab747..a098d0a96 100644 --- a/bson/bson.go +++ b/bson/bson.go @@ -618,24 +618,6 @@ func getStructInfo(st reflect.Type) (*structInfo, error) { continue } - // XXX Drop this after a few releases. 
- if s := strings.Index(tag, "/"); s >= 0 { - recommend := tag[:s] - for _, c := range tag[s+1:] { - switch c { - case 'c': - recommend += ",omitempty" - case 's': - recommend += ",minsize" - default: - msg := fmt.Sprintf("Unsupported flag %q in tag %q of type %s", string([]byte{uint8(c)}), tag, st) - panic(externalPanic(msg)) - } - } - msg := fmt.Sprintf("Replace tag %q in field %s of type %s by %q", tag, field.Name, st, recommend) - panic(externalPanic(msg)) - } - inline := false fields := strings.Split(tag, ",") if len(fields) > 1 { From 6f16319a3e202fc586438b99948bdc323939be3b Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Thu, 29 Oct 2015 10:40:56 -0200 Subject: [PATCH 200/305] Improve test suite setup reliability in 3.2. --- testdb/init.js | 20 ++++++++++++++++++-- testdb/setup.sh | 2 +- testdb/supervisord.conf | 9 ++++++--- testdb/wait.js | 2 +- 4 files changed, 26 insertions(+), 7 deletions(-) diff --git a/testdb/init.js b/testdb/init.js index 7f3a9f0ea..ceb75a5e4 100644 --- a/testdb/init.js +++ b/testdb/init.js @@ -58,14 +58,30 @@ function configAuth() { addrs.push("127.0.0.1:40003") } for (var i in addrs) { + print("Configuring auth for", addrs[i]) var db = new Mongo(addrs[i]).getDB("admin") var v = db.serverBuildInfo().versionArray + var timedOut = false if (v < [2, 5]) { db.addUser("root", "rapadura") } else { - db.createUser({user: "root", pwd: "rapadura", roles: ["root"]}) + try { + db.createUser({user: "root", pwd: "rapadura", roles: ["root"]}) + } catch (err) { + // 3.2 consistently fails replication of creds on 40031 (config server) + print("createUser command returned an error: " + err) + if (String(err).indexOf("timed out") >= 0) { + timedOut = true; + } + } + } + for (var i = 0; i < 60; i++) { + var ok = db.auth("root", "rapadura") + if (ok || !timedOut) { + break + } + sleep(1000); } - db.auth("root", "rapadura") if (v >= [2, 6]) { db.createUser({user: "reader", pwd: "rapadura", roles: ["readAnyDatabase"]}) } else if (v >= [2, 4]) { diff 
--git a/testdb/setup.sh b/testdb/setup.sh index 317e8e5ab..a121847e3 100755 --- a/testdb/setup.sh +++ b/testdb/setup.sh @@ -15,7 +15,7 @@ start() { echo "Running supervisord..." supervisord || ( echo "Supervisord failed executing ($?)" && exit 1 ) echo "Supervisord is up, starting $COUNT processes..." - for i in $(seq 10); do + for i in $(seq 30); do RUNNING=$(supervisorctl status | grep RUNNING | wc -l | tr -d ' ') echo "$RUNNING processes running..." if [ x$COUNT = x$RUNNING ]; then diff --git a/testdb/supervisord.conf b/testdb/supervisord.conf index 95b7f9e4b..ebb777e54 100644 --- a/testdb/supervisord.conf +++ b/testdb/supervisord.conf @@ -47,19 +47,22 @@ command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssiz command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs4 --dbpath %(here)s/rs4a --bind_ip=127.0.0.1 --port 40041 [program:cfg1] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --configsvr --dbpath %(here)s/cfg1 --bind_ip=127.0.0.1 --port 40101 +command = mongod --nohttpinterface --noprealloc --smallfiles --nssize=1 --oplogSize=1 --configsvr --dbpath %(here)s/cfg1 --bind_ip=127.0.0.1 --port 40101 [program:cfg2] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --configsvr --dbpath %(here)s/cfg2 --bind_ip=127.0.0.1 --port 40102 +command = mongod --nohttpinterface --noprealloc --smallfiles --nssize=1 --oplogSize=1 --configsvr --dbpath %(here)s/cfg2 --bind_ip=127.0.0.1 --port 40102 [program:cfg3] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --configsvr --dbpath %(here)s/cfg3 --bind_ip=127.0.0.1 --port 40103 --auth --keyFile=%(here)s/keyfile +command = mongod --nohttpinterface --noprealloc --smallfiles --nssize=1 --oplogSize=1 --configsvr --dbpath %(here)s/cfg3 --bind_ip=127.0.0.1 --port 40103 --auth 
--keyFile=%(here)s/keyfile [program:s1] command = mongos --configdb 127.0.0.1:40101 --bind_ip=127.0.0.1 --port 40201 --chunkSize 1 +startretries = 10 [program:s2] command = mongos --configdb 127.0.0.1:40102 --bind_ip=127.0.0.1 --port 40202 --chunkSize 1 +startretries = 10 [program:s3] command = mongos --configdb 127.0.0.1:40103 --bind_ip=127.0.0.1 --port 40203 --chunkSize 1 --keyFile=%(here)s/keyfile +startretries = 10 diff --git a/testdb/wait.js b/testdb/wait.js index fbde0749f..2735d0e56 100644 --- a/testdb/wait.js +++ b/testdb/wait.js @@ -52,7 +52,7 @@ function countHealthy(rs) { var totalRSMembers = rs1cfg.members.length + rs2cfg.members.length + rs3cfg.members.length -for (var i = 0; i != 60; i++) { +for (var i = 0; i != 90; i++) { var count = countHealthy(rs1a) + countHealthy(rs2a) + countHealthy(rs3a) print("Replica sets have", count, "healthy nodes.") if (count == totalRSMembers) { From 11a571bf266248830e77cee7170df975d5c2a503 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Fri, 6 Nov 2015 11:52:34 -0200 Subject: [PATCH 201/305] BuildInfo.SysInfo is deprecated on 3.2. --- session.go | 5 ++++- session_test.go | 8 +++++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/session.go b/session.go index 89973c374..bd6aae71c 100644 --- a/session.go +++ b/session.go @@ -4022,7 +4022,7 @@ type BuildInfo struct { VersionArray []int `bson:"versionArray"` // On MongoDB 2.0+; assembled from Version otherwise GitVersion string `bson:"gitVersion"` OpenSSLVersion string `bson:"OpenSSLVersion"` - SysInfo string `bson:"sysInfo"` + SysInfo string `bson:"sysInfo"` // Deprecated and empty on MongoDB 3.2+. Bits int Debug bool MaxObjectSize int `bson:"maxBsonObjectSize"` @@ -4064,6 +4064,9 @@ func (s *Session) BuildInfo() (info BuildInfo, err error) { // That information may be moved to another field if people need it. 
info.GitVersion = info.GitVersion[:i] } + if info.SysInfo == "deprecated" { + info.SysInfo = "" + } return } diff --git a/session_test.go b/session_test.go index f644193e7..a15d4bd26 100644 --- a/session_test.go +++ b/session_test.go @@ -3474,7 +3474,13 @@ func (s *S) TestBuildInfo(c *C) { c.Assert(info.VersionArray, DeepEquals, v) c.Assert(info.GitVersion, Matches, "[a-z0-9]+") - c.Assert(info.SysInfo, Matches, ".*[0-9:]+.*") + + if s.versionAtLeast(3, 2) { + // It was deprecated in 3.2. + c.Assert(info.SysInfo, Equals, "") + } else { + c.Assert(info.SysInfo, Matches, ".*[0-9:]+.*") + } if info.Bits != 32 && info.Bits != 64 { c.Fatalf("info.Bits is %d", info.Bits) } From 76d16c7d8f0aa6893d6083a04ebf2e978212201d Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Fri, 6 Nov 2015 11:57:43 -0200 Subject: [PATCH 202/305] Expect textIndexVersion to be 3 in tests for 3.2. --- session_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/session_test.go b/session_test.go index a15d4bd26..8c8319fb1 100644 --- a/session_test.go +++ b/session_test.go @@ -2789,6 +2789,9 @@ func (s *S) TestEnsureIndex(c *C) { delete(test.expected, "dropDups") test.index.DropDups = false } + if s.versionAtLeast(3, 2) && test.expected["textIndexVersion"] != nil { + test.expected["textIndexVersion"] = 3 + } c.Assert(obtained, DeepEquals, test.expected) From 36bcd77d1e09a9837fbd9c141a2af080dfc44c7a Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Fri, 6 Nov 2015 12:21:17 -0200 Subject: [PATCH 203/305] Stabilize order of bulk error messages. 
--- bulk.go | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/bulk.go b/bulk.go index 704b4fa1d..2e489d727 100644 --- a/bulk.go +++ b/bulk.go @@ -59,18 +59,21 @@ func (e *bulkError) Error() string { if len(e.errs) == 1 { return e.errs[0].Error() } - msgs := make(map[string]bool) + msgs := make([]string, 0, len(e.errs)) + seen := make(map[string]bool) for _, err := range e.errs { - msgs[err.Error()] = true + msg := err.Error() + if !seen[msg] { + seen[msg] = true + msgs = append(msgs, msg) + } } if len(msgs) == 1 { - for msg := range msgs { - return msg - } + return msgs[0] } var buf bytes.Buffer buf.WriteString("multiple errors in bulk operation:\n") - for msg := range msgs { + for _, msg := range msgs { buf.WriteString(" - ") buf.WriteString(msg) buf.WriteByte('\n') From d3f62eb4497af7a3ac753f1e81c302719bd77d54 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Tue, 10 Nov 2015 21:03:02 -0200 Subject: [PATCH 204/305] Fix test for query comment field change in 3.2. --- session_test.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/session_test.go b/session_test.go index 8c8319fb1..3ad988ab5 100644 --- a/session_test.go +++ b/session_test.go @@ -1202,7 +1202,13 @@ func (s *S) TestQueryComment(c *C) { err = query.One(nil) c.Assert(err, IsNil) - n, err := session.DB("mydb").C("system.profile").Find(bson.M{"query.$query.n": 41, "query.$comment": "some comment"}).Count() + commentField := "query.$comment" + nField := "query.$query.n" + if s.versionAtLeast(3, 2) { + commentField = "query.comment" + nField = "query.filter.n" + } + n, err := session.DB("mydb").C("system.profile").Find(bson.M{nField: 41, commentField: "some comment"}).Count() c.Assert(err, IsNil) c.Assert(n, Equals, 1) } From 07e4ee6b61737a8eec2fd3636beb675c58a0a620 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Wed, 11 Nov 2015 14:57:49 -0200 Subject: [PATCH 205/305] Query snapshot is broken in 3.2. Skip test for now. 
Issue tracked at https://jira.mongodb.org/browse/SERVER-21403 --- session_test.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/session_test.go b/session_test.go index 3ad988ab5..c57bbb487 100644 --- a/session_test.go +++ b/session_test.go @@ -2191,6 +2191,10 @@ func (s *S) TestFindForResetsResult(c *C) { } func (s *S) TestFindIterSnapshot(c *C) { + if s.versionAtLeast(3, 2) { + c.Skip("Broken in 3.2: https://jira.mongodb.org/browse/SERVER-21403") + } + session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() From 56e69fecda9c7743b0146563a1efb5364cb361d2 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Wed, 11 Nov 2015 15:00:29 -0200 Subject: [PATCH 206/305] In 3.2 mongos test count commands rather than ops. --- cluster_test.go | 126 ++++++++++++++++++++++++++---------------------- 1 file changed, 69 insertions(+), 57 deletions(-) diff --git a/cluster_test.go b/cluster_test.go index 5f5fda108..a77a6df64 100644 --- a/cluster_test.go +++ b/cluster_test.go @@ -1240,25 +1240,25 @@ func (s *S) TestFailFast(c *C) { c.Assert(started.After(time.Now().Add(-time.Second)), Equals, true) } -type OpCounters struct { - Insert int - Query int - Update int - Delete int - GetMore int - Command int -} - -func getOpCounters(server string) (c *OpCounters, err error) { +func (s *S) countQueries(c *C, server string) int { session, err := mgo.Dial(server + "?connect=direct") - if err != nil { - return nil, err - } + c.Assert(err, IsNil) defer session.Close() session.SetMode(mgo.Monotonic, true) - result := struct{ OpCounters }{} + var result struct { + OpCounters struct { + Query int + } + Metrics struct { + Commands struct{ Find struct{ Total int } } + } + } err = session.Run("serverStatus", &result) - return &result.OpCounters, err + c.Assert(err, IsNil) + if s.versionAtLeast(3, 2) { + return result.Metrics.Commands.Find.Total + } + return result.OpCounters.Query } func (s *S) TestMonotonicSlaveOkFlagWithMongos(c *C) { @@ -1277,14 +1277,34 @@ 
func (s *S) TestMonotonicSlaveOkFlagWithMongos(c *C) { master := ssresult.Host c.Assert(imresult.IsMaster, Equals, true, Commentf("%s is not the master", master)) - // Collect op counters for everyone. - opc21a, err := getOpCounters("localhost:40021") - c.Assert(err, IsNil) - opc22a, err := getOpCounters("localhost:40022") - c.Assert(err, IsNil) - opc23a, err := getOpCounters("localhost:40023") + // Insert some data as otherwise 3.2+ won't run the query at all. + err = session.DB("mydb").C("mycoll").Insert(bson.M{"n": 1}) c.Assert(err, IsNil) + // Wait until all servers see the data. + for _, addr := range []string{"localhost:40021", "localhost:40022", "localhost:40023"} { + session, err := mgo.Dial(addr + "?connect=direct") + c.Assert(err, IsNil) + defer session.Close() + session.SetMode(mgo.Monotonic, true) + for i := 300; i >= 0; i-- { + n, err := session.DB("mydb").C("mycoll").Find(nil).Count() + c.Assert(err, IsNil) + if n == 1 { + break + } + if i == 0 { + c.Fatalf("Inserted data never reached " + addr) + } + time.Sleep(100 * time.Millisecond) + } + } + + // Collect op counters for everyone. + q21a := s.countQueries(c, "localhost:40021") + q22a := s.countQueries(c, "localhost:40022") + q23a := s.countQueries(c, "localhost:40023") + // Do a SlaveOk query through MongoS mongos, err := mgo.Dial("localhost:40202") @@ -1294,31 +1314,29 @@ func (s *S) TestMonotonicSlaveOkFlagWithMongos(c *C) { mongos.SetMode(mgo.Monotonic, true) coll := mongos.DB("mydb").C("mycoll") - result := &struct{}{} + var result struct{ N int } for i := 0; i != 5; i++ { - err := coll.Find(nil).One(result) - c.Assert(err, Equals, mgo.ErrNotFound) + err := coll.Find(nil).One(&result) + c.Assert(err, IsNil) + c.Assert(result.N, Equals, 1) } // Collect op counters for everyone again. 
- opc21b, err := getOpCounters("localhost:40021") - c.Assert(err, IsNil) - opc22b, err := getOpCounters("localhost:40022") - c.Assert(err, IsNil) - opc23b, err := getOpCounters("localhost:40023") - c.Assert(err, IsNil) + q21b := s.countQueries(c, "localhost:40021") + q22b := s.countQueries(c, "localhost:40022") + q23b := s.countQueries(c, "localhost:40023") var masterDelta, slaveDelta int switch hostPort(master) { case "40021": - masterDelta = opc21b.Query - opc21a.Query - slaveDelta = (opc22b.Query - opc22a.Query) + (opc23b.Query - opc23a.Query) + masterDelta = q21b - q21a + slaveDelta = (q22b - q22a) + (q23b - q23a) case "40022": - masterDelta = opc22b.Query - opc22a.Query - slaveDelta = (opc21b.Query - opc21a.Query) + (opc23b.Query - opc23a.Query) + masterDelta = q22b - q22a + slaveDelta = (q21b - q21a) + (q23b - q23a) case "40023": - masterDelta = opc23b.Query - opc23a.Query - slaveDelta = (opc21b.Query - opc21a.Query) + (opc22b.Query - opc22a.Query) + masterDelta = q23b - q23a + slaveDelta = (q21b - q21a) + (q22b - q22a) default: c.Fatal("Uh?") } @@ -1857,12 +1875,9 @@ func (s *S) TestSelectServersWithMongos(c *C) { } // Collect op counters for everyone. - opc21a, err := getOpCounters("localhost:40021") - c.Assert(err, IsNil) - opc22a, err := getOpCounters("localhost:40022") - c.Assert(err, IsNil) - opc23a, err := getOpCounters("localhost:40023") - c.Assert(err, IsNil) + q21a := s.countQueries(c, "localhost:40021") + q22a := s.countQueries(c, "localhost:40022") + q23a := s.countQueries(c, "localhost:40023") // Do a SlaveOk query through MongoS mongos, err := mgo.Dial("localhost:40202") @@ -1889,26 +1904,23 @@ func (s *S) TestSelectServersWithMongos(c *C) { } // Collect op counters for everyone again. 
- opc21b, err := getOpCounters("localhost:40021") - c.Assert(err, IsNil) - opc22b, err := getOpCounters("localhost:40022") - c.Assert(err, IsNil) - opc23b, err := getOpCounters("localhost:40023") - c.Assert(err, IsNil) + q21b := s.countQueries(c, "localhost:40021") + q22b := s.countQueries(c, "localhost:40022") + q23b := s.countQueries(c, "localhost:40023") switch hostPort(master) { case "40021": - c.Check(opc21b.Query-opc21a.Query, Equals, 0) - c.Check(opc22b.Query-opc22a.Query, Equals, 5) - c.Check(opc23b.Query-opc23a.Query, Equals, 7) + c.Check(q21b-q21a, Equals, 0) + c.Check(q22b-q22a, Equals, 5) + c.Check(q23b-q23a, Equals, 7) case "40022": - c.Check(opc21b.Query-opc21a.Query, Equals, 5) - c.Check(opc22b.Query-opc22a.Query, Equals, 0) - c.Check(opc23b.Query-opc23a.Query, Equals, 7) + c.Check(q21b-q21a, Equals, 5) + c.Check(q22b-q22a, Equals, 0) + c.Check(q23b-q23a, Equals, 7) case "40023": - c.Check(opc21b.Query-opc21a.Query, Equals, 5) - c.Check(opc22b.Query-opc22a.Query, Equals, 7) - c.Check(opc23b.Query-opc23a.Query, Equals, 0) + c.Check(q21b-q21a, Equals, 5) + c.Check(q22b-q22a, Equals, 7) + c.Check(q23b-q23a, Equals, 0) default: c.Fatal("Uh?") } From a1dc310eb00c167bde8f846936e7fd97bc8fe419 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Thu, 12 Nov 2015 14:59:21 -0200 Subject: [PATCH 207/305] Several more fixes for tests in 3.2. 
--- cluster_test.go | 32 ++++++++++++++++++-------------- session_test.go | 47 ++++++++++++++--------------------------------- 2 files changed, 32 insertions(+), 47 deletions(-) diff --git a/cluster_test.go b/cluster_test.go index a77a6df64..926b7c18a 100644 --- a/cluster_test.go +++ b/cluster_test.go @@ -877,9 +877,9 @@ func (s *S) TestPreserveSocketCountOnSync(c *C) { defer session.Close() stats := mgo.GetStats() - for stats.MasterConns+stats.SlaveConns != 3 { + for stats.SocketsAlive != 3 { + c.Logf("Waiting for all connections to be established (sockets alive currently %d)...", stats.SocketsAlive) stats = mgo.GetStats() - c.Log("Waiting for all connections to be established...") time.Sleep(5e8) } @@ -1240,7 +1240,8 @@ func (s *S) TestFailFast(c *C) { c.Assert(started.After(time.Now().Add(-time.Second)), Equals, true) } -func (s *S) countQueries(c *C, server string) int { +func (s *S) countQueries(c *C, server string) (n int) { + defer func() { c.Logf("Queries for %q: %d", server, n) }() session, err := mgo.Dial(server + "?connect=direct") c.Assert(err, IsNil) defer session.Close() @@ -1277,8 +1278,16 @@ func (s *S) TestMonotonicSlaveOkFlagWithMongos(c *C) { master := ssresult.Host c.Assert(imresult.IsMaster, Equals, true, Commentf("%s is not the master", master)) - // Insert some data as otherwise 3.2+ won't run the query at all. - err = session.DB("mydb").C("mycoll").Insert(bson.M{"n": 1}) + // Ensure mongos is aware about the current topology. + s.Stop(":40201") + s.StartAll() + + mongos, err := mgo.Dial("localhost:40202") + c.Assert(err, IsNil) + defer mongos.Close() + + // Insert some data as otherwise 3.2+ doesn't seem to run the query at all. + err = mongos.DB("mydb").C("mycoll").Insert(bson.M{"n": 1}) c.Assert(err, IsNil) // Wait until all servers see the data. 
@@ -1307,16 +1316,12 @@ func (s *S) TestMonotonicSlaveOkFlagWithMongos(c *C) { // Do a SlaveOk query through MongoS - mongos, err := mgo.Dial("localhost:40202") - c.Assert(err, IsNil) - defer mongos.Close() - mongos.SetMode(mgo.Monotonic, true) coll := mongos.DB("mydb").C("mycoll") var result struct{ N int } for i := 0; i != 5; i++ { - err := coll.Find(nil).One(&result) + err = coll.Find(nil).One(&result) c.Assert(err, IsNil) c.Assert(result.N, Equals, 1) } @@ -1473,12 +1478,11 @@ func (s *S) TestPoolLimitMany(c *C) { defer session.Close() stats := mgo.GetStats() - for stats.MasterConns+stats.SlaveConns != 3 { + for stats.SocketsAlive != 3 { + c.Logf("Waiting for all connections to be established (sockets alive currently %d)...", stats.SocketsAlive) stats = mgo.GetStats() - c.Log("Waiting for all connections to be established...") - time.Sleep(500 * time.Millisecond) + time.Sleep(5e8) } - c.Assert(stats.SocketsAlive, Equals, 3) const poolLimit = 64 session.SetPoolLimit(poolLimit) diff --git a/session_test.go b/session_test.go index c57bbb487..6d8bfbe0b 100644 --- a/session_test.go +++ b/session_test.go @@ -1664,7 +1664,7 @@ func (s *S) TestFindTailTimeoutWithSleep(c *C) { mgo.ResetStats() - timeout := 3 * time.Second + timeout := 3500 * time.Millisecond query := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Prefetch(0).Batch(2) iter := query.Tail(timeout) @@ -1686,39 +1686,34 @@ func (s *S) TestFindTailTimeoutWithSleep(c *C) { mgo.ResetStats() // The following call to Next will block. + done := make(chan bool) + defer func() { <-done }() go func() { // The internal AwaitData timing of MongoDB is around 2 seconds, // so this should force mgo to sleep at least once by itself to // respect the requested timeout. 
- time.Sleep(timeout + 5e8*time.Nanosecond) + c.Logf("[GOROUTINE] Starting and sleeping...") + time.Sleep(timeout - 800*time.Millisecond) + c.Logf("[GOROUTINE] Woke up...") session := session.New() - defer session.Close() - coll := session.DB("mydb").C("mycoll") - coll.Insert(M{"n": 47}) + c.Logf("[GOROUTINE] Session created and will insert...") + err := coll.Insert(M{"n": 47}) + c.Logf("[GOROUTINE] Insert attempted, err=%v...", err) + session.Close() + c.Logf("[GOROUTINE] Session closed.") + c.Check(err, IsNil) + done <- true }() c.Log("Will wait for Next with N=47...") ok := iter.Next(&result) c.Assert(ok, Equals, true) + c.Assert(iter.Err(), IsNil) c.Assert(iter.Timeout(), Equals, false) c.Assert(result.N, Equals, 47) c.Log("Got Next with N=47!") - // The following may break because it depends a bit on the internal - // timing used by MongoDB's AwaitData logic. If it does, the problem - // will be observed as more GET_MORE_OPs than predicted: - // 1*QUERY for nonce + 1*GET_MORE_OP on Next + 1*GET_MORE_OP on Next after sleep + - // 1*INSERT_OP + 1*QUERY_OP for getLastError on insert of 47 - stats := mgo.GetStats() - if s.versionAtLeast(2, 6) { - c.Assert(stats.SentOps, Equals, 4) - } else { - c.Assert(stats.SentOps, Equals, 5) - } - c.Assert(stats.ReceivedOps, Equals, 4) // REPLY_OPs for 1*QUERY_OP for nonce + 2*GET_MORE_OPs + 1*QUERY_OP - c.Assert(stats.ReceivedDocs, Equals, 3) // nonce + N=47 result + getLastError response - c.Log("Will wait for a result which will never come...") started := time.Now() @@ -1898,20 +1893,6 @@ func (s *S) TestFindTailNoTimeout(c *C) { c.Assert(result.N, Equals, 47) c.Log("Got Next with N=47!") - // The following may break because it depends a bit on the internal - // timing used by MongoDB's AwaitData logic. 
If it does, the problem - // will be observed as more GET_MORE_OPs than predicted: - // 1*QUERY_OP for nonce + 1*GET_MORE_OP on Next + - // 1*INSERT_OP + 1*QUERY_OP for getLastError on insert of 47 - stats := mgo.GetStats() - if s.versionAtLeast(2, 6) { - c.Assert(stats.SentOps, Equals, 3) - } else { - c.Assert(stats.SentOps, Equals, 4) - } - c.Assert(stats.ReceivedOps, Equals, 3) // REPLY_OPs for 1*QUERY_OP for nonce + 1*GET_MORE_OPs and 1*QUERY_OP - c.Assert(stats.ReceivedDocs, Equals, 3) // nonce + N=47 result + getLastError response - c.Log("Will wait for a result which will never come...") gotNext := make(chan bool) From 458582ca38afba0070050860e2c10c2313c5202e Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 16 Nov 2015 23:35:57 -0200 Subject: [PATCH 208/305] Add Session.SetBypassValidation. Fixes MGO-88. --- session.go | 63 +++++++++++++++++++++++++++++++++---------------- session_test.go | 47 ++++++++++++++++++++++++++++++++++++ 2 files changed, 90 insertions(+), 20 deletions(-) diff --git a/session.go b/session.go index bd6aae71c..9bb7170ad 100644 --- a/session.go +++ b/session.go @@ -75,21 +75,22 @@ const ( // multiple goroutines will cause them to share the same underlying socket. // See the documentation on Session.SetMode for more details. 
type Session struct { - m sync.RWMutex - cluster_ *mongoCluster - slaveSocket *mongoSocket - masterSocket *mongoSocket - slaveOk bool - consistency Mode - queryConfig query - safeOp *queryOp - syncTimeout time.Duration - sockTimeout time.Duration - defaultdb string - sourcedb string - dialCred *Credential - creds []Credential - poolLimit int + m sync.RWMutex + cluster_ *mongoCluster + slaveSocket *mongoSocket + masterSocket *mongoSocket + slaveOk bool + consistency Mode + queryConfig query + safeOp *queryOp + syncTimeout time.Duration + sockTimeout time.Duration + defaultdb string + sourcedb string + dialCred *Credential + creds []Credential + poolLimit int + bypassValidation bool } type Database struct { @@ -1678,6 +1679,24 @@ func (s *Session) SetPoolLimit(limit int) { s.m.Unlock() } +// SetBypassValidation sets whether the server should bypass the registered +// validation expressions executed when documents are inserted or modified, +// in the interest of preserving properties for documents in the collection +// being modfified. The default is to not bypass, and thus to perform the +// validation expressions registered for modified collections. +// +// Document validation was introuced in MongoDB 3.2. +// +// Relevant documentation: +// +// https://docs.mongodb.org/manual/release-notes/3.2/#bypass-validation +// +func (s *Session) SetBypassValidation(bypass bool) { + s.m.Lock() + s.bypassValidation = bypass + s.m.Unlock() +} + // SetBatch sets the default batch size used when fetching documents from the // database. It's possible to change this setting on a per-query basis as // well, using the Query.Batch method. @@ -1776,7 +1795,7 @@ func (s *Session) Safe() (safe *Safe) { // with journaling, this acts the same as the J option, blocking until write // operations have been committed to the journal. Cannot be used in // combination with J. 
-// +// // Since MongoDB 2.0.0, the safe.J option can also be used instead of FSync // to force the server to wait for a group commit in case journaling is // enabled. The option has no effect if the server has journaling disabled. @@ -4022,7 +4041,7 @@ type BuildInfo struct { VersionArray []int `bson:"versionArray"` // On MongoDB 2.0+; assembled from Version otherwise GitVersion string `bson:"gitVersion"` OpenSSLVersion string `bson:"OpenSSLVersion"` - SysInfo string `bson:"sysInfo"` // Deprecated and empty on MongoDB 3.2+. + SysInfo string `bson:"sysInfo"` // Deprecated and empty on MongoDB 3.2+. Bits int Debug bool MaxObjectSize int `bson:"maxBsonObjectSize"` @@ -4248,6 +4267,7 @@ func (c *Collection) writeOp(op interface{}, ordered bool) (lerr *LastError, err s.m.RLock() safeOp := s.safeOp + bypassValidation := s.bypassValidation s.m.RUnlock() if socket.ServerInfo().MaxWireVersion >= 2 { @@ -4262,7 +4282,7 @@ func (c *Collection) writeOp(op interface{}, ordered bool) (lerr *LastError, err l = len(all) } op.documents = all[i:l] - lerr, err := c.writeOpCommand(socket, safeOp, op, ordered) + lerr, err := c.writeOpCommand(socket, safeOp, op, ordered, bypassValidation) if err != nil { errors = append(errors, lerr.errors...) 
if op.flags&1 == 0 { @@ -4275,7 +4295,7 @@ func (c *Collection) writeOp(op interface{}, ordered bool) (lerr *LastError, err } return &LastError{errors: errors}, errors[0] } - return c.writeOpCommand(socket, safeOp, op, ordered) + return c.writeOpCommand(socket, safeOp, op, ordered, bypassValidation) } else if updateOps, ok := op.(bulkUpdateOp); ok { var errors []error for _, updateOp := range updateOps { @@ -4335,7 +4355,7 @@ func (c *Collection) writeOpQuery(socket *mongoSocket, safeOp *queryOp, op inter return result, nil } -func (c *Collection) writeOpCommand(socket *mongoSocket, safeOp *queryOp, op interface{}, ordered bool) (lerr *LastError, err error) { +func (c *Collection) writeOpCommand(socket *mongoSocket, safeOp *queryOp, op interface{}, ordered, bypassValidation bool) (lerr *LastError, err error) { var writeConcern interface{} if safeOp == nil { writeConcern = bson.D{{"w", 0}} @@ -4382,6 +4402,9 @@ func (c *Collection) writeOpCommand(socket *mongoSocket, safeOp *queryOp, op int //{"ordered", }, } } + if bypassValidation { + cmd = append(cmd, bson.DocElem{"bypassDocumentValidation", true}) + } var result writeCmdResult err = c.Database.run(socket, cmd, &result) diff --git a/session_test.go b/session_test.go index 6d8bfbe0b..dd083f6ff 100644 --- a/session_test.go +++ b/session_test.go @@ -3840,6 +3840,53 @@ func (s *S) TestNewIterNoServerPresetErr(c *C) { c.Assert(iter.Err(), ErrorMatches, "my error") } +func (s *S) TestBypassValidation(c *C) { + if !s.versionAtLeast(3, 2) { + c.Skip("validation supported on 3.2+") + } + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + err = coll.Insert(M{"n": 1}) + c.Assert(err, IsNil) + + err = coll.Database.Run(bson.D{ + {"collMod", "mycoll"}, + {"validator", M{"s": M{"$type": "string"}}}, + }, nil) + c.Assert(err, IsNil) + + err = coll.Insert(M{"n": 2}) + c.Assert(err, ErrorMatches, "Document failed validation") + + err = 
coll.Update(M{"n": 1}, M{"n": 10}) + c.Assert(err, ErrorMatches, "Document failed validation") + + session.SetBypassValidation(true) + + err = coll.Insert(M{"n": 3}) + c.Assert(err, IsNil) + + err = coll.Update(M{"n": 3}, M{"n": 4}) + c.Assert(err, IsNil) + + // Ensure this still works. Shouldn't be affected. + err = coll.Remove(M{"n": 1}) + c.Assert(err, IsNil) + + var result struct{ N int } + var ns []int + iter := coll.Find(nil).Iter() + for iter.Next(&result) { + ns = append(ns, result.N) + } + c.Assert(iter.Err(), IsNil) + sort.Ints(ns) + c.Assert(ns, DeepEquals, []int{4}) +} + // -------------------------------------------------------------------------- // Some benchmarks that require a running database. From baa4323e7bfcb038182d949288065cb9e45e23e1 Mon Sep 17 00:00:00 2001 From: David Glasser Date: Wed, 25 Nov 2015 11:36:56 -0800 Subject: [PATCH 209/305] Clarify that only exported fields are deserialized Marshal already contains this note; adding it to Unmarshal too for clarity. --- bson/bson.go | 1 + 1 file changed, 1 insertion(+) diff --git a/bson/bson.go b/bson/bson.go index f1f9ab747..0152992f5 100644 --- a/bson/bson.go +++ b/bson/bson.go @@ -475,6 +475,7 @@ func Marshal(in interface{}) (out []byte, err error) { // Unmarshal deserializes data from in into the out value. The out value // must be a map, a pointer to a struct, or a pointer to a bson.D value. +// In the case of struct values, only exported fields will be deserialized. // The lowercased field name is used as the key for each exported field, // but this behavior may be changed using the respective field tag. // The tag may also contain flags to tweak the marshalling behavior for From 09d4460c6bcf00f8be3e4f32a984b1fa858145e7 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Thu, 3 Dec 2015 19:31:06 -0200 Subject: [PATCH 210/305] Add support for bulk removes. 
--- bulk.go | 65 +++++++++++++++++++++++++++++++++++++++------------- bulk_test.go | 49 +++++++++++++++++++++++++++++++++++++++ session.go | 55 +++++++++++++++++++++++++++++++++----------- socket.go | 15 ++++++------ 4 files changed, 148 insertions(+), 36 deletions(-) diff --git a/bulk.go b/bulk.go index 2e489d727..59a9f76af 100644 --- a/bulk.go +++ b/bulk.go @@ -24,6 +24,7 @@ const ( bulkInsert bulkOp = iota + 1 bulkUpdate bulkUpdateAll + bulkRemove ) type bulkAction struct { @@ -32,6 +33,7 @@ type bulkAction struct { } type bulkUpdateOp []interface{} +type bulkDeleteOp []interface{} // BulkError holds an error returned from running a Bulk operation. // @@ -82,9 +84,6 @@ func (e *bulkError) Error() string { } // Bulk returns a value to prepare the execution of a bulk operation. -// -// WARNING: This API is still experimental. -// func (c *Collection) Bulk() *Bulk { return &Bulk{c: c, ordered: true} } @@ -119,6 +118,40 @@ func (b *Bulk) Insert(docs ...interface{}) { action.docs = append(action.docs, docs...) } +// Remove queues up the provided selectors for removing matching documents. +// Each selector will remove only a single matching document. +func (b *Bulk) Remove(selectors ...interface{}) { + action := b.action(bulkRemove) + for _, selector := range selectors { + if selector == nil { + selector = bson.D{} + } + action.docs = append(action.docs, &deleteOp{ + Collection: b.c.FullName, + Selector: selector, + Flags: 1, + Limit: 1, + }) + } +} + +// RemoveAll queues up the provided selectors for removing all matching documents. +// Each selector will remove all matching documents. +func (b *Bulk) RemoveAll(selectors ...interface{}) { + action := b.action(bulkRemove) + for _, selector := range selectors { + if selector == nil { + selector = bson.D{} + } + action.docs = append(action.docs, &deleteOp{ + Collection: b.c.FullName, + Selector: selector, + Flags: 0, + Limit: 0, + }) + } +} + // Update queues up the provided pairs of updating instructions. 
// The first element of each pair selects which documents must be // updated, and the second element defines how to update it. @@ -207,6 +240,8 @@ func (b *Bulk) Run() (*BulkResult, error) { ok = b.runInsert(action, &result, &berr) case bulkUpdate: ok = b.runUpdate(action, &result, &berr) + case bulkRemove: + ok = b.runRemove(action, &result, &berr) default: panic("unknown bulk operation") } @@ -233,19 +268,17 @@ func (b *Bulk) runInsert(action *bulkAction, result *BulkResult, berr *bulkError } func (b *Bulk) runUpdate(action *bulkAction, result *BulkResult, berr *bulkError) bool { - ok := true - for _, op := range action.docs { - lerr, err := b.c.writeOp(op, b.ordered) - if !b.checkSuccess(berr, lerr, err) { - ok = false - if b.ordered { - break - } - } - result.Matched += lerr.N - result.Modified += lerr.modified - } - return ok + lerr, err := b.c.writeOp(bulkUpdateOp(action.docs), b.ordered) + result.Matched += lerr.N + result.Modified += lerr.modified + return b.checkSuccess(berr, lerr, err) +} + +func (b *Bulk) runRemove(action *bulkAction, result *BulkResult, berr *bulkError) bool { + lerr, err := b.c.writeOp(bulkDeleteOp(action.docs), b.ordered) + result.Matched += lerr.N + result.Modified += lerr.modified + return b.checkSuccess(berr, lerr, err) } func (b *Bulk) checkSuccess(berr *bulkError, lerr *LastError, err error) bool { diff --git a/bulk_test.go b/bulk_test.go index b39f50421..df5e9116f 100644 --- a/bulk_test.go +++ b/bulk_test.go @@ -342,3 +342,52 @@ func (s *S) TestBulkUpsert(c *C) { c.Assert(err, IsNil) c.Assert(res, DeepEquals, []doc{{1}, {20}, {30}, {40}}) } + +func (s *S) TestBulkRemove(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3}, M{"n": 4}, M{"n": 4}) + c.Assert(err, IsNil) + + bulk := coll.Bulk() + bulk.Remove(M{"n": 1}) + bulk.Remove(M{"n": 2}, M{"n": 4}) + r, err := bulk.Run() + 
c.Assert(err, IsNil) + c.Assert(r.Matched, Equals, 3) + + type doc struct{ N int } + var res []doc + err = coll.Find(nil).Sort("n").All(&res) + c.Assert(err, IsNil) + c.Assert(res, DeepEquals, []doc{{3}, {4}}) +} + +func (s *S) TestBulkRemoveAll(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3}, M{"n": 4}, M{"n": 4}) + c.Assert(err, IsNil) + + bulk := coll.Bulk() + bulk.RemoveAll(M{"n": 1}) + bulk.RemoveAll(M{"n": 2}, M{"n": 4}) + r, err := bulk.Run() + c.Assert(err, IsNil) + c.Assert(r.Matched, Equals, 4) + + type doc struct{ N int } + var res []doc + err = coll.Find(nil).Sort("n").All(&res) + c.Assert(err, IsNil) + c.Assert(res, DeepEquals, []doc{{3}}) +} + diff --git a/session.go b/session.go index 9bb7170ad..17a6f453b 100644 --- a/session.go +++ b/session.go @@ -2499,7 +2499,10 @@ func (c *Collection) UpsertId(id interface{}, update interface{}) (info *ChangeI // http://www.mongodb.org/display/DOCS/Removing // func (c *Collection) Remove(selector interface{}) error { - lerr, err := c.writeOp(&deleteOp{c.FullName, selector, 1}, true) + if selector == nil { + selector = bson.D{} + } + lerr, err := c.writeOp(&deleteOp{c.FullName, selector, 1, 1}, true) if err == nil && lerr != nil && lerr.N == 0 { return ErrNotFound } @@ -2525,7 +2528,10 @@ func (c *Collection) RemoveId(id interface{}) error { // http://www.mongodb.org/display/DOCS/Removing // func (c *Collection) RemoveAll(selector interface{}) (info *ChangeInfo, err error) { - lerr, err := c.writeOp(&deleteOp{c.FullName, selector, 0}, true) + if selector == nil { + selector = bson.D{} + } + lerr, err := c.writeOp(&deleteOp{c.FullName, selector, 0, 0}, true) if err == nil && lerr != nil { info = &ChangeInfo{Removed: lerr.N} } @@ -4297,20 +4303,39 @@ func (c *Collection) writeOp(op interface{}, ordered bool) (lerr *LastError, err } return c.writeOpCommand(socket, 
safeOp, op, ordered, bypassValidation) } else if updateOps, ok := op.(bulkUpdateOp); ok { - var errors []error + var lerr LastError for _, updateOp := range updateOps { - lerr, err := c.writeOpQuery(socket, safeOp, updateOp, ordered) + oplerr, err := c.writeOpQuery(socket, safeOp, updateOp, ordered) if err != nil { - errors = append(errors, lerr.errors...) + lerr.N += oplerr.N + lerr.modified += oplerr.modified + lerr.errors = append(lerr.errors, oplerr.errors...) if ordered { - return &LastError{errors: errors}, err + break } } } - if len(errors) == 0 { + if len(lerr.errors) == 0 { return nil, nil } - return &LastError{errors: errors}, errors[0] + return &lerr, lerr.errors[0] + } else if deleteOps, ok := op.(bulkDeleteOp); ok { + var lerr LastError + for _, deleteOp := range deleteOps { + oplerr, err := c.writeOpQuery(socket, safeOp, deleteOp, ordered) + if err != nil { + lerr.N += oplerr.N + lerr.modified += oplerr.modified + lerr.errors = append(lerr.errors, oplerr.errors...) + if ordered { + break + } + } + } + if len(lerr.errors) == 0 { + return nil, nil + } + return &lerr, lerr.errors[0] } return c.writeOpQuery(socket, safeOp, op, ordered) } @@ -4391,15 +4416,19 @@ func (c *Collection) writeOpCommand(socket *mongoSocket, safeOp *queryOp, op int } case *deleteOp: // http://docs.mongodb.org/manual/reference/command/delete - selector := op.selector - if selector == nil { - selector = bson.D{} + cmd = bson.D{ + {"delete", c.Name}, + {"deletes", []interface{}{op}}, + {"writeConcern", writeConcern}, + {"ordered", ordered}, } + case bulkDeleteOp: + // http://docs.mongodb.org/manual/reference/command/delete cmd = bson.D{ {"delete", c.Name}, - {"deletes", []bson.D{{{"q", selector}, {"limit", op.flags & 1}}}}, + {"deletes", op}, {"writeConcern", writeConcern}, - //{"ordered", }, + {"ordered", ordered}, } } if bypassValidation { diff --git a/socket.go b/socket.go index aefc32d0b..f8862a396 100644 --- a/socket.go +++ b/socket.go @@ -162,9 +162,10 @@ type updateOp struct 
{ } type deleteOp struct { - collection string // "database.collection" - selector interface{} - flags uint32 + Collection string `bson:"-"` // "database.collection" + Selector interface{} `bson:"q"` + Flags uint32 `bson:"-"` + Limit int `bson:"limit"` } type killCursorsOp struct { @@ -449,10 +450,10 @@ func (socket *mongoSocket) Query(ops ...interface{}) (err error) { case *deleteOp: buf = addHeader(buf, 2006) buf = addInt32(buf, 0) // Reserved - buf = addCString(buf, op.collection) - buf = addInt32(buf, int32(op.flags)) - debugf("Socket %p to %s: serializing selector document: %#v", socket, socket.addr, op.selector) - buf, err = addBSON(buf, op.selector) + buf = addCString(buf, op.Collection) + buf = addInt32(buf, int32(op.Flags)) + debugf("Socket %p to %s: serializing selector document: %#v", socket, socket.addr, op.Selector) + buf, err = addBSON(buf, op.Selector) if err != nil { return err } From 1a5a4d00e14897e12bcd0726d5c64e44dce4c662 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 7 Dec 2015 00:13:24 -0200 Subject: [PATCH 211/305] Tune harness so it works with 2.6.3 again. --- session_test.go | 5 +++-- testdb/supervisord.conf | 6 +++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/session_test.go b/session_test.go index dd083f6ff..cc0cda91a 100644 --- a/session_test.go +++ b/session_test.go @@ -1664,7 +1664,7 @@ func (s *S) TestFindTailTimeoutWithSleep(c *C) { mgo.ResetStats() - timeout := 3500 * time.Millisecond + timeout := 5 * time.Second query := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Prefetch(0).Batch(2) iter := query.Tail(timeout) @@ -1693,7 +1693,7 @@ func (s *S) TestFindTailTimeoutWithSleep(c *C) { // so this should force mgo to sleep at least once by itself to // respect the requested timeout. 
c.Logf("[GOROUTINE] Starting and sleeping...") - time.Sleep(timeout - 800*time.Millisecond) + time.Sleep(timeout - 2*time.Second) c.Logf("[GOROUTINE] Woke up...") session := session.New() c.Logf("[GOROUTINE] Session created and will insert...") @@ -1707,6 +1707,7 @@ func (s *S) TestFindTailTimeoutWithSleep(c *C) { c.Log("Will wait for Next with N=47...") ok := iter.Next(&result) + c.Log("Next unblocked...") c.Assert(ok, Equals, true) c.Assert(iter.Err(), IsNil) diff --git a/testdb/supervisord.conf b/testdb/supervisord.conf index ebb777e54..724eaa79c 100644 --- a/testdb/supervisord.conf +++ b/testdb/supervisord.conf @@ -47,13 +47,13 @@ command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssiz command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs4 --dbpath %(here)s/rs4a --bind_ip=127.0.0.1 --port 40041 [program:cfg1] -command = mongod --nohttpinterface --noprealloc --smallfiles --nssize=1 --oplogSize=1 --configsvr --dbpath %(here)s/cfg1 --bind_ip=127.0.0.1 --port 40101 +command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --configsvr --dbpath %(here)s/cfg1 --bind_ip=127.0.0.1 --port 40101 [program:cfg2] -command = mongod --nohttpinterface --noprealloc --smallfiles --nssize=1 --oplogSize=1 --configsvr --dbpath %(here)s/cfg2 --bind_ip=127.0.0.1 --port 40102 +command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --configsvr --dbpath %(here)s/cfg2 --bind_ip=127.0.0.1 --port 40102 [program:cfg3] -command = mongod --nohttpinterface --noprealloc --smallfiles --nssize=1 --oplogSize=1 --configsvr --dbpath %(here)s/cfg3 --bind_ip=127.0.0.1 --port 40103 --auth --keyFile=%(here)s/keyfile +command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --configsvr --dbpath %(here)s/cfg3 --bind_ip=127.0.0.1 --port 40103 --auth --keyFile=%(here)s/keyfile [program:s1] 
command = mongos --configdb 127.0.0.1:40101 --bind_ip=127.0.0.1 --port 40201 --chunkSize 1 From dac197412c189d226929bed1bd7b13857f56192b Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Tue, 15 Dec 2015 14:54:48 -0200 Subject: [PATCH 212/305] Do not fallback to Monotonic mode improperly. Problem reported by Sundar. --- cluster_test.go | 44 ++++++++++++++++++++++++++++++++++++++++++++ session.go | 28 +++++++++++++++------------- 2 files changed, 59 insertions(+), 13 deletions(-) diff --git a/cluster_test.go b/cluster_test.go index 926b7c18a..d509afc11 100644 --- a/cluster_test.go +++ b/cluster_test.go @@ -1262,6 +1262,22 @@ func (s *S) countQueries(c *C, server string) (n int) { return result.OpCounters.Query } +func (s *S) countCommands(c *C, server, commandName string) (n int) { + defer func() { c.Logf("Queries for %q: %d", server, n) }() + session, err := mgo.Dial(server + "?connect=direct") + c.Assert(err, IsNil) + defer session.Close() + session.SetMode(mgo.Monotonic, true) + var result struct { + Metrics struct { + Commands map[string]struct{ Total int } + } + } + err = session.Run("serverStatus", &result) + c.Assert(err, IsNil) + return result.Metrics.Commands[commandName].Total +} + func (s *S) TestMonotonicSlaveOkFlagWithMongos(c *C) { session, err := mgo.Dial("localhost:40021") c.Assert(err, IsNil) @@ -1929,3 +1945,31 @@ func (s *S) TestSelectServersWithMongos(c *C) { c.Fatal("Uh?") } } + +func (s *S) TestDoNotFallbackToMonotonic(c *C) { + // There was a bug at some point that some functions were + // falling back to Monotonic mode. This test ensures all listIndexes + // commands go to the primary, as should happen since the session is + // in Strong mode. 
+ + session, err := mgo.Dial("localhost:40012") + c.Assert(err, IsNil) + defer session.Close() + + for i := 0; i < 15; i++ { + q11a := s.countCommands(c, "localhost:40011", "listIndexes") + q12a := s.countCommands(c, "localhost:40012", "listIndexes") + q13a := s.countCommands(c, "localhost:40013", "listIndexes") + + _, err := session.DB("local").C("system.indexes").Indexes() + c.Assert(err, IsNil) + + q11b := s.countCommands(c, "localhost:40011", "listIndexes") + q12b := s.countCommands(c, "localhost:40012", "listIndexes") + q13b := s.countCommands(c, "localhost:40013", "listIndexes") + + c.Assert(q11b, Equals, q11a+1) + c.Assert(q12b, Equals, q12a) + c.Assert(q13b, Equals, q13a) + } +} diff --git a/session.go b/session.go index 17a6f453b..af3a88c65 100644 --- a/session.go +++ b/session.go @@ -1354,6 +1354,17 @@ func (c *Collection) DropIndexName(name string) error { return nil } +// nonEventual returns a clone of session and ensures it is not Eventual. +// This guarantees that the server that is used for queries may be reused +// afterwards when a cursor is received. +func (session *Session) nonEventual() *Session { + cloned := session.Clone() + if cloned.consistency == Eventual { + cloned.SetMode(Monotonic, false) + } + return cloned +} + // Indexes returns a list of all indexes for the collection. // // For example, this snippet would drop all available indexes: @@ -1371,12 +1382,7 @@ func (c *Collection) DropIndexName(name string) error { // // See the EnsureIndex method for more details on indexes. func (c *Collection) Indexes() (indexes []Index, err error) { - // Clone session and set it to Monotonic mode so that the server - // used for the query may be safely obtained afterwards, if - // necessary for iteration when a cursor is received. 
- session := c.Database.Session - cloned := session.Clone() - cloned.SetMode(Monotonic, false) + cloned := c.Database.Session.nonEventual() defer cloned.Close() batchSize := int(cloned.queryConfig.op.limit) @@ -2058,8 +2064,7 @@ func (c *Collection) Repair() *Iter { // used for the query may be safely obtained afterwards, if // necessary for iteration when a cursor is received. session := c.Database.Session - cloned := session.Clone() - cloned.SetMode(Monotonic, false) + cloned := session.nonEventual() defer cloned.Close() batchSize := int(cloned.queryConfig.op.limit) @@ -2143,8 +2148,7 @@ func (p *Pipe) Iter() *Iter { // Clone session and set it to Monotonic mode so that the server // used for the query may be safely obtained afterwards, if // necessary for iteration when a cursor is received. - cloned := p.session.Clone() - cloned.SetMode(Monotonic, false) + cloned := p.session.nonEventual() defer cloned.Close() c := p.collection.With(cloned) @@ -3103,9 +3107,7 @@ func (db *Database) CollectionNames() (names []string, err error) { // Clone session and set it to Monotonic mode so that the server // used for the query may be safely obtained afterwards, if // necessary for iteration when a cursor is received. - session := db.Session - cloned := session.Clone() - cloned.SetMode(Monotonic, false) + cloned := db.Session.nonEventual() defer cloned.Close() batchSize := int(cloned.queryConfig.op.limit) From d4e17e0238172164932c6f5c7a065a670639e793 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Wed, 16 Dec 2015 11:27:50 -0200 Subject: [PATCH 213/305] Work in progress on find and getMore commands. 
--- session.go | 229 ++++++++++++++++++++++++++++++++++++++---------- session_test.go | 36 ++++++-- 2 files changed, 214 insertions(+), 51 deletions(-) diff --git a/session.go b/session.go index af3a88c65..29da178d1 100644 --- a/session.go +++ b/session.go @@ -138,6 +138,7 @@ type Iter struct { docsBeforeMore int timeout time.Duration timedout bool + findCmd bool } var ( @@ -1390,12 +1391,7 @@ func (c *Collection) Indexes() (indexes []Index, err error) { // Try with a command. var result struct { Indexes []bson.Raw - - Cursor struct { - FirstBatch []bson.Raw "firstBatch" - NS string - Id int64 - } + Cursor cursorData } var iter *Iter err = c.Database.With(cloned).Run(bson.D{{"listIndexes", c.Name}, {"cursor", bson.D{{"batchSize", batchSize}}}}, &result) @@ -1687,9 +1683,9 @@ func (s *Session) SetPoolLimit(limit int) { // SetBypassValidation sets whether the server should bypass the registered // validation expressions executed when documents are inserted or modified, -// in the interest of preserving properties for documents in the collection -// being modfified. The default is to not bypass, and thus to perform the -// validation expressions registered for modified collections. +// in the interest of preserving invariants in the collection being modified. +// The default is to not bypass, and thus to perform the validation +// expressions registered for modified collections. // // Document validation was introuced in MongoDB 3.2. // @@ -2069,12 +2065,7 @@ func (c *Collection) Repair() *Iter { batchSize := int(cloned.queryConfig.op.limit) - var result struct { - Cursor struct { - FirstBatch []bson.Raw "firstBatch" - Id int64 - } - } + var result struct{ Cursor cursorData } cmd := repairCmd{ RepairCursor: c.Name, @@ -2153,14 +2144,8 @@ func (p *Pipe) Iter() *Iter { c := p.collection.With(cloned) var result struct { - // 2.4, no cursors. - Result []bson.Raw - - // 2.6+, with cursors. 
- Cursor struct { - FirstBatch []bson.Raw "firstBatch" - Id int64 - } + Result []bson.Raw // 2.4, no cursors. + Cursor cursorData // 2.6+, with cursors. } cmd := pipeCmd{ @@ -2980,9 +2965,12 @@ func (q *Query) One(result interface{}) (err error) { } defer socket.Release() - session.prepareQuery(&op) op.limit = -1 + session.prepareQuery(&op) + + prepareFindOp(socket, &op, 1) + data, err := socket.SimpleQuery(&op) if err != nil { return err @@ -2991,7 +2979,7 @@ func (q *Query) One(result interface{}) (err error) { return ErrNotFound } if result != nil { - err = bson.Unmarshal(data, result) + err = unmarshalFindOpOne(socket, data, result) if err == nil { debugf("Query %p document unmarshaled: %#v", q, result) } else { @@ -3002,6 +2990,105 @@ func (q *Query) One(result interface{}) (err error) { return checkQueryError(op.collection, data) } +// prepareFindOp translates op from being an old-style wire protocol query into +// a new-style find command if that's supported by the MongoDB server (3.2+). +// It returns whether the op was translated or not. 
+func prepareFindOp(socket *mongoSocket, op *queryOp, limit int32) bool { + if socket.ServerInfo().MaxWireVersion < 4 { + return false + } + + nameDot := strings.Index(op.collection, ".") + if nameDot < 0 { + panic("invalid query collection name: " + op.collection) + } + + find := findCmd{ + Collection: op.collection[nameDot+1:], + Filter: op.query, + Sort: op.options.OrderBy, + Limit: limit, + } + if op.limit < 0 { + find.BatchSize = -op.limit + find.SingleBatch = true + } else { + find.BatchSize = op.limit + } + + op.collection = op.collection[:nameDot] + ".$cmd" + op.query = &find + op.limit = -1 + + return true +} + +type cursorData struct { + FirstBatch []bson.Raw "firstBatch" + NextBatch []bson.Raw "nextBatch" + NS string + Id int64 +} + +func unmarshalFindOpOne(socket *mongoSocket, data []byte, result interface{}) error { + if socket.ServerInfo().MaxWireVersion < 4 { + return bson.Unmarshal(data, result) + } + var findResult struct{ Cursor cursorData } + if err := bson.Unmarshal(data, &findResult); err != nil { + return err + } + if len(findResult.Cursor.FirstBatch) == 0 { + return ErrNotFound + } + return findResult.Cursor.FirstBatch[0].Unmarshal(result) +} + +// findCmd holds the command used for performing queries on MongoDB 3.2+. 
+// +// Relevant documentation: +// +// https://docs.mongodb.org/master/reference/command/find/#dbcmd.find +// +type findCmd struct { + Collection string `bson:"find"` + Filter interface{} `bson:"filter,omitempty"` + Sort interface{} `bson:"sort,omitempty"` + Projection interface{} `bson:"projection,omitempty"` + Hint interface{} `bson:"hint,omitempty"` + Skip interface{} `bson:"skip,omitempty"` + Limit int32 `bson:"limit,omitempty"` + BatchSize interface{} `bson:"batchSize,omitempty"` + SingleBatch bool `bson:"singleBatch,omitempty"` + Comment string `bson:"comment,omitempty"` + MaxScan int `bson:"maxScan,omitempty"` + MaxTimeMS int64 `bson:"maxTimeMS,omitempty"` + ReadConcern interface{} `bson:"readConcern,omitempty"` + Max interface{} `bson:"max,omitempty"` + Min interface{} `bson:"min,omitempty"` + ReturnKey bool `bson:"returnKey,omitempty"` + ShowRecordId bool `bson:"showRecordId,omitempty"` + Snapshot bool `bson:"snapshot,omitempty"` + Tailable bool `bson:"tailable,omitempty"` + AwaitData bool `bson:"awaitData,omitempty"` + OplogReplay bool `bson:"oplogReplay,omitempty"` + NoCursorTimeout bool `bson:"noCursorTimeout,omitempty"` + AllowPartialResults bool `bson:"allowPartialResults,omitempty"` +} + +// getMoreCmd holds the command used for requesting more query results on MongoDB 3.2+. +// +// Relevant documentation: +// +// https://docs.mongodb.org/master/reference/command/getMore/#dbcmd.getMore +// +type getMoreCmd struct { + CursorId int64 `bson:"getMore"` + Collection string `bson:"collection"` + BatchSize interface{} `bson:"batchSize,omitempty"` + MaxTimeMS int64 `bson:"maxTimeMS,omitempty"` +} + // run duplicates the behavior of collection.Find(query).One(&result) // as performed by Database.Run, specializing the logic for running // database commands on a given socket. @@ -3115,12 +3202,7 @@ func (db *Database) CollectionNames() (names []string, err error) { // Try with a command. 
var result struct { Collections []bson.Raw - - Cursor struct { - FirstBatch []bson.Raw "firstBatch" - NS string - Id int64 - } + Cursor cursorData } err = db.With(cloned).Run(bson.D{{"listCollections", 1}, {"cursor", bson.D{{"batchSize", batchSize}}}}, &result) if err == nil { @@ -3212,23 +3294,29 @@ func (q *Query) Iter() *Iter { iter.op.replyFunc = iter.replyFunc() iter.docsToReceive++ + socket, err := session.acquireSocket(true) + if err != nil { + iter.err = err + return iter + } + defer socket.Release() + session.prepareQuery(&op) op.replyFunc = iter.op.replyFunc - socket, err := session.acquireSocket(true) + if prepareFindOp(socket, &op, limit) { + iter.findCmd = true + } + + iter.server = socket.Server() + err = socket.Query(&op) if err != nil { + // Must lock as the query is already out and it may call replyFunc. + iter.m.Lock() iter.err = err - } else { - iter.server = socket.Server() - err = socket.Query(&op) - if err != nil { - // Must lock as the query above may call replyFunc. - iter.m.Lock() - iter.err = err - iter.m.Unlock() - } - socket.Release() + iter.m.Unlock() } + return iter } @@ -3304,7 +3392,7 @@ func (q *Query) Tail(timeout time.Duration) *Iter { iter.server = socket.Server() err = socket.Query(&op) if err != nil { - // Must lock as the query above may call replyFunc. + // Must lock as the query is already out and it may call replyFunc. iter.m.Lock() iter.err = err iter.m.Unlock() @@ -3642,12 +3730,39 @@ func (iter *Iter) getMore() { iter.op.limit = limit } } - if err := socket.Query(&iter.op); err != nil { + var op interface{} + if iter.findCmd { + op = iter.getMoreCmd() + } else { + op = &iter.op + } + if err := socket.Query(op); err != nil { iter.docsToReceive-- iter.err = err } } +func (iter *Iter) getMoreCmd() *queryOp { + // TODO: Define the query statically in the Iter type, next to getMoreOp. 
+ nameDot := strings.Index(iter.op.collection, ".") + if nameDot < 0 { + panic("invalid query collection name: " + iter.op.collection) + } + + getMore := getMoreCmd{ + CursorId: iter.op.cursorId, + Collection: iter.op.collection[nameDot+1:], + BatchSize: iter.op.limit, + } + + var op queryOp + op.collection = iter.op.collection[:nameDot] + ".$cmd" + op.query = &getMore + op.limit = -1 + op.replyFunc = iter.op.replyFunc + return &op +} + type countCmd struct { Count string Query interface{} @@ -4209,6 +4324,31 @@ func (iter *Iter) replyFunc() replyFunc { } else { iter.err = ErrNotFound } + } else if iter.findCmd { + debugf("Iter %p received reply document %d/%d (cursor=%d)", iter, docNum+1, int(op.replyDocs), op.cursorId) + var findReply struct{ Cursor cursorData } + if err := bson.Unmarshal(docData, &findReply); err != nil { + iter.err = err + } else if len(findReply.Cursor.FirstBatch) == 0 && len(findReply.Cursor.NextBatch) == 0 { + iter.err = ErrNotFound + } else { + batch := findReply.Cursor.FirstBatch + if len(batch) == 0 { + batch = findReply.Cursor.NextBatch + } + rdocs := len(batch) + for _, raw := range batch { + iter.docData.Push(raw.Data) + } + iter.docsToReceive = 0 + docsToProcess := iter.docData.Len() + if iter.limit == 0 || int32(docsToProcess) < iter.limit { + iter.docsBeforeMore = docsToProcess - int(iter.prefetch*float64(rdocs)) + } else { + iter.docsBeforeMore = -1 + } + iter.op.cursorId = findReply.Cursor.Id + } } else { rdocs := int(op.replyDocs) if docNum == 0 { @@ -4221,7 +4361,6 @@ func (iter *Iter) replyFunc() replyFunc { } iter.op.cursorId = op.cursorId } - // XXX Handle errors and flags. 
debugf("Iter %p received reply document %d/%d (cursor=%d)", iter, docNum+1, rdocs, op.cursorId) iter.docData.Push(docData) } diff --git a/session_test.go b/session_test.go index cc0cda91a..c9cfd413e 100644 --- a/session_test.go +++ b/session_test.go @@ -146,6 +146,11 @@ func (s *S) TestInsertFindOne(c *C) { c.Assert(err, IsNil) c.Assert(result.A, Equals, 1) c.Assert(result.B, Equals, 2) + + err = coll.Find(M{"a": 1}).Sort("-b").One(&result) + c.Assert(err, IsNil) + c.Assert(result.A, Equals, 1) + c.Assert(result.B, Equals, 3) } func (s *S) TestInsertFindOneNil(c *C) { @@ -1281,7 +1286,7 @@ func (s *S) TestFindIterAll(c *C) { result := struct{ N int }{} for i := 2; i < 7; i++ { ok := iter.Next(&result) - c.Assert(ok, Equals, true) + c.Assert(ok, Equals, true, Commentf("err=%v", err)) c.Assert(result.N, Equals, ns[i]) if i == 1 { stats := mgo.GetStats() @@ -1298,7 +1303,12 @@ func (s *S) TestFindIterAll(c *C) { stats := mgo.GetStats() c.Assert(stats.SentOps, Equals, 3) // 1*QUERY_OP + 2*GET_MORE_OP c.Assert(stats.ReceivedOps, Equals, 3) // and their REPLY_OPs. - c.Assert(stats.ReceivedDocs, Equals, 5) + if s.versionAtLeast(3, 2) { + // In 3.2+ responses come in batches inside the op reply docs. + c.Assert(stats.ReceivedDocs, Equals, 3) + } else { + c.Assert(stats.ReceivedDocs, Equals, 5) + } c.Assert(stats.SocketsInUse, Equals, 0) } @@ -1568,7 +1578,12 @@ func (s *S) TestFindIterLimitWithBatch(c *C) { c.Assert(result.N, Equals, ns[i]) if i == 3 { stats := mgo.GetStats() - c.Assert(stats.ReceivedDocs, Equals, 2) + if s.versionAtLeast(3, 2) { + // In 3.2+ responses come in batches inside the op reply docs. + c.Assert(stats.ReceivedDocs, Equals, 1) + } else { + c.Assert(stats.ReceivedDocs, Equals, 2) + } } } @@ -1579,9 +1594,18 @@ func (s *S) TestFindIterLimitWithBatch(c *C) { session.Refresh() // Release socket. 
stats := mgo.GetStats() - c.Assert(stats.SentOps, Equals, 3) // 1*QUERY_OP + 1*GET_MORE_OP + 1*KILL_CURSORS_OP - c.Assert(stats.ReceivedOps, Equals, 2) // and its REPLY_OPs - c.Assert(stats.ReceivedDocs, Equals, 3) + if s.versionAtLeast(3, 2) { + // In 3.2+ limit works properly even with multiple batches.. + c.Assert(stats.SentOps, Equals, 2) // 1*QUERY_OP + 1*GET_MORE_OP + c.Assert(stats.ReceivedOps, Equals, 2) // and its REPLY_OPs + + // In 3.2+ responses come in batches inside the op reply docs. + c.Assert(stats.ReceivedDocs, Equals, 2) + } else { + c.Assert(stats.SentOps, Equals, 3) // 1*QUERY_OP + 1*GET_MORE_OP + 1*KILL_CURSORS_OP + c.Assert(stats.ReceivedOps, Equals, 2) // and its REPLY_OPs + c.Assert(stats.ReceivedDocs, Equals, 3) + } c.Assert(stats.SocketsInUse, Equals, 0) } From 224c35c17d5892c7b09d2ac60775fe085cd48bf1 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Wed, 16 Dec 2015 14:26:47 -0200 Subject: [PATCH 214/305] Fix find command's batch size and skip. --- session.go | 6 +++++- session_test.go | 29 +++++++++++++++++++---------- 2 files changed, 24 insertions(+), 11 deletions(-) diff --git a/session.go b/session.go index 29da178d1..b99a800a2 100644 --- a/session.go +++ b/session.go @@ -3007,6 +3007,7 @@ func prepareFindOp(socket *mongoSocket, op *queryOp, limit int32) bool { Collection: op.collection[nameDot+1:], Filter: op.query, Sort: op.options.OrderBy, + Skip: op.skip, Limit: limit, } if op.limit < 0 { @@ -3018,7 +3019,10 @@ func prepareFindOp(socket *mongoSocket, op *queryOp, limit int32) bool { op.collection = op.collection[:nameDot] + ".$cmd" op.query = &find + op.skip = 0 op.limit = -1 + op.options = queryWrapper{} + op.hasOptions = false return true } @@ -3058,7 +3062,7 @@ type findCmd struct { Hint interface{} `bson:"hint,omitempty"` Skip interface{} `bson:"skip,omitempty"` Limit int32 `bson:"limit,omitempty"` - BatchSize interface{} `bson:"batchSize,omitempty"` + BatchSize int32 `bson:"batchSize,omitempty"` SingleBatch bool 
`bson:"singleBatch,omitempty"` Comment string `bson:"comment,omitempty"` MaxScan int `bson:"maxScan,omitempty"` diff --git a/session_test.go b/session_test.go index c9cfd413e..112d13520 100644 --- a/session_test.go +++ b/session_test.go @@ -1320,19 +1320,20 @@ func (s *S) TestFindIterTwiceWithSameQuery(c *C) { coll := session.DB("mydb").C("mycoll") for i := 40; i != 47; i++ { - coll.Insert(M{"n": i}) + err := coll.Insert(M{"n": i}) + c.Assert(err, IsNil) } query := coll.Find(M{}).Sort("n") - result1 := query.Skip(1).Iter() - result2 := query.Skip(2).Iter() + iter1 := query.Skip(1).Iter() + iter2 := query.Skip(2).Iter() - result := struct{ N int }{} - ok := result2.Next(&result) + var result struct{ N int } + ok := iter2.Next(&result) c.Assert(ok, Equals, true) c.Assert(result.N, Equals, 42) - ok = result1.Next(&result) + ok = iter1.Next(&result) c.Assert(ok, Equals, true) c.Assert(result.N, Equals, 41) } @@ -1363,7 +1364,8 @@ func (s *S) TestFindIterLimit(c *C) { ns := []int{40, 41, 42, 43, 44, 45, 46} for _, n := range ns { - coll.Insert(M{"n": n}) + err := coll.Insert(M{"n": n}) + c.Assert(err, IsNil) } session.Refresh() // Release socket. @@ -1387,9 +1389,16 @@ func (s *S) TestFindIterLimit(c *C) { session.Refresh() // Release socket. stats := mgo.GetStats() - c.Assert(stats.SentOps, Equals, 2) // 1*QUERY_OP + 1*KILL_CURSORS_OP - c.Assert(stats.ReceivedOps, Equals, 1) // and its REPLY_OP - c.Assert(stats.ReceivedDocs, Equals, 3) + if s.versionAtLeast(3, 2) { + // Limit works properly in 3.2+, and results are batched in single doc. 
+ c.Assert(stats.SentOps, Equals, 1) // 1*QUERY_OP + c.Assert(stats.ReceivedOps, Equals, 1) // and its REPLY_OP + c.Assert(stats.ReceivedDocs, Equals, 1) + } else { + c.Assert(stats.SentOps, Equals, 2) // 1*QUERY_OP + 1*KILL_CURSORS_OP + c.Assert(stats.ReceivedOps, Equals, 1) // and its REPLY_OP + c.Assert(stats.ReceivedDocs, Equals, 3) + } c.Assert(stats.SocketsInUse, Equals, 0) } From e57d4e990eb0f483a9fed52f01cf80674e94d769 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 21 Dec 2015 14:35:06 -0200 Subject: [PATCH 215/305] All tests pass in 3.2 with find+getMore commands. --- session.go | 84 +++++++++++++++++++++++++----------- session_test.go | 112 ++++++++++++++++++++++++++++++++++-------------- socket.go | 5 +++ 3 files changed, 142 insertions(+), 59 deletions(-) diff --git a/session.go b/session.go index b99a800a2..a452a6976 100644 --- a/session.go +++ b/session.go @@ -2005,7 +2005,11 @@ func (s *Session) FsyncLock() error { // FsyncUnlock releases the server for writes. See FsyncLock for details. func (s *Session) FsyncUnlock() error { - return s.DB("admin").C("$cmd.sys.unlock").Find(nil).One(nil) // WTF? + err := s.Run(bson.D{{"fsyncUnlock", 1}}, nil) + if isNoCmd(err) { + err = s.DB("admin").C("$cmd.sys.unlock").Find(nil).One(nil) // WTF? + } + return err } // Find prepares a query using the provided document. The document may be a @@ -2766,6 +2770,8 @@ func (q *Query) Explain(result interface{}) error { return iter.Close() } +// TODO: Add Collection.Explain. See https://goo.gl/1MDlvz. + // Hint will include an explicit "hint" in the query to force the server // to use a specified index, potentially improving performance in some // situations. 
The provided parameters are the fields that compose the @@ -2969,7 +2975,7 @@ func (q *Query) One(result interface{}) (err error) { session.prepareQuery(&op) - prepareFindOp(socket, &op, 1) + expectFindReply := prepareFindOp(socket, &op, 1) data, err := socket.SimpleQuery(&op) if err != nil { @@ -2978,8 +2984,27 @@ func (q *Query) One(result interface{}) (err error) { if data == nil { return ErrNotFound } + if expectFindReply { + var findReply struct { + Ok bool + Code int + Errmsg string + Cursor cursorData + } + err = bson.Unmarshal(data, &findReply) + if err != nil { + return err + } + if !findReply.Ok && findReply.Errmsg != "" { + return &QueryError{Code: findReply.Code, Message: findReply.Errmsg} + } + if len(findReply.Cursor.FirstBatch) == 0 { + return ErrNotFound + } + data = findReply.Cursor.FirstBatch[0].Data + } if result != nil { - err = unmarshalFindOpOne(socket, data, result) + err = bson.Unmarshal(data, result) if err == nil { debugf("Query %p document unmarshaled: %#v", q, result) } else { @@ -2992,9 +3017,10 @@ func (q *Query) One(result interface{}) (err error) { // prepareFindOp translates op from being an old-style wire protocol query into // a new-style find command if that's supported by the MongoDB server (3.2+). -// It returns whether the op was translated or not. +// It returns whether to expect a find command result or not. Note op may be +// translated into an explain command, in which case the function returns false. 
func prepareFindOp(socket *mongoSocket, op *queryOp, limit int32) bool { - if socket.ServerInfo().MaxWireVersion < 4 { + if socket.ServerInfo().MaxWireVersion < 4 || op.collection == "admin.$cmd" { return false } @@ -3004,11 +3030,18 @@ func prepareFindOp(socket *mongoSocket, op *queryOp, limit int32) bool { } find := findCmd{ - Collection: op.collection[nameDot+1:], - Filter: op.query, - Sort: op.options.OrderBy, - Skip: op.skip, - Limit: limit, + Collection: op.collection[nameDot+1:], + Filter: op.query, + Projection: op.selector, + Sort: op.options.OrderBy, + Skip: op.skip, + Limit: limit, + MaxTimeMS: op.options.MaxTimeMS, + MaxScan: op.options.MaxScan, + Hint: op.options.Hint, + Comment: op.options.Comment, + Snapshot: op.options.Snapshot, + OplogReplay: op.flags&flagLogReplay != 0, } if op.limit < 0 { find.BatchSize = -op.limit @@ -3017,6 +3050,8 @@ func prepareFindOp(socket *mongoSocket, op *queryOp, limit int32) bool { find.BatchSize = op.limit } + explain := op.options.Explain + op.collection = op.collection[:nameDot] + ".$cmd" op.query = &find op.skip = 0 @@ -3024,6 +3059,10 @@ func prepareFindOp(socket *mongoSocket, op *queryOp, limit int32) bool { op.options = queryWrapper{} op.hasOptions = false + if explain { + op.query = bson.D{{"explain", op.query}} + return false + } return true } @@ -3034,20 +3073,6 @@ type cursorData struct { Id int64 } -func unmarshalFindOpOne(socket *mongoSocket, data []byte, result interface{}) error { - if socket.ServerInfo().MaxWireVersion < 4 { - return bson.Unmarshal(data, result) - } - var findResult struct{ Cursor cursorData } - if err := bson.Unmarshal(data, &findResult); err != nil { - return err - } - if len(findResult.Cursor.FirstBatch) == 0 { - return ErrNotFound - } - return findResult.Cursor.FirstBatch[0].Unmarshal(result) -} - // findCmd holds the command used for performing queries on MongoDB 3.2+. 
// // Relevant documentation: @@ -3066,7 +3091,7 @@ type findCmd struct { SingleBatch bool `bson:"singleBatch,omitempty"` Comment string `bson:"comment,omitempty"` MaxScan int `bson:"maxScan,omitempty"` - MaxTimeMS int64 `bson:"maxTimeMS,omitempty"` + MaxTimeMS int `bson:"maxTimeMS,omitempty"` ReadConcern interface{} `bson:"readConcern,omitempty"` Max interface{} `bson:"max,omitempty"` Min interface{} `bson:"min,omitempty"` @@ -4330,9 +4355,16 @@ func (iter *Iter) replyFunc() replyFunc { } } else if iter.findCmd { debugf("Iter %p received reply document %d/%d (cursor=%d)", iter, docNum+1, int(op.replyDocs), op.cursorId) - var findReply struct{ Cursor cursorData } + var findReply struct { + Ok bool + Code int + Errmsg string + Cursor cursorData + } if err := bson.Unmarshal(docData, &findReply); err != nil { iter.err = err + } else if !findReply.Ok && findReply.Errmsg != "" { + iter.err = &QueryError{Code: findReply.Code, Message: findReply.Errmsg} } else if len(findReply.Cursor.FirstBatch) == 0 && len(findReply.Cursor.NextBatch) == 0 { iter.err = ErrNotFound } else { diff --git a/session_test.go b/session_test.go index 112d13520..aca2946a2 100644 --- a/session_test.go +++ b/session_test.go @@ -1232,6 +1232,20 @@ func (s *S) TestFindOneNotFound(c *C) { c.Assert(err == mgo.ErrNotFound, Equals, true) } +func (s *S) TestFindIterNotFound(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + result := struct{ A, B int }{} + iter := coll.Find(M{"a": 1}).Iter() + ok := iter.Next(&result) + c.Assert(ok, Equals, false) + c.Assert(iter.Err(), IsNil) +} + func (s *S) TestFindNil(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) @@ -1652,7 +1666,12 @@ func (s *S) TestFindIterSortWithBatch(c *C) { c.Assert(result.N, Equals, ns[i]) if i == 3 { stats := mgo.GetStats() - c.Assert(stats.ReceivedDocs, Equals, 2) + if s.versionAtLeast(3, 2) { + // Find command in 3.2+ 
bundles batches in a single document. + c.Assert(stats.ReceivedDocs, Equals, 1) + } else { + c.Assert(stats.ReceivedDocs, Equals, 2) + } } } @@ -1665,7 +1684,12 @@ func (s *S) TestFindIterSortWithBatch(c *C) { stats := mgo.GetStats() c.Assert(stats.SentOps, Equals, 3) // 1*QUERY_OP + 2*GET_MORE_OP c.Assert(stats.ReceivedOps, Equals, 3) // and its REPLY_OPs - c.Assert(stats.ReceivedDocs, Equals, 5) + if s.versionAtLeast(3, 2) { + // Find command in 3.2+ bundles batches in a single document. + c.Assert(stats.ReceivedDocs, Equals, 3) + } else { + c.Assert(stats.ReceivedDocs, Equals, 5) + } c.Assert(stats.SocketsInUse, Equals, 0) } @@ -2051,7 +2075,12 @@ func (s *S) TestFindForOnIter(c *C) { c.Assert(result.N, Equals, ns[i]) if i == 1 { stats := mgo.GetStats() - c.Assert(stats.ReceivedDocs, Equals, 2) + if s.versionAtLeast(3, 2) { + // Find command in 3.2+ bundles batches in a single document. + c.Assert(stats.ReceivedDocs, Equals, 1) + } else { + c.Assert(stats.ReceivedDocs, Equals, 2) + } } i++ return nil @@ -2063,7 +2092,12 @@ func (s *S) TestFindForOnIter(c *C) { stats := mgo.GetStats() c.Assert(stats.SentOps, Equals, 3) // 1*QUERY_OP + 2*GET_MORE_OP c.Assert(stats.ReceivedOps, Equals, 3) // and their REPLY_OPs. - c.Assert(stats.ReceivedDocs, Equals, 5) + if s.versionAtLeast(3, 2) { + // Find command in 3.2+ bundles batches in a single document. + c.Assert(stats.ReceivedDocs, Equals, 3) + } else { + c.Assert(stats.ReceivedDocs, Equals, 5) + } c.Assert(stats.SocketsInUse, Equals, 0) } @@ -2093,6 +2127,12 @@ func (s *S) TestFindFor(c *C) { if i == 1 { stats := mgo.GetStats() c.Assert(stats.ReceivedDocs, Equals, 2) + if s.versionAtLeast(3, 2) { + // Find command in 3.2+ bundles batches in a single document. 
+ c.Assert(stats.ReceivedDocs, Equals, 1) + } else { + c.Assert(stats.ReceivedDocs, Equals, 2) + } } i++ return nil @@ -2104,7 +2144,12 @@ func (s *S) TestFindFor(c *C) { stats := mgo.GetStats() c.Assert(stats.SentOps, Equals, 3) // 1*QUERY_OP + 2*GET_MORE_OP c.Assert(stats.ReceivedOps, Equals, 3) // and their REPLY_OPs. - c.Assert(stats.ReceivedDocs, Equals, 5) + if s.versionAtLeast(3, 2) { + // Find command in 3.2+ bundles batches in a single document. + c.Assert(stats.ReceivedDocs, Equals, 3) + } else { + c.Assert(stats.ReceivedDocs, Equals, 5) + } c.Assert(stats.SocketsInUse, Equals, 0) } @@ -2368,6 +2413,7 @@ func (s *S) TestPrefetching(c *C) { coll := session.DB("mydb").C("mycoll") const total = 600 + const batch = 100 mgo.SetDebug(false) docs := make([]interface{}, total) for i := 0; i != total; i++ { @@ -2384,31 +2430,31 @@ func (s *S) TestPrefetching(c *C) { switch testi { case 0: // The default session value. - session.SetBatch(100) + session.SetBatch(batch) iter = coll.Find(M{}).Iter() beforeMore = 75 case 2: // Changing the session value. - session.SetBatch(100) + session.SetBatch(batch) session.SetPrefetch(0.27) iter = coll.Find(M{}).Iter() beforeMore = 73 case 1: // Changing via query methods. - iter = coll.Find(M{}).Prefetch(0.27).Batch(100).Iter() + iter = coll.Find(M{}).Prefetch(0.27).Batch(batch).Iter() beforeMore = 73 case 3: // With prefetch on first document. - iter = coll.Find(M{}).Prefetch(1.0).Batch(100).Iter() + iter = coll.Find(M{}).Prefetch(1.0).Batch(batch).Iter() beforeMore = 0 case 4: // Without prefetch. 
- iter = coll.Find(M{}).Prefetch(0).Batch(100).Iter() + iter = coll.Find(M{}).Prefetch(0).Batch(batch).Iter() beforeMore = 100 } pings := 0 - for batchi := 0; batchi < len(docs)/100-1; batchi++ { + for batchi := 0; batchi < len(docs)/batch-1; batchi++ { c.Logf("Iterating over %d documents on batch %d", beforeMore, batchi) var result struct{ N int } for i := 0; i < beforeMore; i++ { @@ -2422,7 +2468,12 @@ func (s *S) TestPrefetching(c *C) { pings++ stats := mgo.GetStats() - c.Assert(stats.ReceivedDocs, Equals, (batchi+1)*100+pings) + if s.versionAtLeast(3, 2) { + // Find command in 3.2+ bundles batches in a single document. + c.Assert(stats.ReceivedDocs, Equals, (batchi+1)+pings) + } else { + c.Assert(stats.ReceivedDocs, Equals, (batchi+1)*batch+pings) + } c.Logf("Iterating over one more document on batch %d", batchi) ok := iter.Next(&result) @@ -2433,7 +2484,12 @@ func (s *S) TestPrefetching(c *C) { pings++ stats = mgo.GetStats() - c.Assert(stats.ReceivedDocs, Equals, (batchi+2)*100+pings) + if s.versionAtLeast(3, 2) { + // Find command in 3.2+ bundles batches in a single document. + c.Assert(stats.ReceivedDocs, Equals, (batchi+2)+pings) + } else { + c.Assert(stats.ReceivedDocs, Equals, (batchi+2)*batch+pings) + } } } } @@ -2594,22 +2650,17 @@ func (s *S) TestQueryErrorOne(c *C) { coll := session.DB("mydb").C("mycoll") - result := struct { - Err string "$err" - }{} - - err = coll.Find(M{"a": 1}).Select(M{"a": M{"b": 1}}).One(&result) + err = coll.Find(M{"a": 1}).Select(M{"a": M{"b": 1}}).One(nil) c.Assert(err, ErrorMatches, ".*Unsupported projection option:.*") c.Assert(err.(*mgo.QueryError).Message, Matches, ".*Unsupported projection option:.*") - if s.versionAtLeast(2, 6) { - // Oh, the dance of error codes. :-( + // Oh, the dance of error codes. 
:-( + if s.versionAtLeast(3, 2) { + c.Assert(err.(*mgo.QueryError).Code, Equals, 2) + } else if s.versionAtLeast(2, 6) { c.Assert(err.(*mgo.QueryError).Code, Equals, 17287) } else { c.Assert(err.(*mgo.QueryError).Code, Equals, 13097) } - - // The result should be properly unmarshalled with QueryError - c.Assert(result.Err, Matches, ".*Unsupported projection option:.*") } func (s *S) TestQueryErrorNext(c *C) { @@ -2619,28 +2670,23 @@ func (s *S) TestQueryErrorNext(c *C) { coll := session.DB("mydb").C("mycoll") - result := struct { - Err string "$err" - }{} - iter := coll.Find(M{"a": 1}).Select(M{"a": M{"b": 1}}).Iter() - ok := iter.Next(&result) + ok := iter.Next(nil) c.Assert(ok, Equals, false) err = iter.Close() c.Assert(err, ErrorMatches, ".*Unsupported projection option:.*") c.Assert(err.(*mgo.QueryError).Message, Matches, ".*Unsupported projection option:.*") - if s.versionAtLeast(2, 6) { - // Oh, the dance of error codes. :-( + // Oh, the dance of error codes. :-( + if s.versionAtLeast(3, 2) { + c.Assert(err.(*mgo.QueryError).Code, Equals, 2) + } else if s.versionAtLeast(2, 6) { c.Assert(err.(*mgo.QueryError).Code, Equals, 17287) } else { c.Assert(err.(*mgo.QueryError).Code, Equals, 13097) } c.Assert(iter.Err(), Equals, err) - - // The result should be properly unmarshalled with QueryError - c.Assert(result.Err, Matches, ".*Unsupported projection option:.*") } var indexTests = []struct { diff --git a/socket.go b/socket.go index f8862a396..eb66afc66 100644 --- a/socket.go +++ b/socket.go @@ -389,6 +389,11 @@ func (socket *mongoSocket) Query(ops ...interface{}) (err error) { for _, op := range ops { debugf("Socket %p to %s: serializing op: %#v", socket, socket.addr, op) + if qop, ok := op.(*queryOp); ok { + if cmd, ok := qop.query.(*findCmd); ok { + debugf("Socket %p to %s: find command: %#v", socket, socket.addr, cmd) + } + } start := len(buf) var replyFunc replyFunc switch op := op.(type) { From 9589f7c462fc1edd518e528b7380edc145ee1732 Mon Sep 17 00:00:00 2001 
From: Shawn Smith Date: Mon, 4 Jan 2016 12:12:36 +0900 Subject: [PATCH 216/305] fix typo --- txn/flusher.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/txn/flusher.go b/txn/flusher.go index 964b43a31..67d27a64d 100644 --- a/txn/flusher.go +++ b/txn/flusher.go @@ -314,7 +314,7 @@ NextDoc: } } - // The stash wasn't valid and tt got overwriten. Try again. + // The stash wasn't valid and tt got overwritten. Try again. f.unstashToken(tt, dkey) goto RetryDoc } From 4c1af3e7dad9d8f3e219a452b4231ecf8ee329aa Mon Sep 17 00:00:00 2001 From: Shawn Smith Date: Mon, 4 Jan 2016 12:14:56 +0900 Subject: [PATCH 217/305] fix typo --- session.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/session.go b/session.go index a452a6976..f2f1ac8ed 100644 --- a/session.go +++ b/session.go @@ -330,7 +330,7 @@ type DialInfo struct { FailFast bool // Database is the default database name used when the Session.DB method - // is called with an empty name, and is also used during the intial + // is called with an empty name, and is also used during the initial // authentication if Source is unset. Database string From 7b96a158480287bbdd34e46eacbb9b7c428b65e9 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Thu, 7 Jan 2016 23:30:48 -0200 Subject: [PATCH 218/305] Handle document validation in Collection.Create. Fixes #205. --- session.go | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/session.go b/session.go index a452a6976..0b3c26607 100644 --- a/session.go +++ b/session.go @@ -2566,6 +2566,23 @@ type CollectionInfo struct { Capped bool MaxBytes int MaxDocs int + + // Validator contains a validation expression that defines which + // documents should be considered valid for this collection. 
+ Validator interface{} + + // ValidationLevel may be set to "strict" (the default) to force + // MongoDB to validate all documents on inserts and updates, to + // "moderate" to apply the validation rules only to documents + // that already fulfill the validation criteria, or to "off" for + // disabling validation entirely. + ValidationLevel string + + // ValidationAction determines how MongoDB handles documents that + // violate the validation rules. It may be set to "error" (the default) + // to reject inserts or updates that violate the rules, or to "warn" + // to log invalid operations but allow them to proceed. + ValidationAction string } // Create explicitly creates the c collection with details of info. @@ -2597,13 +2614,22 @@ func (c *Collection) Create(info *CollectionInfo) error { if info.ForceIdIndex { cmd = append(cmd, bson.DocElem{"autoIndexId", true}) } + if info.Validator != nil { + cmd = append(cmd, bson.DocElem{"validator", info.Validator}) + } + if info.ValidationLevel != "" { + cmd = append(cmd, bson.DocElem{"validationLevel", info.ValidationLevel}) + } + if info.ValidationAction != "" { + cmd = append(cmd, bson.DocElem{"validationAction", info.ValidationAction}) + } return c.Database.Run(cmd, nil) } // Batch sets the batch size used when fetching documents from the database. // It's possible to change this setting on a per-session basis as well, using // the Batch method of Session. -// + // The default batch size is defined by the database itself. As of this // writing, MongoDB will use an initial size of min(100 docs, 4MB) on the // first batch, and 4MB on remaining ones. From c244a322e1d0e7bb97e8c4150e34fd3db0af1134 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Thu, 7 Jan 2016 23:42:10 -0200 Subject: [PATCH 219/305] Allow storage engine options on Collection.Create Fixes #186. 
--- session.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/session.go b/session.go index 0b3c26607..681c9589b 100644 --- a/session.go +++ b/session.go @@ -2583,6 +2583,11 @@ type CollectionInfo struct { // to reject inserts or updates that violate the rules, or to "warn" // to log invalid operations but allow them to proceed. ValidationAction string + + // StorageEngine allows specifying collection options for the + // storage engine in use. The map keys must hold the storage engine + // name for which options are being specified. + StorageEngine interface{} } // Create explicitly creates the c collection with details of info. @@ -2623,6 +2628,9 @@ func (c *Collection) Create(info *CollectionInfo) error { if info.ValidationAction != "" { cmd = append(cmd, bson.DocElem{"validationAction", info.ValidationAction}) } + if info.StorageEngine != nil { + cmd = append(cmd, bson.DocElem{"storageEngine", info.StorageEngine}) + } return c.Database.Run(cmd, nil) } From 1374d680bd0d914d69dee330254e773455e6b07b Mon Sep 17 00:00:00 2001 From: Gabriel Russell Date: Wed, 13 Jan 2016 17:44:37 -0500 Subject: [PATCH 220/305] correctly skip docs being read in to slices o when unmarshaling a document in to a slice the parser forgot to discard the document, and left the parser in a broken state. Now it's properly discarded o test that unmarshaling a document in to a slice doesn't error --- bson/bson_test.go | 4 ++++ bson/decode.go | 2 ++ 2 files changed, 6 insertions(+) diff --git a/bson/bson_test.go b/bson/bson_test.go index 9b63f8ebb..543fb046d 100644 --- a/bson/bson_test.go +++ b/bson/bson_test.go @@ -549,6 +549,10 @@ var unmarshalItems = []testItemType{ // Decode old binary without length. According to the spec, this shouldn't happen. 
{bson.M{"_": []byte("old")}, "\x05_\x00\x03\x00\x00\x00\x02old"}, + + // Decode a doc within a doc in to a slice within a doc; shouldn't error + {&struct{ Foo []string }{}, + "\x03\x66\x6f\x6f\x00\x05\x00\x00\x00\x00"}, } func (s *S) TestUnmarshalOneWayItems(c *C) { diff --git a/bson/decode.go b/bson/decode.go index 0ee8d22d9..9bd73f966 100644 --- a/bson/decode.go +++ b/bson/decode.go @@ -460,6 +460,8 @@ func (d *decoder) readElemTo(out reflect.Value, kind byte) (good bool) { out.Set(d.readDocElems(outt)) case typeRawDocElem: out.Set(d.readRawDocElems(outt)) + default: + d.readDocTo(blackHole) } return true } From bfa4c9830fa6f062c7a911e3eec85c0b66bb47eb Mon Sep 17 00:00:00 2001 From: Maciej Galkowski Date: Fri, 15 Jan 2016 14:52:23 +0000 Subject: [PATCH 221/305] Fix a crash when bulk update and delete returned an connection error --- bulk.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/bulk.go b/bulk.go index c377af563..b18202ad9 100644 --- a/bulk.go +++ b/bulk.go @@ -270,15 +270,19 @@ func (b *Bulk) runInsert(action *bulkAction, result *BulkResult, berr *bulkError func (b *Bulk) runUpdate(action *bulkAction, result *BulkResult, berr *bulkError) bool { lerr, err := b.c.writeOp(bulkUpdateOp(action.docs), b.ordered) - result.Matched += lerr.N - result.Modified += lerr.modified + if lerr != nil { + result.Matched += lerr.N + result.Modified += lerr.modified + } return b.checkSuccess(berr, lerr, err) } func (b *Bulk) runRemove(action *bulkAction, result *BulkResult, berr *bulkError) bool { lerr, err := b.c.writeOp(bulkDeleteOp(action.docs), b.ordered) - result.Matched += lerr.N - result.Modified += lerr.modified + if lerr != nil { + result.Matched += lerr.N + result.Modified += lerr.modified + } return b.checkSuccess(berr, lerr, err) } From 5088382a866b7034418d01b49489c6db49e9b5f7 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 18 Jan 2016 12:36:47 -0200 Subject: [PATCH 222/305] Expose bulk error type and its individual 
cases. Fixes #189. --- bulk.go | 135 +++++++++++++++++++++++++++++++++--------------- bulk_test.go | 114 +++++++++++++++++++++++++++++++++++++++- session.go | 71 ++++++++++++++----------- session_test.go | 70 +++++++++++++++++++++++++ 4 files changed, 317 insertions(+), 73 deletions(-) diff --git a/bulk.go b/bulk.go index 59a9f76af..b419ed83b 100644 --- a/bulk.go +++ b/bulk.go @@ -3,19 +3,28 @@ package mgo import ( "bytes" "gopkg.in/mgo.v2-unstable/bson" + "sort" ) // Bulk represents an operation that can be prepared with several // orthogonal changes before being delivered to the server. // +// MongoDB servers older than version 2.6 do not have proper support for bulk +// operations, so the driver attempts to map its API as much as possible into +// the functionality that works. In particular, in those releases updates and +// removals are sent individually, and inserts are sent in bulk but have +// suboptimal error reporting compared to more recent versions of the server. +// See the documentation of BulkErrorCase for details on that. +// // Relevant documentation: // // http://blog.mongodb.org/post/84922794768/mongodbs-new-bulk-api // type Bulk struct { c *Collection - ordered bool + opcount int actions []bulkAction + ordered bool } type bulkOp int @@ -30,19 +39,12 @@ const ( type bulkAction struct { op bulkOp docs []interface{} + idxs []int } type bulkUpdateOp []interface{} type bulkDeleteOp []interface{} -// BulkError holds an error returned from running a Bulk operation. -// -// TODO: This is private for the moment, until we understand exactly how -// to report these multi-errors in a useful and convenient way. -type bulkError struct { - errs []error -} - // BulkResult holds the results for a bulk operation. 
type BulkResult struct { Matched int @@ -54,17 +56,23 @@ type BulkResult struct { private bool } -func (e *bulkError) Error() string { - if len(e.errs) == 0 { - return "invalid bulkError instance: no errors" +// BulkError holds an error returned from running a Bulk operation. +// Individual errors may be obtained and inspected via the Cases method. +type BulkError struct { + ecases []BulkErrorCase +} + +func (e *BulkError) Error() string { + if len(e.ecases) == 0 { + return "invalid BulkError instance: no errors" } - if len(e.errs) == 1 { - return e.errs[0].Error() + if len(e.ecases) == 1 { + return e.ecases[0].Err.Error() } - msgs := make([]string, 0, len(e.errs)) + msgs := make([]string, 0, len(e.ecases)) seen := make(map[string]bool) - for _, err := range e.errs { - msg := err.Error() + for _, ecase := range e.ecases { + msg := ecase.Err.Error() if !seen[msg] { seen[msg] = true msgs = append(msgs, msg) @@ -83,6 +91,32 @@ func (e *bulkError) Error() string { return buf.String() } +type bulkErrorCases []BulkErrorCase + +func (slice bulkErrorCases) Len() int { return len(slice) } +func (slice bulkErrorCases) Less(i, j int) bool { return slice[i].Index < slice[j].Index } +func (slice bulkErrorCases) Swap(i, j int) { slice[i], slice[j] = slice[j], slice[i] } + +// BulkErrorCase holds an individual error found while attempting a single change +// within a bulk operation, and the position in which it was enqueued. +// +// MongoDB servers older than version 2.6 do not have proper support for bulk +// operations, so the driver attempts to map its API as much as possible into +// the functionality that works. In particular, only the last error is reported +// for bulk inserts and without any positional information, so the Index +// field is set to -1 in these cases. +type BulkErrorCase struct { + Index int // Position of operation that failed, or -1 if unknown. + Err error +} + +// Cases returns all individual errors found while attempting the requested changes. 
+// +// See the documentation of BulkErrorCase for limitations in older MongoDB releases. +func (e *BulkError) Cases() []BulkErrorCase { + return e.ecases +} + // Bulk returns a value to prepare the execution of a bulk operation. func (c *Collection) Bulk() *Bulk { return &Bulk{c: c, ordered: true} @@ -97,31 +131,39 @@ func (b *Bulk) Unordered() { b.ordered = false } -func (b *Bulk) action(op bulkOp) *bulkAction { +func (b *Bulk) action(op bulkOp, opcount int) *bulkAction { + var action *bulkAction if len(b.actions) > 0 && b.actions[len(b.actions)-1].op == op { - return &b.actions[len(b.actions)-1] - } - if !b.ordered { + action = &b.actions[len(b.actions)-1] + } else if !b.ordered { for i := range b.actions { if b.actions[i].op == op { - return &b.actions[i] + action = &b.actions[i] + break } } } - b.actions = append(b.actions, bulkAction{op: op}) - return &b.actions[len(b.actions)-1] + if action == nil { + b.actions = append(b.actions, bulkAction{op: op}) + action = &b.actions[len(b.actions)-1] + } + for i := 0; i < opcount; i++ { + action.idxs = append(action.idxs, b.opcount) + b.opcount++ + } + return action } // Insert queues up the provided documents for insertion. func (b *Bulk) Insert(docs ...interface{}) { - action := b.action(bulkInsert) + action := b.action(bulkInsert, len(docs)) action.docs = append(action.docs, docs...) } // Remove queues up the provided selectors for removing matching documents. // Each selector will remove only a single matching document. func (b *Bulk) Remove(selectors ...interface{}) { - action := b.action(bulkRemove) + action := b.action(bulkRemove, len(selectors)) for _, selector := range selectors { if selector == nil { selector = bson.D{} @@ -138,7 +180,7 @@ func (b *Bulk) Remove(selectors ...interface{}) { // RemoveAll queues up the provided selectors for removing all matching documents. // Each selector will remove all matching documents. 
func (b *Bulk) RemoveAll(selectors ...interface{}) { - action := b.action(bulkRemove) + action := b.action(bulkRemove, len(selectors)) for _, selector := range selectors { if selector == nil { selector = bson.D{} @@ -160,7 +202,7 @@ func (b *Bulk) Update(pairs ...interface{}) { if len(pairs)%2 != 0 { panic("Bulk.Update requires an even number of parameters") } - action := b.action(bulkUpdate) + action := b.action(bulkUpdate, len(pairs)/2) for i := 0; i < len(pairs); i += 2 { selector := pairs[i] if selector == nil { @@ -182,7 +224,7 @@ func (b *Bulk) UpdateAll(pairs ...interface{}) { if len(pairs)%2 != 0 { panic("Bulk.UpdateAll requires an even number of parameters") } - action := b.action(bulkUpdate) + action := b.action(bulkUpdate, len(pairs)/2) for i := 0; i < len(pairs); i += 2 { selector := pairs[i] if selector == nil { @@ -206,7 +248,7 @@ func (b *Bulk) Upsert(pairs ...interface{}) { if len(pairs)%2 != 0 { panic("Bulk.Update requires an even number of parameters") } - action := b.action(bulkUpdate) + action := b.action(bulkUpdate, len(pairs)/2) for i := 0; i < len(pairs); i += 2 { selector := pairs[i] if selector == nil { @@ -230,7 +272,7 @@ func (b *Bulk) Upsert(pairs ...interface{}) { // error only due to a limitation in the wire protocol. 
func (b *Bulk) Run() (*BulkResult, error) { var result BulkResult - var berr bulkError + var berr BulkError var failed bool for i := range b.actions { action := &b.actions[i] @@ -253,40 +295,51 @@ func (b *Bulk) Run() (*BulkResult, error) { } } if failed { + sort.Sort(bulkErrorCases(berr.ecases)) return nil, &berr } return &result, nil } -func (b *Bulk) runInsert(action *bulkAction, result *BulkResult, berr *bulkError) bool { +func (b *Bulk) runInsert(action *bulkAction, result *BulkResult, berr *BulkError) bool { op := &insertOp{b.c.FullName, action.docs, 0} if !b.ordered { op.flags = 1 // ContinueOnError } lerr, err := b.c.writeOp(op, b.ordered) - return b.checkSuccess(berr, lerr, err) + return b.checkSuccess(action, berr, lerr, err) } -func (b *Bulk) runUpdate(action *bulkAction, result *BulkResult, berr *bulkError) bool { +func (b *Bulk) runUpdate(action *bulkAction, result *BulkResult, berr *BulkError) bool { lerr, err := b.c.writeOp(bulkUpdateOp(action.docs), b.ordered) result.Matched += lerr.N result.Modified += lerr.modified - return b.checkSuccess(berr, lerr, err) + return b.checkSuccess(action, berr, lerr, err) } -func (b *Bulk) runRemove(action *bulkAction, result *BulkResult, berr *bulkError) bool { +func (b *Bulk) runRemove(action *bulkAction, result *BulkResult, berr *BulkError) bool { lerr, err := b.c.writeOp(bulkDeleteOp(action.docs), b.ordered) result.Matched += lerr.N result.Modified += lerr.modified - return b.checkSuccess(berr, lerr, err) + return b.checkSuccess(action, berr, lerr, err) } -func (b *Bulk) checkSuccess(berr *bulkError, lerr *LastError, err error) bool { - if lerr != nil && len(lerr.errors) > 0 { - berr.errs = append(berr.errs, lerr.errors...) +func (b *Bulk) checkSuccess(action *bulkAction, berr *BulkError, lerr *LastError, err error) bool { + if lerr != nil && len(lerr.ecases) > 0 { + for i := 0; i < len(lerr.ecases); i++ { + // Map back from the local error index into the visible one. 
+ ecase := lerr.ecases[i] + idx := ecase.Index + if idx >= 0 { + idx = action.idxs[idx] + } + berr.ecases = append(berr.ecases, BulkErrorCase{idx, ecase.Err}) + } return false } else if err != nil { - berr.errs = append(berr.errs, err) + for i := 0; i < len(action.idxs); i++ { + berr.ecases = append(berr.ecases, BulkErrorCase{action.idxs[i], err}) + } return false } return true diff --git a/bulk_test.go b/bulk_test.go index df5e9116f..17fb8a5d0 100644 --- a/bulk_test.go +++ b/bulk_test.go @@ -131,7 +131,7 @@ func (s *S) TestBulkInsertErrorUnorderedSplitBatch(c *C) { c.Assert(res.Id, Equals, 1500) } -func (s *S) TestBulkError(c *C) { +func (s *S) TestBulkErrorString(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) defer session.Close() @@ -176,6 +176,118 @@ func (s *S) TestBulkError(c *C) { c.Assert(mgo.IsDup(err), Equals, false) } +func (s *S) TestBulkErrorCases_2_6(c *C) { + if !s.versionAtLeast(2, 6) { + c.Skip("2.4- has poor bulk reporting") + } + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + bulk := coll.Bulk() + bulk.Unordered() + + // There's a limit of 1000 operations per command, so + // this forces the more complex indexing logic to act. 
+ for i := 0; i < 1010; i++ { + switch i { + case 3, 14: + bulk.Insert(M{"_id": "dupone"}) + case 5, 106: + bulk.Update(M{"_id": i-1}, M{"$set": M{"_id": 4}}) + case 7, 1008: + bulk.Insert(M{"_id": "duptwo"}) + default: + bulk.Insert(M{"_id": i}) + } + } + + _, err = bulk.Run() + ecases := err.(*mgo.BulkError).Cases() + + c.Check(ecases[0].Err, ErrorMatches, ".*duplicate.*dupone.*") + c.Check(ecases[0].Index, Equals, 14) + c.Check(ecases[1].Err, ErrorMatches, ".*update.*_id.*") + c.Check(ecases[1].Index, Equals, 106) + c.Check(ecases[2].Err, ErrorMatches, ".*duplicate.*duptwo.*") + c.Check(ecases[2].Index, Equals, 1008) +} + +func (s *S) TestBulkErrorCases_2_4(c *C) { + if s.versionAtLeast(2, 6) { + c.Skip("2.6+ has better reporting") + } + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + bulk := coll.Bulk() + bulk.Unordered() + + // There's a limit of 1000 operations per command, so + // this forces the more complex indexing logic to act. + for i := 0; i < 1010; i++ { + switch i { + case 3, 14: + bulk.Insert(M{"_id": "dupone"}) + case 5: + bulk.Update(M{"_id": i-1}, M{"$set": M{"n": 4}}) + case 106: + bulk.Update(M{"_id": i-1}, M{"$bogus": M{"n": 4}}) + case 7, 1008: + bulk.Insert(M{"_id": "duptwo"}) + default: + bulk.Insert(M{"_id": i}) + } + } + + _, err = bulk.Run() + ecases := err.(*mgo.BulkError).Cases() + + c.Check(ecases[0].Err, ErrorMatches, ".*duplicate.*duptwo.*") + c.Check(ecases[0].Index, Equals, -1) + c.Check(ecases[1].Err, ErrorMatches, `.*\$bogus.*`) + c.Check(ecases[1].Index, Equals, 106) +} + +func (s *S) TestBulkErrorCasesOrdered(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + bulk := coll.Bulk() + + // There's a limit of 1000 operations per command, so + // this forces the more complex indexing logic to act. 
+ for i := 0; i < 20; i++ { + switch i { + case 3, 14: + bulk.Insert(M{"_id": "dupone"}) + case 7, 17: + bulk.Insert(M{"_id": "duptwo"}) + default: + bulk.Insert(M{"_id": i}) + } + } + + _, err = bulk.Run() + ecases := err.(*mgo.BulkError).Cases() + + c.Check(ecases[0].Err, ErrorMatches, ".*duplicate.*dupone.*") + if s.versionAtLeast(2, 6) { + c.Check(ecases[0].Index, Equals, 14) + } else { + c.Check(ecases[0].Index, Equals, -1) + } + c.Check(ecases, HasLen, 1) +} + func (s *S) TestBulkUpdate(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) diff --git a/session.go b/session.go index 681c9589b..352921125 100644 --- a/session.go +++ b/session.go @@ -2306,7 +2306,7 @@ type LastError struct { UpsertedId interface{} `bson:"upserted"` modified int - errors []error + ecases []BulkErrorCase } func (err *LastError) Error() string { @@ -2343,9 +2343,9 @@ func IsDup(err error) bool { return e.Code == 11000 || e.Code == 11001 || e.Code == 12582 || e.Code == 16460 && strings.Contains(e.Err, " E11000 ") case *QueryError: return e.Code == 11000 || e.Code == 11001 || e.Code == 12582 - case *bulkError: - for _, ee := range e.errs { - if !IsDup(ee) { + case *BulkError: + for _, ecase := range e.ecases { + if !IsDup(ecase.Err) { return false } } @@ -3020,10 +3020,10 @@ func (q *Query) One(result interface{}) (err error) { } if expectFindReply { var findReply struct { - Ok bool - Code int - Errmsg string - Cursor cursorData + Ok bool + Code int + Errmsg string + Cursor cursorData } err = bson.Unmarshal(data, &findReply) if err != nil { @@ -4390,10 +4390,10 @@ func (iter *Iter) replyFunc() replyFunc { } else if iter.findCmd { debugf("Iter %p received reply document %d/%d (cursor=%d)", iter, docNum+1, int(op.replyDocs), op.cursorId) var findReply struct { - Ok bool - Code int - Errmsg string - Cursor cursorData + Ok bool + Code int + Errmsg string + Cursor cursorData } if err := bson.Unmarshal(docData, &findReply); err != nil { iter.err = err @@ -4462,12 +4462,12 
@@ type writeCmdError struct { ErrMsg string } -func (r *writeCmdResult) QueryErrors() []error { - var errs []error - for _, err := range r.Errors { - errs = append(errs, &QueryError{Code: err.Code, Message: err.ErrMsg}) +func (r *writeCmdResult) BulkErrorCases() []BulkErrorCase { + ecases := make([]BulkErrorCase, len(r.Errors)) + for i, err := range r.Errors { + ecases[i] = BulkErrorCase{err.Index, &QueryError{Code: err.Code, Message: err.ErrMsg}} } - return errs + return ecases } // writeOp runs the given modifying operation, potentially followed up @@ -4490,7 +4490,8 @@ func (c *Collection) writeOp(op interface{}, ordered bool) (lerr *LastError, err if socket.ServerInfo().MaxWireVersion >= 2 { // Servers with a more recent write protocol benefit from write commands. if op, ok := op.(*insertOp); ok && len(op.documents) > 1000 { - var errors []error + var ecases []BulkErrorCase + // Maximum batch size is 1000. Must split out in separate operations for compatibility. all := op.documents for i := 0; i < len(all); i += 1000 { @@ -4501,52 +4502,55 @@ func (c *Collection) writeOp(op interface{}, ordered bool) (lerr *LastError, err op.documents = all[i:l] lerr, err := c.writeOpCommand(socket, safeOp, op, ordered, bypassValidation) if err != nil { - errors = append(errors, lerr.errors...) + for ei := range lerr.ecases { + lerr.ecases[ei].Index += i + } + ecases = append(ecases, lerr.ecases...) 
if op.flags&1 == 0 { - return &LastError{errors: errors}, err + return &LastError{ecases: ecases}, err } } } - if len(errors) == 0 { + if len(ecases) == 0 { return nil, nil } - return &LastError{errors: errors}, errors[0] + return &LastError{ecases: ecases}, ecases[0].Err } return c.writeOpCommand(socket, safeOp, op, ordered, bypassValidation) } else if updateOps, ok := op.(bulkUpdateOp); ok { var lerr LastError - for _, updateOp := range updateOps { + for i, updateOp := range updateOps { oplerr, err := c.writeOpQuery(socket, safeOp, updateOp, ordered) if err != nil { lerr.N += oplerr.N lerr.modified += oplerr.modified - lerr.errors = append(lerr.errors, oplerr.errors...) + lerr.ecases = append(lerr.ecases, BulkErrorCase{i, err}) if ordered { break } } } - if len(lerr.errors) == 0 { + if len(lerr.ecases) == 0 { return nil, nil } - return &lerr, lerr.errors[0] + return &lerr, lerr.ecases[0].Err } else if deleteOps, ok := op.(bulkDeleteOp); ok { var lerr LastError - for _, deleteOp := range deleteOps { + for i, deleteOp := range deleteOps { oplerr, err := c.writeOpQuery(socket, safeOp, deleteOp, ordered) if err != nil { lerr.N += oplerr.N lerr.modified += oplerr.modified - lerr.errors = append(lerr.errors, oplerr.errors...) 
+ lerr.ecases = append(lerr.ecases, BulkErrorCase{i, err}) if ordered { break } } } - if len(lerr.errors) == 0 { + if len(lerr.ecases) == 0 { return nil, nil } - return &lerr, lerr.errors[0] + return &lerr, lerr.ecases[0].Err } return c.writeOpQuery(socket, safeOp, op, ordered) } @@ -4586,6 +4590,10 @@ func (c *Collection) writeOpQuery(socket *mongoSocket, safeOp *queryOp, op inter bson.Unmarshal(replyData, &result) debugf("Result from writing query: %#v", result) if result.Err != "" { + result.ecases = []BulkErrorCase{{Index: 0, Err: result}} + if insert, ok := op.(*insertOp); ok && len(insert.documents) > 1 { + result.ecases[0].Index = -1 + } return result, result } return result, nil @@ -4649,12 +4657,13 @@ func (c *Collection) writeOpCommand(socket *mongoSocket, safeOp *queryOp, op int var result writeCmdResult err = c.Database.run(socket, cmd, &result) debugf("Write command result: %#v (err=%v)", result, err) + ecases := result.BulkErrorCases() lerr = &LastError{ UpdatedExisting: result.N > 0 && len(result.Upserted) == 0, N: result.N, modified: result.NModified, - errors: result.QueryErrors(), + ecases: ecases, } if len(result.Upserted) > 0 { lerr.UpsertedId = result.Upserted[0].Id diff --git a/session_test.go b/session_test.go index aca2946a2..f54b842be 100644 --- a/session_test.go +++ b/session_test.go @@ -817,6 +817,76 @@ func (s *S) TestCreateCollectionForceIndex(c *C) { c.Assert(indexes, HasLen, 1) } +func (s *S) TestCreateCollectionValidator(c *C) { + if !s.versionAtLeast(3, 2) { + c.Skip("validation depends on MongoDB 3.2+") + } + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + db := session.DB("mydb") + coll := db.C("mycoll") + + // Test Validator. 
+ info := &mgo.CollectionInfo{ + Validator: M{"b": M{"$exists": true}}, + } + err = coll.Create(info) + c.Assert(err, IsNil) + err = coll.Insert(M{"a": 1}) + c.Assert(err, ErrorMatches, "Document failed validation") + err = coll.DropCollection() + c.Assert(err, IsNil) + + // Test ValidatorAction. + info = &mgo.CollectionInfo{ + Validator: M{"b": M{"$exists": true}}, + ValidationAction: "warn", + } + err = coll.Create(info) + c.Assert(err, IsNil) + err = coll.Insert(M{"a": 1}) + c.Assert(err, IsNil) + err = coll.DropCollection() + c.Assert(err, IsNil) + + // Test ValidationLevel. + info = &mgo.CollectionInfo{ + Validator: M{"a": M{"$exists": true}}, + ValidationLevel: "moderate", + } + err = coll.Create(info) + err = coll.Insert(M{"a": 1}) + c.Assert(err, IsNil) + err = db.Run(bson.D{{"collMod", "mycoll"}, {"validator", M{"b": M{"$exists": true}}}}, nil) + c.Assert(err, IsNil) + err = coll.Insert(M{"a": 2}) + c.Assert(err, ErrorMatches, "Document failed validation") + err = coll.Update(M{"a": 1}, M{"c": 1}) + c.Assert(err, IsNil) + err = coll.DropCollection() + c.Assert(err, IsNil) +} + +func (s *S) TestCreateCollectionStorageEngine(c *C) { + if !s.versionAtLeast(3, 0) { + c.Skip("storageEngine option depends on MongoDB 3.0+") + } + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + db := session.DB("mydb") + coll := db.C("mycoll") + + info := &mgo.CollectionInfo{ + StorageEngine: M{"test": M{}}, + } + err = coll.Create(info) + c.Assert(err, ErrorMatches, "test is not a registered storage engine for this server") +} + func (s *S) TestIsDupValues(c *C) { c.Assert(mgo.IsDup(nil), Equals, false) c.Assert(mgo.IsDup(&mgo.LastError{Code: 1}), Equals, false) From dfe1a769633c38f3a67d7cec141f0323d21e260b Mon Sep 17 00:00:00 2001 From: Maciej Galkowski Date: Fri, 15 Jan 2016 14:52:23 +0000 Subject: [PATCH 223/305] Fix crash on bulk update and delete returned error. 
--- bulk.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/bulk.go b/bulk.go index b419ed83b..4b04e5de2 100644 --- a/bulk.go +++ b/bulk.go @@ -312,15 +312,19 @@ func (b *Bulk) runInsert(action *bulkAction, result *BulkResult, berr *BulkError func (b *Bulk) runUpdate(action *bulkAction, result *BulkResult, berr *BulkError) bool { lerr, err := b.c.writeOp(bulkUpdateOp(action.docs), b.ordered) - result.Matched += lerr.N - result.Modified += lerr.modified + if lerr != nil { + result.Matched += lerr.N + result.Modified += lerr.modified + } return b.checkSuccess(action, berr, lerr, err) } func (b *Bulk) runRemove(action *bulkAction, result *BulkResult, berr *BulkError) bool { lerr, err := b.c.writeOp(bulkDeleteOp(action.docs), b.ordered) - result.Matched += lerr.N - result.Modified += lerr.modified + if lerr != nil { + result.Matched += lerr.N + result.Modified += lerr.modified + } return b.checkSuccess(action, berr, lerr, err) } From caea72dcd9503c53e09304be0d1d0131b607dc1e Mon Sep 17 00:00:00 2001 From: jackspirou Date: Tue, 19 Jan 2016 21:55:03 -0600 Subject: [PATCH 224/305] allow bson.ObjectId to satisfy encoding.TextMarshaler and encoding.TextUnmarshaler --- bson/bson.go | 24 +++++++++- bson/bson_test.go | 118 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 141 insertions(+), 1 deletion(-) diff --git a/bson/bson.go b/bson/bson.go index f1f9ab747..9fceb9b64 100644 --- a/bson/bson.go +++ b/bson/bson.go @@ -201,7 +201,6 @@ func readRandomUint32() uint32 { return uint32((uint32(b[0]) << 0) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)) } - // machineId stores machine id generated once and used in subsequent calls // to NewObjectId function. var machineId = readMachineId() @@ -293,6 +292,29 @@ func (id *ObjectId) UnmarshalJSON(data []byte) error { return nil } +// MarshalText turns bson.ObjectId into an encoding.TextMarshaler. 
+func (id ObjectId) MarshalText() ([]byte, error) { + return []byte(fmt.Sprintf("%x", string(id))), nil +} + +// UnmarshalText turns *bson.ObjectId into an encoding.TextUnmarshaler. +func (id *ObjectId) UnmarshalText(data []byte) error { + if len(data) == 1 && data[0] == ' ' || len(data) == 0 { + *id = "" + return nil + } + if len(data) != 24 { + return fmt.Errorf("Invalid ObjectId in Text: %s", data) + } + var buf [12]byte + _, err := hex.Decode(buf[:], data[:]) + if err != nil { + return fmt.Errorf("Invalid ObjectId in Text: %s (%s)", data, err) + } + *id = ObjectId(string(buf[:])) + return nil +} + // Valid returns true if id is valid. A valid id must contain exactly 12 bytes. func (id ObjectId) Valid() bool { return len(id) == 12 diff --git a/bson/bson_test.go b/bson/bson_test.go index 9b63f8ebb..19bf079f1 100644 --- a/bson/bson_test.go +++ b/bson/bson_test.go @@ -31,6 +31,7 @@ import ( "encoding/binary" "encoding/hex" "encoding/json" + "encoding/xml" "errors" "net/url" "reflect" @@ -1623,6 +1624,123 @@ func (s *S) TestSpecTests(c *C) { } } +// -------------------------------------------------------------------------- +// ObjectId Text encoding.TextUnmarshaler. 
+ +var textIdTests = []struct { + value bson.ObjectId + text string + marshal bool + unmarshal bool + error string +}{{ + value: bson.ObjectIdHex("4d88e15b60f486e428412dc9"), + text: "4d88e15b60f486e428412dc9", + marshal: true, + unmarshal: true, +}, { + text: "", + marshal: true, + unmarshal: true, +}, { + text: "4d88e15b60f486e428412dc9A", + marshal: false, + unmarshal: true, + error: `Invalid ObjectId in Text: 4d88e15b60f486e428412dc9A`, +}, { + text: "4d88e15b60f486e428412dcZ", + marshal: false, + unmarshal: true, + error: `Invalid ObjectId in Text: 4d88e15b60f486e428412dcZ .*`, +}} + +func (s *S) TestObjectIdTextMarshaling(c *C) { + for _, test := range textIdTests { + if test.marshal { + data, err := test.value.MarshalText() + if test.error == "" { + c.Assert(err, IsNil) + c.Assert(string(data), Equals, test.text) + } else { + c.Assert(err, ErrorMatches, test.error) + } + } + + if test.unmarshal { + err := test.value.UnmarshalText([]byte(test.text)) + if test.error == "" { + c.Assert(err, IsNil) + if test.value != "" { + value := bson.ObjectIdHex(test.text) + c.Assert(value, DeepEquals, test.value) + } + } else { + c.Assert(err, ErrorMatches, test.error) + } + } + } +} + +// -------------------------------------------------------------------------- +// ObjectId XML marshalling. 
+ +type xmlType struct { + Id bson.ObjectId +} + +var xmlIdTests = []struct { + value xmlType + xml string + marshal bool + unmarshal bool + error string +}{{ + value: xmlType{Id: bson.ObjectIdHex("4d88e15b60f486e428412dc9")}, + xml: "4d88e15b60f486e428412dc9", + marshal: true, + unmarshal: true, +}, { + value: xmlType{}, + xml: "", + marshal: true, + unmarshal: true, +}, { + xml: "4d88e15b60f486e428412dc9A", + marshal: false, + unmarshal: true, + error: `Invalid ObjectId in Text: 4d88e15b60f486e428412dc9A`, +}, { + xml: "4d88e15b60f486e428412dcZ", + marshal: false, + unmarshal: true, + error: `Invalid ObjectId in Text: 4d88e15b60f486e428412dcZ .*`, +}} + +func (s *S) TestObjectIdXMLMarshaling(c *C) { + for _, test := range xmlIdTests { + if test.marshal { + data, err := xml.Marshal(&test.value) + if test.error == "" { + c.Assert(err, IsNil) + c.Assert(string(data), Equals, test.xml) + } else { + c.Assert(err, ErrorMatches, test.error) + } + } + + if test.unmarshal { + var value xmlType + err := xml.Unmarshal([]byte(test.xml), &value) + if test.error == "" { + c.Assert(err, IsNil) + c.Assert(value, DeepEquals, test.value) + } else { + c.Assert(err, ErrorMatches, test.error) + } + } + } +} + // -------------------------------------------------------------------------- // Some simple benchmarks. From 8945b38523ddf49afca34f6801b9048af526f0e8 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 25 Jan 2016 08:37:54 -0200 Subject: [PATCH 225/305] Tune error messages in ObjectId marshalers. 
--- bson/bson.go | 12 ++++++------ bson/bson_test.go | 12 ++++++------ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/bson/bson.go b/bson/bson.go index 2d800854a..ac1c02c7f 100644 --- a/bson/bson.go +++ b/bson/bson.go @@ -172,7 +172,7 @@ type ObjectId string func ObjectIdHex(s string) ObjectId { d, err := hex.DecodeString(s) if err != nil || len(d) != 12 { - panic(fmt.Sprintf("Invalid input to ObjectIdHex: %q", s)) + panic(fmt.Sprintf("invalid input to ObjectIdHex: %q", s)) } return ObjectId(d) } @@ -281,12 +281,12 @@ func (id *ObjectId) UnmarshalJSON(data []byte) error { return nil } if len(data) != 26 || data[0] != '"' || data[25] != '"' { - return errors.New(fmt.Sprintf("Invalid ObjectId in JSON: %s", string(data))) + return errors.New(fmt.Sprintf("invalid ObjectId in JSON: %s", string(data))) } var buf [12]byte _, err := hex.Decode(buf[:], data[1:25]) if err != nil { - return errors.New(fmt.Sprintf("Invalid ObjectId in JSON: %s (%s)", string(data), err)) + return errors.New(fmt.Sprintf("invalid ObjectId in JSON: %s (%s)", string(data), err)) } *id = ObjectId(string(buf[:])) return nil @@ -304,12 +304,12 @@ func (id *ObjectId) UnmarshalText(data []byte) error { return nil } if len(data) != 24 { - return fmt.Errorf("Invalid ObjectId in Text: %s", data) + return fmt.Errorf("invalid ObjectId: %s", data) } var buf [12]byte _, err := hex.Decode(buf[:], data[:]) if err != nil { - return fmt.Errorf("Invalid ObjectId in Text: %s (%s)", data, err) + return fmt.Errorf("invalid ObjectId: %s (%s)", data, err) } *id = ObjectId(string(buf[:])) return nil @@ -324,7 +324,7 @@ func (id ObjectId) Valid() bool { // Calling this function with an invalid id will cause a runtime panic. 
func (id ObjectId) byteSlice(start, end int) []byte { if len(id) != 12 { - panic(fmt.Sprintf("Invalid ObjectId: %q", string(id))) + panic(fmt.Sprintf("invalid ObjectId: %q", string(id))) } return []byte(string(id)[start:end]) } diff --git a/bson/bson_test.go b/bson/bson_test.go index f25116c7b..b77ec877f 100644 --- a/bson/bson_test.go +++ b/bson/bson_test.go @@ -1537,12 +1537,12 @@ var jsonIdTests = []struct { unmarshal: true, }, { json: `{"Id":"4d88e15b60f486e428412dc9A"}`, - error: `Invalid ObjectId in JSON: "4d88e15b60f486e428412dc9A"`, + error: `invalid ObjectId in JSON: "4d88e15b60f486e428412dc9A"`, marshal: false, unmarshal: true, }, { json: `{"Id":"4d88e15b60f486e428412dcZ"}`, - error: `Invalid ObjectId in JSON: "4d88e15b60f486e428412dcZ" .*`, + error: `invalid ObjectId in JSON: "4d88e15b60f486e428412dcZ" .*`, marshal: false, unmarshal: true, }} @@ -1650,12 +1650,12 @@ var textIdTests = []struct { text: "4d88e15b60f486e428412dc9A", marshal: false, unmarshal: true, - error: `Invalid ObjectId in Text: 4d88e15b60f486e428412dc9A`, + error: `invalid ObjectId: 4d88e15b60f486e428412dc9A`, }, { text: "4d88e15b60f486e428412dcZ", marshal: false, unmarshal: true, - error: `Invalid ObjectId in Text: 4d88e15b60f486e428412dcZ .*`, + error: `invalid ObjectId: 4d88e15b60f486e428412dcZ .*`, }} func (s *S) TestObjectIdTextMarshaling(c *C) { @@ -1712,12 +1712,12 @@ var xmlIdTests = []struct { xml: "4d88e15b60f486e428412dc9A", marshal: false, unmarshal: true, - error: `Invalid ObjectId in Text: 4d88e15b60f486e428412dc9A`, + error: `invalid ObjectId: 4d88e15b60f486e428412dc9A`, }, { xml: "4d88e15b60f486e428412dcZ", marshal: false, unmarshal: true, - error: `Invalid ObjectId in Text: 4d88e15b60f486e428412dcZ .*`, + error: `invalid ObjectId: 4d88e15b60f486e428412dcZ .*`, }} func (s *S) TestObjectIdXMLMarshaling(c *C) { From 4140674c0cdf02a526b2956682e52e6816db27cf Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 25 Jan 2016 08:44:50 -0200 Subject: [PATCH 226/305] Fix URL 
docs contributed. --- session.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/session.go b/session.go index ee45d2b86..8152fec64 100644 --- a/session.go +++ b/session.go @@ -195,17 +195,17 @@ const defaultPrefetch = 0.25 // must be relaxed to Monotonic or Eventual via SetMode. // // -// connect=replicaSet +// connect=replicaSet +// +// Discover replica sets automatically. Default connection behavior. // -// Equivalent to the default connection behavior, but is a valid -// connection option that will not cause an error to be thrown. // +// replicaSet= // -// replicaSet= -// -// Defines the set name for the topology being monitored, and informs the -// automatic server discovery logic that the topology being monitored is -// a replica set. +// If specified will prevent the obtained session from communicating +// with any server which is not part of a replica set with the given name. +// The default is to communicate with any server specified or discovered +// via the servers contacted. // // // authSource= From 04d4613849a1b38f1bd370ee9b674c85960b9b00 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 25 Jan 2016 09:05:34 -0200 Subject: [PATCH 227/305] Do not remove GridFS file if index creation fails. 
--- gridfs.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/gridfs.go b/gridfs.go index 2e179cf0e..8a760a7ab 100644 --- a/gridfs.go +++ b/gridfs.go @@ -528,17 +528,17 @@ func (file *GridFile) completeWrite() { } file.doc.MD5 = hexsum file.err = file.gfs.Files.Insert(file.doc) - if file.err == nil { - index := Index{ - Key: []string{"files_id", "n"}, - Unique: true, - } - file.err = file.gfs.Chunks.EnsureIndex(index) - } } if file.err != nil { file.gfs.Chunks.RemoveAll(bson.D{{"files_id", file.doc.Id}}) } + if file.err == nil { + index := Index{ + Key: []string{"files_id", "n"}, + Unique: true, + } + file.err = file.gfs.Chunks.EnsureIndex(index) + } } // Abort cancels an in-progress write, preventing the file from being From 85d17954f0400b126cbd2cc3245fe1c00b003d40 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 1 Feb 2016 07:53:43 -0200 Subject: [PATCH 228/305] Logic on monotonic fallback test depends on 3.0 --- cluster_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cluster_test.go b/cluster_test.go index d509afc11..3fbb6a8ef 100644 --- a/cluster_test.go +++ b/cluster_test.go @@ -1951,6 +1951,9 @@ func (s *S) TestDoNotFallbackToMonotonic(c *C) { // falling back to Monotonic mode. This test ensures all listIndexes // commands go to the primary, as should happen since the session is // in Strong mode. + if !s.versionAtLeast(3, 0) { + c.Skip("command-counting logic depends on 3.0+") + } session, err := mgo.Dial("localhost:40012") c.Assert(err, IsNil) From 43c75542de0b584f583bab30dac2e574d6f26cfe Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 1 Feb 2016 09:41:30 -0200 Subject: [PATCH 229/305] Restore TestQueryErrorNext's original iter.Next. 
--- bulk_test.go | 2 +- session_test.go | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/bulk_test.go b/bulk_test.go index 17fb8a5d0..0596114e1 100644 --- a/bulk_test.go +++ b/bulk_test.go @@ -384,7 +384,7 @@ func (s *S) TestBulkUpdateAll(c *C) { bulk := coll.Bulk() bulk.UpdateAll(M{"n": 1}, M{"$set": M{"n": 10}}) - bulk.UpdateAll(M{"n": 2}, M{"$set": M{"n": 2}}) + bulk.UpdateAll(M{"n": 2}, M{"$set": M{"n": 2}}) // Won't change. bulk.UpdateAll(M{"n": 5}, M{"$set": M{"n": 50}}) // Won't match. bulk.UpdateAll(M{}, M{"$inc": M{"n": 1}}, M{"n": 11}, M{"$set": M{"n": 5}}) r, err := bulk.Run() diff --git a/session_test.go b/session_test.go index f54b842be..8320e06d3 100644 --- a/session_test.go +++ b/session_test.go @@ -2742,7 +2742,8 @@ func (s *S) TestQueryErrorNext(c *C) { iter := coll.Find(M{"a": 1}).Select(M{"a": M{"b": 1}}).Iter() - ok := iter.Next(nil) + var result struct{} + ok := iter.Next(&result) c.Assert(ok, Equals, false) err = iter.Close() From 3053837f48daad9ad629c94737d27459623473e4 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 1 Feb 2016 12:32:19 -0200 Subject: [PATCH 230/305] Drop out-of-date socket timeout TODO. --- socket.go | 1 - 1 file changed, 1 deletion(-) diff --git a/socket.go b/socket.go index eb66afc66..a2343354d 100644 --- a/socket.go +++ b/socket.go @@ -548,7 +548,6 @@ func (socket *mongoSocket) readLoop() { s := make([]byte, 4) conn := socket.conn // No locking, conn never changes. for { - // XXX Handle timeouts, , etc err := fill(conn, p) if err != nil { socket.kill(err, true) From fc4b094854d0a9b09dcac1678a3034e08a0ce74f Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 1 Feb 2016 13:16:43 -0200 Subject: [PATCH 231/305] Fix bulk result data on 2.4. 
--- session.go | 40 +++++++++++++++++++++------------------- 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/session.go b/session.go index 8152fec64..efe82481e 100644 --- a/session.go +++ b/session.go @@ -196,7 +196,7 @@ const defaultPrefetch = 0.25 // // // connect=replicaSet -// +// // Discover replica sets automatically. Default connection behavior. // // @@ -4504,7 +4504,7 @@ func (c *Collection) writeOp(op interface{}, ordered bool) (lerr *LastError, err if socket.ServerInfo().MaxWireVersion >= 2 { // Servers with a more recent write protocol benefit from write commands. if op, ok := op.(*insertOp); ok && len(op.documents) > 1000 { - var ecases []BulkErrorCase + var lerr LastError // Maximum batch size is 1000. Must split out in separate operations for compatibility. all := op.documents @@ -4514,57 +4514,59 @@ func (c *Collection) writeOp(op interface{}, ordered bool) (lerr *LastError, err l = len(all) } op.documents = all[i:l] - lerr, err := c.writeOpCommand(socket, safeOp, op, ordered, bypassValidation) + oplerr, err := c.writeOpCommand(socket, safeOp, op, ordered, bypassValidation) + lerr.N += oplerr.N + lerr.modified += oplerr.modified if err != nil { for ei := range lerr.ecases { - lerr.ecases[ei].Index += i + oplerr.ecases[ei].Index += i } - ecases = append(ecases, lerr.ecases...) + lerr.ecases = append(lerr.ecases, oplerr.ecases...) 
if op.flags&1 == 0 { - return &LastError{ecases: ecases}, err + return &lerr, err } } } - if len(ecases) == 0 { - return nil, nil + if len(lerr.ecases) != 0 { + return &lerr, lerr.ecases[0].Err } - return &LastError{ecases: ecases}, ecases[0].Err + return &lerr, nil } return c.writeOpCommand(socket, safeOp, op, ordered, bypassValidation) } else if updateOps, ok := op.(bulkUpdateOp); ok { var lerr LastError for i, updateOp := range updateOps { oplerr, err := c.writeOpQuery(socket, safeOp, updateOp, ordered) + lerr.N += oplerr.N + lerr.modified += oplerr.modified if err != nil { - lerr.N += oplerr.N - lerr.modified += oplerr.modified lerr.ecases = append(lerr.ecases, BulkErrorCase{i, err}) if ordered { break } } } - if len(lerr.ecases) == 0 { - return nil, nil + if len(lerr.ecases) != 0 { + return &lerr, lerr.ecases[0].Err } - return &lerr, lerr.ecases[0].Err + return &lerr, nil } else if deleteOps, ok := op.(bulkDeleteOp); ok { var lerr LastError for i, deleteOp := range deleteOps { oplerr, err := c.writeOpQuery(socket, safeOp, deleteOp, ordered) + lerr.N += oplerr.N + lerr.modified += oplerr.modified if err != nil { - lerr.N += oplerr.N - lerr.modified += oplerr.modified lerr.ecases = append(lerr.ecases, BulkErrorCase{i, err}) if ordered { break } } } - if len(lerr.ecases) == 0 { - return nil, nil + if len(lerr.ecases) != 0 { + return &lerr, lerr.ecases[0].Err } - return &lerr, lerr.ecases[0].Err + return &lerr, nil } return c.writeOpQuery(socket, safeOp, op, ordered) } From 43689a39e8ab22f90a4ff1230637c9ca89eb6cbd Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 1 Feb 2016 13:17:23 -0200 Subject: [PATCH 232/305] Fix ChangeInfo.Updated and introduce Matched. Fixes #149. 
--- session.go | 17 +++++++++++++---- session_test.go | 30 +++++++++++++++++++++++++----- 2 files changed, 38 insertions(+), 9 deletions(-) diff --git a/session.go b/session.go index efe82481e..252c4c5f3 100644 --- a/session.go +++ b/session.go @@ -2415,8 +2415,12 @@ func (c *Collection) UpdateId(id interface{}, update interface{}) error { // ChangeInfo holds details about the outcome of an update operation. type ChangeInfo struct { - Updated int // Number of existing documents updated + // Updated reports the number of existing documents modified. + // Due to server limitations, this reports the same value as the Matched field when + // talking to MongoDB <= 2.4 and on Upsert and Apply (findAndModify) operations. + Updated int Removed int // Number of documents removed + Matched int // Number of documents matched but not necessarily changed UpsertedId interface{} // Upserted _id field, when not explicitly provided } @@ -2445,7 +2449,7 @@ func (c *Collection) UpdateAll(selector interface{}, update interface{}) (info * } lerr, err := c.writeOp(&op, true) if err == nil && lerr != nil { - info = &ChangeInfo{Updated: lerr.N} + info = &ChangeInfo{Updated: lerr.modified, Matched: lerr.N} } return info, err } @@ -2478,7 +2482,8 @@ func (c *Collection) Upsert(selector interface{}, update interface{}) (info *Cha if err == nil && lerr != nil { info = &ChangeInfo{} if lerr.UpdatedExisting { - info.Updated = lerr.N + info.Matched = lerr.N + info.Updated = lerr.modified } else { info.UpsertedId = lerr.UpsertedId } @@ -2540,7 +2545,7 @@ func (c *Collection) RemoveAll(selector interface{}) (info *ChangeInfo, err erro } lerr, err := c.writeOp(&deleteOp{c.FullName, selector, 0, 0}, true) if err == nil && lerr != nil { - info = &ChangeInfo{Removed: lerr.N} + info = &ChangeInfo{Removed: lerr.N, Matched: lerr.N} } return info, err } @@ -4223,8 +4228,10 @@ func (q *Query) Apply(change Change, result interface{}) (info *ChangeInfo, err lerr := &doc.LastError if lerr.UpdatedExisting { 
info.Updated = lerr.N + info.Matched = lerr.N } else if change.Remove { info.Removed = lerr.N + info.Matched = lerr.N } else if change.Upsert { info.UpsertedId = lerr.UpsertedId } @@ -4612,6 +4619,8 @@ func (c *Collection) writeOpQuery(socket *mongoSocket, safeOp *queryOp, op inter } return result, result } + // With MongoDB <2.6 we don't know how many actually changed, so make it the same as matched. + result.modified = result.N return result, nil } diff --git a/session_test.go b/session_test.go index 8320e06d3..5d256fe2a 100644 --- a/session_test.go +++ b/session_test.go @@ -451,13 +451,14 @@ func (s *S) TestUpsert(c *C) { ns := []int{40, 41, 42, 43, 44, 45, 46} for _, n := range ns { - err := coll.Insert(M{"k": n, "n": n}) + err := coll.Insert(bson.D{{"k", n}, {"n", n}}) c.Assert(err, IsNil) } - info, err := coll.Upsert(M{"k": 42}, M{"k": 42, "n": 24}) + info, err := coll.Upsert(M{"k": 42}, bson.D{{"k", 42}, {"n", 24}}) c.Assert(err, IsNil) c.Assert(info.Updated, Equals, 1) + c.Assert(info.Matched, Equals, 1) c.Assert(info.UpsertedId, IsNil) result := M{} @@ -465,10 +466,18 @@ func (s *S) TestUpsert(c *C) { c.Assert(err, IsNil) c.Assert(result["n"], Equals, 24) + // Match but do not change. + info, err = coll.Upsert(M{"k": 42}, bson.D{{"k", 42}, {"n", 24}}) + c.Assert(err, IsNil) + c.Assert(info.Updated, Equals, 1) // On 2.6+ this feels like a server mistake. + c.Assert(info.Matched, Equals, 1) + c.Assert(info.UpsertedId, IsNil) + // Insert with internally created id. 
info, err = coll.Upsert(M{"k": 47}, M{"k": 47, "n": 47}) c.Assert(err, IsNil) c.Assert(info.Updated, Equals, 0) + c.Assert(info.Matched, Equals, 0) c.Assert(info.UpsertedId, NotNil) err = coll.Find(M{"k": 47}).One(result) @@ -484,6 +493,7 @@ func (s *S) TestUpsert(c *C) { info, err = coll.Upsert(M{"k": 48}, M{"k": 48, "n": 48, "_id": 48}) c.Assert(err, IsNil) c.Assert(info.Updated, Equals, 0) + c.Assert(info.Matched, Equals, 0) if s.versionAtLeast(2, 6) { c.Assert(info.UpsertedId, Equals, 48) } else { @@ -545,14 +555,20 @@ func (s *S) TestUpdateAll(c *C) { c.Assert(err, IsNil) } - // Don't actually modify the documents. Should still report 4 matching updates. info, err := coll.UpdateAll(M{"k": M{"$gt": 42}}, M{"$unset": M{"missing": 1}}) c.Assert(err, IsNil) - c.Assert(info.Updated, Equals, 4) + if s.versionAtLeast(2, 6) { + c.Assert(info.Updated, Equals, 0) + c.Assert(info.Matched, Equals, 4) + } else { + c.Assert(info.Updated, Equals, 4) + c.Assert(info.Matched, Equals, 4) + } info, err = coll.UpdateAll(M{"k": M{"$gt": 42}}, M{"$inc": M{"n": 1}}) c.Assert(err, IsNil) c.Assert(info.Updated, Equals, 4) + c.Assert(info.Matched, Equals, 4) result := make(M) err = coll.Find(M{"k": 42}).One(result) @@ -659,6 +675,7 @@ func (s *S) TestRemoveAll(c *C) { c.Assert(err, IsNil) c.Assert(info.Updated, Equals, 0) c.Assert(info.Removed, Equals, 4) + c.Assert(info.Matched, Equals, 4) c.Assert(info.UpsertedId, IsNil) result := &struct{ N int }{} @@ -676,6 +693,7 @@ func (s *S) TestRemoveAll(c *C) { c.Assert(err, IsNil) c.Assert(info.Updated, Equals, 0) c.Assert(info.Removed, Equals, 3) + c.Assert(info.Matched, Equals, 3) c.Assert(info.UpsertedId, IsNil) n, err := coll.Find(nil).Count() @@ -996,13 +1014,15 @@ func (s *S) TestFindAndModify(c *C) { c.Assert(err, IsNil) c.Assert(result["n"], Equals, 42) c.Assert(info.Updated, Equals, 1) + c.Assert(info.Matched, Equals, 1) c.Assert(info.Removed, Equals, 0) c.Assert(info.UpsertedId, IsNil) // A nil result parameter should be 
acceptable. info, err = coll.Find(M{"n": 43}).Apply(mgo.Change{Update: M{"$unset": M{"missing": 1}}}, nil) c.Assert(err, IsNil) - c.Assert(info.Updated, Equals, 1) + c.Assert(info.Updated, Equals, 1) // On 2.6+ this feels like a server mistake. + c.Assert(info.Matched, Equals, 1) c.Assert(info.Removed, Equals, 0) c.Assert(info.UpsertedId, IsNil) From d7b55b4570d01b6f747c2319c3c71ff11c09513e Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 1 Feb 2016 13:27:03 -0200 Subject: [PATCH 233/305] Enable TCP keep alives for all connections. --- server.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/server.go b/server.go index 1e36b995c..4bb874272 100644 --- a/server.go +++ b/server.go @@ -162,6 +162,11 @@ func (server *mongoServer) Connect(timeout time.Duration) (*mongoSocket, error) // Cannot do this because it lacks timeout support. :-( //conn, err = net.DialTCP("tcp", nil, server.tcpaddr) conn, err = net.DialTimeout("tcp", server.ResolvedAddr, timeout) + if tcpconn, ok := conn.(*net.TCPConn); ok { + tcpconn.SetKeepAlive(true) + } else { + panic("internal error: obtained TCP connection is not a *net.TCPConn!?") + } case dial.old != nil: conn, err = dial.old(server.tcpaddr) case dial.new != nil: From e37b32674ea64f76758a9366e517deeed9546773 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Thu, 4 Feb 2016 15:53:13 -0200 Subject: [PATCH 234/305] go fmt --- bulk.go | 5 +++-- bulk_test.go | 7 +++---- session_test.go | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/bulk.go b/bulk.go index 4b04e5de2..b9806905f 100644 --- a/bulk.go +++ b/bulk.go @@ -2,8 +2,9 @@ package mgo import ( "bytes" - "gopkg.in/mgo.v2-unstable/bson" "sort" + + "gopkg.in/mgo.v2-unstable/bson" ) // Bulk represents an operation that can be prepared with several @@ -106,7 +107,7 @@ func (slice bulkErrorCases) Swap(i, j int) { slice[i], slice[j] = slice[j], // for bulk inserts and without any positional information, so the Index // field is set to -1 in these 
cases. type BulkErrorCase struct { - Index int // Position of operation that failed, or -1 if unknown. + Index int // Position of operation that failed, or -1 if unknown. Err error } diff --git a/bulk_test.go b/bulk_test.go index 0596114e1..496e4d716 100644 --- a/bulk_test.go +++ b/bulk_test.go @@ -196,7 +196,7 @@ func (s *S) TestBulkErrorCases_2_6(c *C) { case 3, 14: bulk.Insert(M{"_id": "dupone"}) case 5, 106: - bulk.Update(M{"_id": i-1}, M{"$set": M{"_id": 4}}) + bulk.Update(M{"_id": i - 1}, M{"$set": M{"_id": 4}}) case 7, 1008: bulk.Insert(M{"_id": "duptwo"}) default: @@ -235,9 +235,9 @@ func (s *S) TestBulkErrorCases_2_4(c *C) { case 3, 14: bulk.Insert(M{"_id": "dupone"}) case 5: - bulk.Update(M{"_id": i-1}, M{"$set": M{"n": 4}}) + bulk.Update(M{"_id": i - 1}, M{"$set": M{"n": 4}}) case 106: - bulk.Update(M{"_id": i-1}, M{"$bogus": M{"n": 4}}) + bulk.Update(M{"_id": i - 1}, M{"$bogus": M{"n": 4}}) case 7, 1008: bulk.Insert(M{"_id": "duptwo"}) default: @@ -502,4 +502,3 @@ func (s *S) TestBulkRemoveAll(c *C) { c.Assert(err, IsNil) c.Assert(res, DeepEquals, []doc{{3}}) } - diff --git a/session_test.go b/session_test.go index 5d256fe2a..21259326e 100644 --- a/session_test.go +++ b/session_test.go @@ -859,7 +859,7 @@ func (s *S) TestCreateCollectionValidator(c *C) { // Test ValidatorAction. info = &mgo.CollectionInfo{ - Validator: M{"b": M{"$exists": true}}, + Validator: M{"b": M{"$exists": true}}, ValidationAction: "warn", } err = coll.Create(info) @@ -871,7 +871,7 @@ func (s *S) TestCreateCollectionValidator(c *C) { // Test ValidationLevel. info = &mgo.CollectionInfo{ - Validator: M{"a": M{"$exists": true}}, + Validator: M{"a": M{"$exists": true}}, ValidationLevel: "moderate", } err = coll.Create(info) From 547dc79326ed904c42940deac38d9747ec936659 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Sun, 7 Feb 2016 00:08:42 -0200 Subject: [PATCH 235/305] Fix crash in new tcp keep-alive logic. 
--- server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server.go b/server.go index 29aa71909..f6773593a 100644 --- a/server.go +++ b/server.go @@ -164,7 +164,7 @@ func (server *mongoServer) Connect(timeout time.Duration) (*mongoSocket, error) conn, err = net.DialTimeout("tcp", server.ResolvedAddr, timeout) if tcpconn, ok := conn.(*net.TCPConn); ok { tcpconn.SetKeepAlive(true) - } else { + } else if err == nil { panic("internal error: obtained TCP connection is not a *net.TCPConn!?") } case dial.old != nil: From d90005c5262a3463800497ea5a89aed5fe22c886 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 8 Feb 2016 22:46:34 -0200 Subject: [PATCH 236/305] Fix batch size setting on getMore command. --- session.go | 8 ++++---- session_test.go | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 37 insertions(+), 4 deletions(-) diff --git a/session.go b/session.go index 67b3cd9ae..8312924e3 100644 --- a/session.go +++ b/session.go @@ -3165,10 +3165,10 @@ type findCmd struct { // https://docs.mongodb.org/master/reference/command/getMore/#dbcmd.getMore // type getMoreCmd struct { - CursorId int64 `bson:"getMore"` - Collection string `bson:"collection"` - BatchSize interface{} `bson:"batchSize,omitempty"` - MaxTimeMS int64 `bson:"maxTimeMS,omitempty"` + CursorId int64 `bson:"getMore"` + Collection string `bson:"collection"` + BatchSize int32 `bson:"batchSize,omitempty"` + MaxTimeMS int64 `bson:"maxTimeMS,omitempty"` } // run duplicates the behavior of collection.Find(query).One(&result) diff --git a/session_test.go b/session_test.go index fbbeaa490..3a4f4fab4 100644 --- a/session_test.go +++ b/session_test.go @@ -1586,6 +1586,39 @@ func (s *S) TestTooManyItemsLimitBug(c *C) { c.Assert(iters, Equals, limit) } +func (s *S) TestBatchSizeZeroGetMore(c *C) { + if *fast { + c.Skip("-fast") + } + + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + defer 
runtime.GOMAXPROCS(runtime.GOMAXPROCS(runtime.NumCPU())) + + mgo.SetDebug(false) + coll := session.DB("mydb").C("mycoll") + words := strings.Split("foo bar baz", " ") + for i := 0; i < 5; i++ { + words = append(words, words...) + } + doc := bson.D{{"words", words}} + inserts := 10000 + iters := 0 + for i := 0; i < inserts; i++ { + err := coll.Insert(&doc) + c.Assert(err, IsNil) + } + iter := coll.Find(nil).Iter() + for iter.Next(&doc) { + if iters%100 == 0 { + c.Logf("Seen %d docments", iters) + } + iters++ + } + c.Assert(iter.Close(), IsNil) +} + func serverCursorsOpen(session *mgo.Session) int { var result struct { Cursors struct { From b84e8581292b0773606a61abd706da189e54346d Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Sun, 7 Feb 2016 00:08:42 -0200 Subject: [PATCH 237/305] Fix crash in new tcp keep-alive logic. --- server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server.go b/server.go index 4bb874272..fcac1e7de 100644 --- a/server.go +++ b/server.go @@ -164,7 +164,7 @@ func (server *mongoServer) Connect(timeout time.Duration) (*mongoSocket, error) conn, err = net.DialTimeout("tcp", server.ResolvedAddr, timeout) if tcpconn, ok := conn.(*net.TCPConn); ok { tcpconn.SetKeepAlive(true) - } else { + } else if err == nil { panic("internal error: obtained TCP connection is not a *net.TCPConn!?") } case dial.old != nil: From 9a615babae3c0a07875229a234244a7088303be4 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 8 Feb 2016 22:46:34 -0200 Subject: [PATCH 238/305] Fix batch size setting on getMore command. 
--- session.go | 8 ++++---- session_test.go | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 37 insertions(+), 4 deletions(-) diff --git a/session.go b/session.go index 252c4c5f3..8e4d447a7 100644 --- a/session.go +++ b/session.go @@ -3165,10 +3165,10 @@ type findCmd struct { // https://docs.mongodb.org/master/reference/command/getMore/#dbcmd.getMore // type getMoreCmd struct { - CursorId int64 `bson:"getMore"` - Collection string `bson:"collection"` - BatchSize interface{} `bson:"batchSize,omitempty"` - MaxTimeMS int64 `bson:"maxTimeMS,omitempty"` + CursorId int64 `bson:"getMore"` + Collection string `bson:"collection"` + BatchSize int32 `bson:"batchSize,omitempty"` + MaxTimeMS int64 `bson:"maxTimeMS,omitempty"` } // run duplicates the behavior of collection.Find(query).One(&result) diff --git a/session_test.go b/session_test.go index 21259326e..23df6e3e3 100644 --- a/session_test.go +++ b/session_test.go @@ -1586,6 +1586,39 @@ func (s *S) TestTooManyItemsLimitBug(c *C) { c.Assert(iters, Equals, limit) } +func (s *S) TestBatchSizeZeroGetMore(c *C) { + if *fast { + c.Skip("-fast") + } + + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(runtime.NumCPU())) + + mgo.SetDebug(false) + coll := session.DB("mydb").C("mycoll") + words := strings.Split("foo bar baz", " ") + for i := 0; i < 5; i++ { + words = append(words, words...) 
+ } + doc := bson.D{{"words", words}} + inserts := 10000 + iters := 0 + for i := 0; i < inserts; i++ { + err := coll.Insert(&doc) + c.Assert(err, IsNil) + } + iter := coll.Find(nil).Iter() + for iter.Next(&doc) { + if iters%100 == 0 { + c.Logf("Seen %d docments", iters) + } + iters++ + } + c.Assert(iter.Close(), IsNil) +} + func serverCursorsOpen(session *mgo.Session) int { var result struct { Cursors struct { From 6da6d2b623d576d197c847b2622de9f4690ff4ef Mon Sep 17 00:00:00 2001 From: Livio Soares Date: Tue, 23 Feb 2016 08:38:14 -0500 Subject: [PATCH 239/305] bson: Add test for embedded unexported struct using the ",inline" flag. This test is currently breaking with Go 1.6 due to changes in the `reflect` package. --- bson/bson_test.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/bson/bson_test.go b/bson/bson_test.go index b77ec877f..bff0d3ab5 100644 --- a/bson/bson_test.go +++ b/bson/bson_test.go @@ -1053,6 +1053,13 @@ type inlineDupMap struct { type inlineBadKeyMap struct { M map[int]int ",inline" } +type inlineUnexported struct { + M map[string]interface{} ",inline" + unexported ",inline" +} +type unexported struct { + A int +} type getterSetterD bson.D @@ -1284,6 +1291,7 @@ var twoWayCrossItems = []crossTypeItem{ {&inlineMapInt{A: 1, M: map[string]int{"b": 2}}, map[string]int{"a": 1, "b": 2}}, {&inlineMapInt{A: 1, M: nil}, map[string]int{"a": 1}}, {&inlineMapMyM{A: 1, M: MyM{"b": MyM{"c": 3}}}, map[string]interface{}{"a": 1, "b": map[string]interface{}{"c": 3}}}, + {&inlineUnexported{M: map[string]interface{}{"b": 1}, unexported: unexported{A: 2}}, map[string]interface{}{"b": 1, "a": 2}}, // []byte <=> Binary {&struct{ B []byte }{[]byte("abc")}, map[string]bson.Binary{"b": bson.Binary{Data: []byte("abc")}}}, From f3462d37ab5d4cb9fc98bbb25265d10ddbeb9b41 Mon Sep 17 00:00:00 2001 From: Livio Soares Date: Tue, 23 Feb 2016 08:56:19 -0500 Subject: [PATCH 240/305] bson: Fix for ,inline-d structs of unexported embedded for Go 1.6. 
Simply apply the recommendations described in the release document: https://golang.org/doc/go1.6#reflect To modify this check `f.PkgPath != ""` to this check `f.PkgPath != "" && !f.Anonymous`. --- bson/bson.go | 2 +- bson/encode.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/bson/bson.go b/bson/bson.go index ac1c02c7f..579aec13f 100644 --- a/bson/bson.go +++ b/bson/bson.go @@ -627,7 +627,7 @@ func getStructInfo(st reflect.Type) (*structInfo, error) { inlineMap := -1 for i := 0; i != n; i++ { field := st.Field(i) - if field.PkgPath != "" { + if field.PkgPath != "" && !field.Anonymous { continue // Private field } diff --git a/bson/encode.go b/bson/encode.go index 36eb29ce6..c228e28d3 100644 --- a/bson/encode.go +++ b/bson/encode.go @@ -188,7 +188,7 @@ func isZero(v reflect.Value) bool { return v.Interface().(time.Time).IsZero() } for i := 0; i < v.NumField(); i++ { - if vt.Field(i).PkgPath != "" { + if vt.Field(i).PkgPath != "" && !vt.Field(i).Anonymous { continue // Private field } if !isZero(v.Field(i)) { From 59739de89a8e1315599963ad9e62b4dfcd20dfbc Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Tue, 1 Mar 2016 18:12:11 -0300 Subject: [PATCH 241/305] Tentative .travis.yml --- .travis.yml | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 .travis.yml diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 000000000..1c5fc9db2 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,18 @@ +language: go + +go_import_path: gopkg.in/mgo.v2 + +addons: + apt: + packages: + - supervisor + - mongodb + +before_script: + - make startdb + +script: + - cd bson && go test -check.v + - go test -check.v -fast + +# vim:ts=4:et From 19047afdff71d54e00ecb0ef75b28df488df3456 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Tue, 1 Mar 2016 18:49:32 -0300 Subject: [PATCH 242/305] Switch to daemontools and adopt Travis-CI. 
--- .travis.yml | 36 +++++++++++++++++--- session_test.go | 38 +++++++++------------ suite_test.go | 8 ++--- testdb/daemons/.env | 57 ++++++++++++++++++++++++++++++++ testdb/daemons/cfg1/db/.empty | 0 testdb/daemons/cfg1/log/run | 3 ++ testdb/daemons/cfg1/run | 8 +++++ testdb/daemons/cfg2/db/.empty | 0 testdb/daemons/cfg2/log/run | 3 ++ testdb/daemons/cfg2/run | 8 +++++ testdb/daemons/cfg3/db/.empty | 0 testdb/daemons/cfg3/log/run | 3 ++ testdb/daemons/cfg3/run | 9 +++++ testdb/daemons/db1/db/.empty | 0 testdb/daemons/db1/log/run | 3 ++ testdb/daemons/db1/run | 15 +++++++++ testdb/daemons/db2/db/.empty | 0 testdb/daemons/db2/log/run | 3 ++ testdb/daemons/db2/run | 8 +++++ testdb/daemons/db3/db/.empty | 0 testdb/daemons/db3/log/run | 3 ++ testdb/daemons/db3/run | 12 +++++++ testdb/daemons/rs1a/db/.empty | 0 testdb/daemons/rs1a/log/run | 3 ++ testdb/daemons/rs1a/run | 8 +++++ testdb/daemons/rs1b/db/.empty | 0 testdb/daemons/rs1b/log/run | 3 ++ testdb/daemons/rs1b/run | 8 +++++ testdb/daemons/rs1c/db/.empty | 0 testdb/daemons/rs1c/log/run | 3 ++ testdb/daemons/rs1c/run | 8 +++++ testdb/daemons/rs2a/db/.empty | 0 testdb/daemons/rs2a/log/run | 3 ++ testdb/daemons/rs2a/run | 8 +++++ testdb/daemons/rs2b/db/.empty | 0 testdb/daemons/rs2b/log/run | 3 ++ testdb/daemons/rs2b/run | 8 +++++ testdb/daemons/rs2c/db/.empty | 0 testdb/daemons/rs2c/log/run | 3 ++ testdb/daemons/rs2c/run | 8 +++++ testdb/daemons/rs3a/db/.empty | 0 testdb/daemons/rs3a/log/run | 3 ++ testdb/daemons/rs3a/run | 9 +++++ testdb/daemons/rs3b/db/.empty | 0 testdb/daemons/rs3b/log/run | 3 ++ testdb/daemons/rs3b/run | 9 +++++ testdb/daemons/rs3c/db/.empty | 0 testdb/daemons/rs3c/log/run | 3 ++ testdb/daemons/rs3c/run | 9 +++++ testdb/daemons/rs4a/db/.empty | 0 testdb/daemons/rs4a/log/run | 3 ++ testdb/daemons/rs4a/run | 8 +++++ testdb/daemons/s1/log/run | 3 ++ testdb/daemons/s1/run | 7 ++++ testdb/daemons/s2/log/run | 3 ++ testdb/daemons/s2/run | 7 ++++ testdb/daemons/s3/log/run | 3 ++ testdb/daemons/s3/run | 
8 +++++ testdb/setup.sh | 62 +++++++++++++++++++++++++++-------- 59 files changed, 378 insertions(+), 45 deletions(-) create mode 100644 testdb/daemons/.env create mode 100644 testdb/daemons/cfg1/db/.empty create mode 100755 testdb/daemons/cfg1/log/run create mode 100755 testdb/daemons/cfg1/run create mode 100644 testdb/daemons/cfg2/db/.empty create mode 100755 testdb/daemons/cfg2/log/run create mode 100755 testdb/daemons/cfg2/run create mode 100644 testdb/daemons/cfg3/db/.empty create mode 100755 testdb/daemons/cfg3/log/run create mode 100755 testdb/daemons/cfg3/run create mode 100644 testdb/daemons/db1/db/.empty create mode 100755 testdb/daemons/db1/log/run create mode 100755 testdb/daemons/db1/run create mode 100644 testdb/daemons/db2/db/.empty create mode 100755 testdb/daemons/db2/log/run create mode 100755 testdb/daemons/db2/run create mode 100644 testdb/daemons/db3/db/.empty create mode 100755 testdb/daemons/db3/log/run create mode 100755 testdb/daemons/db3/run create mode 100644 testdb/daemons/rs1a/db/.empty create mode 100755 testdb/daemons/rs1a/log/run create mode 100755 testdb/daemons/rs1a/run create mode 100644 testdb/daemons/rs1b/db/.empty create mode 100755 testdb/daemons/rs1b/log/run create mode 100755 testdb/daemons/rs1b/run create mode 100644 testdb/daemons/rs1c/db/.empty create mode 100755 testdb/daemons/rs1c/log/run create mode 100755 testdb/daemons/rs1c/run create mode 100644 testdb/daemons/rs2a/db/.empty create mode 100755 testdb/daemons/rs2a/log/run create mode 100755 testdb/daemons/rs2a/run create mode 100644 testdb/daemons/rs2b/db/.empty create mode 100755 testdb/daemons/rs2b/log/run create mode 100755 testdb/daemons/rs2b/run create mode 100644 testdb/daemons/rs2c/db/.empty create mode 100755 testdb/daemons/rs2c/log/run create mode 100755 testdb/daemons/rs2c/run create mode 100644 testdb/daemons/rs3a/db/.empty create mode 100755 testdb/daemons/rs3a/log/run create mode 100755 testdb/daemons/rs3a/run create mode 100644 
testdb/daemons/rs3b/db/.empty create mode 100755 testdb/daemons/rs3b/log/run create mode 100755 testdb/daemons/rs3b/run create mode 100644 testdb/daemons/rs3c/db/.empty create mode 100755 testdb/daemons/rs3c/log/run create mode 100755 testdb/daemons/rs3c/run create mode 100644 testdb/daemons/rs4a/db/.empty create mode 100755 testdb/daemons/rs4a/log/run create mode 100755 testdb/daemons/rs4a/run create mode 100755 testdb/daemons/s1/log/run create mode 100755 testdb/daemons/s1/run create mode 100755 testdb/daemons/s2/log/run create mode 100755 testdb/daemons/s2/run create mode 100755 testdb/daemons/s3/log/run create mode 100755 testdb/daemons/s3/run diff --git a/.travis.yml b/.travis.yml index 1c5fc9db2..ee85f731d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,18 +1,44 @@ language: go -go_import_path: gopkg.in/mgo.v2 +go_import_path: gopkg.in/mgo.v2-unstable addons: apt: packages: - - supervisor - - mongodb + +env: + global: + - BUCKET=https://niemeyer.s3.amazonaws.com + matrix: + - GO=1.4.1 MONGODB=x86_64-2.2.7 + - GO=1.4.1 MONGODB=x86_64-2.4.14 + - GO=1.4.1 MONGODB=x86_64-2.6.11 + - GO=1.4.1 MONGODB=x86_64-3.0.9 + - GO=1.4.1 MONGODB=x86_64-3.2.3-nojournal + - GO=1.5.3 MONGODB=x86_64-3.0.9 + - GO=1.6 MONGODB=x86_64-3.0.9 + +install: + - eval "$(gimme $GO)" + + - wget $BUCKET/mongodb-linux-$MONGODB.tgz + - tar xzvf mongodb-linux-$MONGODB.tgz + - export PATH=$PWD/mongodb-linux-$MONGODB/bin:$PATH + + - wget $BUCKET/daemontools.tar.gz + - tar xzvf daemontools.tar.gz + - export PATH=$PWD/daemontools:$PATH + + - go get gopkg.in/check.v1 + - go get gopkg.in/yaml.v2 before_script: + - export NOIPV6=1 - make startdb script: - - cd bson && go test -check.v + - (cd bson && go test -check.v) - go test -check.v -fast -# vim:ts=4:et + +# vim:sw=4:ts=4:et diff --git a/session_test.go b/session_test.go index 23df6e3e3..042bcf602 100644 --- a/session_test.go +++ b/session_test.go @@ -30,6 +30,7 @@ import ( "flag" "fmt" "math" + "os" "runtime" "sort" "strconv" @@ -88,9 +89,11 @@ 
func (s *S) TestDialIPAddress(c *C) { c.Assert(err, IsNil) defer session.Close() - session, err = mgo.Dial("[::1%]:40001") - c.Assert(err, IsNil) - defer session.Close() + if os.Getenv("NOIPV6") != "1" { + session, err = mgo.Dial("[::1%]:40001") + c.Assert(err, IsNil) + defer session.Close() + } } func (s *S) TestURLSingle(c *C) { @@ -1957,13 +1960,11 @@ func (s *S) TestFindTailTimeoutNoSleep(c *C) { } } - mgo.ResetStats() - // The following call to Next will block. go func() { // The internal AwaitData timing of MongoDB is around 2 seconds, // so this item should arrive within the AwaitData threshold. - time.Sleep(5e8) + time.Sleep(500 * time.Millisecond) session := session.New() defer session.Close() coll := session.DB("mydb").C("mycoll") @@ -1978,20 +1979,6 @@ func (s *S) TestFindTailTimeoutNoSleep(c *C) { c.Assert(result.N, Equals, 47) c.Log("Got Next with N=47!") - // The following may break because it depends a bit on the internal - // timing used by MongoDB's AwaitData logic. If it does, the problem - // will be observed as more GET_MORE_OPs than predicted: - // 1*QUERY_OP for nonce + 1*GET_MORE_OP on Next + - // 1*INSERT_OP + 1*QUERY_OP for getLastError on insert of 47 - stats := mgo.GetStats() - if s.versionAtLeast(2, 6) { - c.Assert(stats.SentOps, Equals, 3) - } else { - c.Assert(stats.SentOps, Equals, 4) - } - c.Assert(stats.ReceivedOps, Equals, 3) // REPLY_OPs for 1*QUERY_OP for nonce + 1*GET_MORE_OPs and 1*QUERY_OP - c.Assert(stats.ReceivedDocs, Equals, 3) // nonce + N=47 result + getLastError response - c.Log("Will wait for a result which will never come...") started := time.Now() @@ -2477,6 +2464,10 @@ func (s *S) TestSortScoreText(c *C) { c.Assert(err, IsNil) defer session.Close() + if !s.versionAtLeast(2, 4) { + c.Skip("Text search depends on 2.4+") + } + coll := session.DB("mydb").C("mycoll") err = coll.EnsureIndex(mgo.Index{ @@ -2961,6 +2952,10 @@ func (s *S) TestEnsureIndex(c *C) { idxs := session.DB("mydb").C("system.indexes") for _, test := 
range indexTests { + if !s.versionAtLeast(2, 4) && test.expected["textIndexVersion"] != nil { + continue + } + err = coll.EnsureIndex(test.index) msg := "text search not enabled" if err != nil && strings.Contains(err.Error(), msg) { @@ -3718,7 +3713,7 @@ func (s *S) TestFsyncLock(c *C) { done := make(chan time.Time) go func() { - time.Sleep(3e9) + time.Sleep(3 * time.Second) now := time.Now() err := session.FsyncUnlock() c.Check(err, IsNil) @@ -3731,7 +3726,6 @@ func (s *S) TestFsyncLock(c *C) { c.Assert(err, IsNil) c.Assert(unlocked.After(unlocking), Equals, true) - c.Assert(unlocked.Sub(unlocking) < 1e9, Equals, true) } func (s *S) TestFsync(c *C) { diff --git a/suite_test.go b/suite_test.go index cf4b6ca84..ecb249952 100644 --- a/suite_test.go +++ b/suite_test.go @@ -142,7 +142,7 @@ func (s *S) Stop(host string) { // Give a moment for slaves to sync and avoid getting rollback issues. panicOnWindows() time.Sleep(2 * time.Second) - err := run("cd _testdb && supervisorctl stop " + supvName(host)) + err := run("svc -d _testdb/daemons/" + supvName(host)) if err != nil { panic(err) } @@ -185,8 +185,8 @@ func (s *S) Thaw(host string) { func (s *S) StartAll() { if s.stopped { // Restart any stopped nodes. - run("cd _testdb && supervisorctl start all") - err := run("cd testdb && mongo --nodb wait.js") + run("svc -u _testdb/daemons/*") + err := run("mongo --nodb testdb/wait.js") if err != nil { panic(err) } @@ -231,7 +231,7 @@ var supvNames = map[string]string{ "40203": "s3", } -// supvName returns the supervisord name for the given host address. +// supvName returns the daemon name for the given host address. 
func supvName(host string) string { host, port, err := net.SplitHostPort(host) if err != nil { diff --git a/testdb/daemons/.env b/testdb/daemons/.env new file mode 100644 index 000000000..96ee89e94 --- /dev/null +++ b/testdb/daemons/.env @@ -0,0 +1,57 @@ + +set -e + +MONGOVERSION=$(mongod --version | sed -n 's/.*v\([0-9]\+\.[0-9]\+\)\..*/\1/p') +MONGOMAJOR=$(echo $MONGOVERSION | sed 's/\([0-9]\+\)\..*/\1/') +MONGOMINOR=$(echo $MONGOVERSION | sed 's/[0-9]\+\.\([0-9]\+\)/\1/') + +versionAtLeast() { + TESTMAJOR="$1" + TESTMINOR="$2" + if [ "$MONGOMAJOR" -gt "$TESTMAJOR" ]; then + return 0 + fi + if [ "$MONGOMAJOR" -lt "$TESTMAJOR" ]; then + return 100 + fi + if [ "$MONGOMINOR" -ge "$TESTMINOR" ]; then + return 0 + fi + return 100 +} + +COMMONDOPTSNOIP=" + --nohttpinterface + --noprealloc + --nojournal + --smallfiles + --nssize=1 + --oplogSize=1 + --dbpath ./db + " +COMMONDOPTS=" + $COMMONDOPTSNOIP + --bind_ip=127.0.0.1 + " +COMMONCOPTS=" + $COMMONDOPTS + " +COMMONSOPTS=" + --chunkSize 1 + --bind_ip=127.0.0.1 + " + +if versionAtLeast 3 2; then + # 3.2 doesn't like --nojournal on config servers. + #COMMONCOPTS="$(echo "$COMMONCOPTS" | sed '/--nojournal/d')" + # Using a hacked version of MongoDB 3.2 for now. + + # Go back to MMAPv1 so it's not super sluggish. 
:-( + COMMONDOPTSNOIP="--storageEngine=mmapv1 $COMMONDOPTSNOIP" + COMMONDOPTS="--storageEngine=mmapv1 $COMMONDOPTS" + COMMONCOPTS="--storageEngine=mmapv1 $COMMONCOPTS" +fi + +if [ "$TRAVIS" = true ]; then + set -x +fi diff --git a/testdb/daemons/cfg1/db/.empty b/testdb/daemons/cfg1/db/.empty new file mode 100644 index 000000000..e69de29bb diff --git a/testdb/daemons/cfg1/log/run b/testdb/daemons/cfg1/log/run new file mode 100755 index 000000000..e9d4404ba --- /dev/null +++ b/testdb/daemons/cfg1/log/run @@ -0,0 +1,3 @@ +#!/bin/sh + +exec cat - > log.txt diff --git a/testdb/daemons/cfg1/run b/testdb/daemons/cfg1/run new file mode 100755 index 000000000..ad6bddd04 --- /dev/null +++ b/testdb/daemons/cfg1/run @@ -0,0 +1,8 @@ +#!/bin/sh + +. ../.env + +exec mongod $COMMONCOPTS \ + --port 40101 \ + --configsvr + diff --git a/testdb/daemons/cfg2/db/.empty b/testdb/daemons/cfg2/db/.empty new file mode 100644 index 000000000..e69de29bb diff --git a/testdb/daemons/cfg2/log/run b/testdb/daemons/cfg2/log/run new file mode 100755 index 000000000..e9d4404ba --- /dev/null +++ b/testdb/daemons/cfg2/log/run @@ -0,0 +1,3 @@ +#!/bin/sh + +exec cat - > log.txt diff --git a/testdb/daemons/cfg2/run b/testdb/daemons/cfg2/run new file mode 100755 index 000000000..07d159ef5 --- /dev/null +++ b/testdb/daemons/cfg2/run @@ -0,0 +1,8 @@ +#!/bin/sh + +. ../.env + +exec mongod $COMMONCOPTS \ + --port 40102 \ + --configsvr + diff --git a/testdb/daemons/cfg3/db/.empty b/testdb/daemons/cfg3/db/.empty new file mode 100644 index 000000000..e69de29bb diff --git a/testdb/daemons/cfg3/log/run b/testdb/daemons/cfg3/log/run new file mode 100755 index 000000000..e9d4404ba --- /dev/null +++ b/testdb/daemons/cfg3/log/run @@ -0,0 +1,3 @@ +#!/bin/sh + +exec cat - > log.txt diff --git a/testdb/daemons/cfg3/run b/testdb/daemons/cfg3/run new file mode 100755 index 000000000..62603b98c --- /dev/null +++ b/testdb/daemons/cfg3/run @@ -0,0 +1,9 @@ +#!/bin/sh + +. 
../.env + +exec mongod $COMMONCOPTS \ + --port 40103 \ + --configsvr \ + --auth \ + --keyFile=../../keyfile diff --git a/testdb/daemons/db1/db/.empty b/testdb/daemons/db1/db/.empty new file mode 100644 index 000000000..e69de29bb diff --git a/testdb/daemons/db1/log/run b/testdb/daemons/db1/log/run new file mode 100755 index 000000000..e9d4404ba --- /dev/null +++ b/testdb/daemons/db1/log/run @@ -0,0 +1,3 @@ +#!/bin/sh + +exec cat - > log.txt diff --git a/testdb/daemons/db1/run b/testdb/daemons/db1/run new file mode 100755 index 000000000..b6636d195 --- /dev/null +++ b/testdb/daemons/db1/run @@ -0,0 +1,15 @@ +#!/bin/sh + +. ../.env + +if [ x$NOIPV6 = x1 ]; then + BINDIP="127.0.0.1" +else + BINDIP="127.0.0.1,::1" +fi + +exec mongod $COMMONDOPTSNOIP \ + --shardsvr \ + --bind_ip=$BINDIP \ + --port 40001 \ + --ipv6 diff --git a/testdb/daemons/db2/db/.empty b/testdb/daemons/db2/db/.empty new file mode 100644 index 000000000..e69de29bb diff --git a/testdb/daemons/db2/log/run b/testdb/daemons/db2/log/run new file mode 100755 index 000000000..e9d4404ba --- /dev/null +++ b/testdb/daemons/db2/log/run @@ -0,0 +1,3 @@ +#!/bin/sh + +exec cat - > log.txt diff --git a/testdb/daemons/db2/run b/testdb/daemons/db2/run new file mode 100755 index 000000000..5c7b1aa50 --- /dev/null +++ b/testdb/daemons/db2/run @@ -0,0 +1,8 @@ +#!/bin/sh + +. ../.env + +exec mongod $COMMONDOPTS \ + --shardsvr \ + --port 40002 \ + --auth diff --git a/testdb/daemons/db3/db/.empty b/testdb/daemons/db3/db/.empty new file mode 100644 index 000000000..e69de29bb diff --git a/testdb/daemons/db3/log/run b/testdb/daemons/db3/log/run new file mode 100755 index 000000000..e9d4404ba --- /dev/null +++ b/testdb/daemons/db3/log/run @@ -0,0 +1,3 @@ +#!/bin/sh + +exec cat - > log.txt diff --git a/testdb/daemons/db3/run b/testdb/daemons/db3/run new file mode 100755 index 000000000..67a8284cf --- /dev/null +++ b/testdb/daemons/db3/run @@ -0,0 +1,12 @@ +#!/bin/sh + +. 
../.env + +exec mongod $COMMONDOPTS \ + --shardsvr \ + --port 40003 \ + --auth \ + --sslMode preferSSL \ + --sslCAFile ../../server.pem \ + --sslPEMKeyFile ../../server.pem + diff --git a/testdb/daemons/rs1a/db/.empty b/testdb/daemons/rs1a/db/.empty new file mode 100644 index 000000000..e69de29bb diff --git a/testdb/daemons/rs1a/log/run b/testdb/daemons/rs1a/log/run new file mode 100755 index 000000000..e9d4404ba --- /dev/null +++ b/testdb/daemons/rs1a/log/run @@ -0,0 +1,3 @@ +#!/bin/sh + +exec cat - > log.txt diff --git a/testdb/daemons/rs1a/run b/testdb/daemons/rs1a/run new file mode 100755 index 000000000..9de773041 --- /dev/null +++ b/testdb/daemons/rs1a/run @@ -0,0 +1,8 @@ +#!/bin/sh + +. ../.env + +exec mongod $COMMONDOPTS \ + --shardsvr \ + --replSet rs1 \ + --port 40011 diff --git a/testdb/daemons/rs1b/db/.empty b/testdb/daemons/rs1b/db/.empty new file mode 100644 index 000000000..e69de29bb diff --git a/testdb/daemons/rs1b/log/run b/testdb/daemons/rs1b/log/run new file mode 100755 index 000000000..e9d4404ba --- /dev/null +++ b/testdb/daemons/rs1b/log/run @@ -0,0 +1,3 @@ +#!/bin/sh + +exec cat - > log.txt diff --git a/testdb/daemons/rs1b/run b/testdb/daemons/rs1b/run new file mode 100755 index 000000000..dae593e12 --- /dev/null +++ b/testdb/daemons/rs1b/run @@ -0,0 +1,8 @@ +#!/bin/sh + +. ../.env + +exec mongod $COMMONDOPTS \ + --shardsvr \ + --replSet rs1 \ + --port 40012 diff --git a/testdb/daemons/rs1c/db/.empty b/testdb/daemons/rs1c/db/.empty new file mode 100644 index 000000000..e69de29bb diff --git a/testdb/daemons/rs1c/log/run b/testdb/daemons/rs1c/log/run new file mode 100755 index 000000000..e9d4404ba --- /dev/null +++ b/testdb/daemons/rs1c/log/run @@ -0,0 +1,3 @@ +#!/bin/sh + +exec cat - > log.txt diff --git a/testdb/daemons/rs1c/run b/testdb/daemons/rs1c/run new file mode 100755 index 000000000..c28cdc35d --- /dev/null +++ b/testdb/daemons/rs1c/run @@ -0,0 +1,8 @@ +#!/bin/sh + +. 
../.env + +exec mongod $COMMONDOPTS \ + --shardsvr \ + --replSet rs1 \ + --port 40013 diff --git a/testdb/daemons/rs2a/db/.empty b/testdb/daemons/rs2a/db/.empty new file mode 100644 index 000000000..e69de29bb diff --git a/testdb/daemons/rs2a/log/run b/testdb/daemons/rs2a/log/run new file mode 100755 index 000000000..e9d4404ba --- /dev/null +++ b/testdb/daemons/rs2a/log/run @@ -0,0 +1,3 @@ +#!/bin/sh + +exec cat - > log.txt diff --git a/testdb/daemons/rs2a/run b/testdb/daemons/rs2a/run new file mode 100755 index 000000000..2c77ab1ab --- /dev/null +++ b/testdb/daemons/rs2a/run @@ -0,0 +1,8 @@ +#!/bin/sh + +. ../.env + +exec mongod $COMMONDOPTS \ + --shardsvr \ + --replSet rs2 \ + --port 40021 diff --git a/testdb/daemons/rs2b/db/.empty b/testdb/daemons/rs2b/db/.empty new file mode 100644 index 000000000..e69de29bb diff --git a/testdb/daemons/rs2b/log/run b/testdb/daemons/rs2b/log/run new file mode 100755 index 000000000..e9d4404ba --- /dev/null +++ b/testdb/daemons/rs2b/log/run @@ -0,0 +1,3 @@ +#!/bin/sh + +exec cat - > log.txt diff --git a/testdb/daemons/rs2b/run b/testdb/daemons/rs2b/run new file mode 100755 index 000000000..57bcfce15 --- /dev/null +++ b/testdb/daemons/rs2b/run @@ -0,0 +1,8 @@ +#!/bin/sh + +. ../.env + +exec mongod $COMMONDOPTS \ + --shardsvr \ + --replSet rs2 \ + --port 40022 diff --git a/testdb/daemons/rs2c/db/.empty b/testdb/daemons/rs2c/db/.empty new file mode 100644 index 000000000..e69de29bb diff --git a/testdb/daemons/rs2c/log/run b/testdb/daemons/rs2c/log/run new file mode 100755 index 000000000..e9d4404ba --- /dev/null +++ b/testdb/daemons/rs2c/log/run @@ -0,0 +1,3 @@ +#!/bin/sh + +exec cat - > log.txt diff --git a/testdb/daemons/rs2c/run b/testdb/daemons/rs2c/run new file mode 100755 index 000000000..a71222705 --- /dev/null +++ b/testdb/daemons/rs2c/run @@ -0,0 +1,8 @@ +#!/bin/sh + +. 
../.env + +exec mongod $COMMONDOPTS \ + --shardsvr \ + --replSet rs2 \ + --port 40023 diff --git a/testdb/daemons/rs3a/db/.empty b/testdb/daemons/rs3a/db/.empty new file mode 100644 index 000000000..e69de29bb diff --git a/testdb/daemons/rs3a/log/run b/testdb/daemons/rs3a/log/run new file mode 100755 index 000000000..e9d4404ba --- /dev/null +++ b/testdb/daemons/rs3a/log/run @@ -0,0 +1,3 @@ +#!/bin/sh + +exec cat - > log.txt diff --git a/testdb/daemons/rs3a/run b/testdb/daemons/rs3a/run new file mode 100755 index 000000000..47924e126 --- /dev/null +++ b/testdb/daemons/rs3a/run @@ -0,0 +1,9 @@ +#!/bin/sh + +. ../.env + +exec mongod $COMMONDOPTS \ + --shardsvr \ + --replSet rs3 \ + --port 40031 \ + --keyFile=../../keyfile diff --git a/testdb/daemons/rs3b/db/.empty b/testdb/daemons/rs3b/db/.empty new file mode 100644 index 000000000..e69de29bb diff --git a/testdb/daemons/rs3b/log/run b/testdb/daemons/rs3b/log/run new file mode 100755 index 000000000..e9d4404ba --- /dev/null +++ b/testdb/daemons/rs3b/log/run @@ -0,0 +1,3 @@ +#!/bin/sh + +exec cat - > log.txt diff --git a/testdb/daemons/rs3b/run b/testdb/daemons/rs3b/run new file mode 100755 index 000000000..ad596c2ce --- /dev/null +++ b/testdb/daemons/rs3b/run @@ -0,0 +1,9 @@ +#!/bin/sh + +. ../.env + +exec mongod $COMMONDOPTS \ + --shardsvr \ + --replSet rs3 \ + --port 40032 \ + --keyFile=../../keyfile diff --git a/testdb/daemons/rs3c/db/.empty b/testdb/daemons/rs3c/db/.empty new file mode 100644 index 000000000..e69de29bb diff --git a/testdb/daemons/rs3c/log/run b/testdb/daemons/rs3c/log/run new file mode 100755 index 000000000..e9d4404ba --- /dev/null +++ b/testdb/daemons/rs3c/log/run @@ -0,0 +1,3 @@ +#!/bin/sh + +exec cat - > log.txt diff --git a/testdb/daemons/rs3c/run b/testdb/daemons/rs3c/run new file mode 100755 index 000000000..8f250c95b --- /dev/null +++ b/testdb/daemons/rs3c/run @@ -0,0 +1,9 @@ +#!/bin/sh + +. 
../.env + +exec mongod $COMMONDOPTS \ + --shardsvr \ + --replSet rs3 \ + --port 40033 \ + --keyFile=../../keyfile diff --git a/testdb/daemons/rs4a/db/.empty b/testdb/daemons/rs4a/db/.empty new file mode 100644 index 000000000..e69de29bb diff --git a/testdb/daemons/rs4a/log/run b/testdb/daemons/rs4a/log/run new file mode 100755 index 000000000..e9d4404ba --- /dev/null +++ b/testdb/daemons/rs4a/log/run @@ -0,0 +1,3 @@ +#!/bin/sh + +exec cat - > log.txt diff --git a/testdb/daemons/rs4a/run b/testdb/daemons/rs4a/run new file mode 100755 index 000000000..c2f2d5563 --- /dev/null +++ b/testdb/daemons/rs4a/run @@ -0,0 +1,8 @@ +#!/bin/sh + +. ../.env + +exec mongod $COMMONDOPTS \ + --shardsvr \ + --replSet rs4 \ + --port 40041 diff --git a/testdb/daemons/s1/log/run b/testdb/daemons/s1/log/run new file mode 100755 index 000000000..e9d4404ba --- /dev/null +++ b/testdb/daemons/s1/log/run @@ -0,0 +1,3 @@ +#!/bin/sh + +exec cat - > log.txt diff --git a/testdb/daemons/s1/run b/testdb/daemons/s1/run new file mode 100755 index 000000000..0e31d2c94 --- /dev/null +++ b/testdb/daemons/s1/run @@ -0,0 +1,7 @@ +#!/bin/sh + +. ../.env + +exec mongos $COMMONSOPTS \ + --port 40201 \ + --configdb 127.0.0.1:40101 diff --git a/testdb/daemons/s2/log/run b/testdb/daemons/s2/log/run new file mode 100755 index 000000000..e9d4404ba --- /dev/null +++ b/testdb/daemons/s2/log/run @@ -0,0 +1,3 @@ +#!/bin/sh + +exec cat - > log.txt diff --git a/testdb/daemons/s2/run b/testdb/daemons/s2/run new file mode 100755 index 000000000..3b5c67d58 --- /dev/null +++ b/testdb/daemons/s2/run @@ -0,0 +1,7 @@ +#!/bin/sh + +. 
../.env + +exec mongos $COMMONSOPTS \ + --port 40202 \ + --configdb 127.0.0.1:40102 diff --git a/testdb/daemons/s3/log/run b/testdb/daemons/s3/log/run new file mode 100755 index 000000000..e9d4404ba --- /dev/null +++ b/testdb/daemons/s3/log/run @@ -0,0 +1,3 @@ +#!/bin/sh + +exec cat - > log.txt diff --git a/testdb/daemons/s3/run b/testdb/daemons/s3/run new file mode 100755 index 000000000..ce03a77cc --- /dev/null +++ b/testdb/daemons/s3/run @@ -0,0 +1,8 @@ +#!/bin/sh + +. ../.env + +exec mongos $COMMONSOPTS \ + --port 40203 \ + --configdb 127.0.0.1:40103 \ + --keyFile=../../keyfile diff --git a/testdb/setup.sh b/testdb/setup.sh index a121847e3..a108b5edc 100755 --- a/testdb/setup.sh +++ b/testdb/setup.sh @@ -1,38 +1,72 @@ #!/bin/sh -e +LINE="---------------" + start() { mkdir _testdb cd _testdb - mkdir db1 db2 db3 rs1a rs1b rs1c rs2a rs2b rs2c rs3a rs3b rs3c rs4a cfg1 cfg2 cfg3 - cp ../testdb/supervisord.conf supervisord.conf - cp ../testdb/server.pem server.pem echo keyfile > keyfile chmod 600 keyfile - COUNT=$(grep '^\[program' supervisord.conf | wc -l | tr -d ' ') + cp ../testdb/server.pem server.pem + cp -a ../testdb/daemons . if ! mongod --help | grep -q -- --ssl; then - COUNT=$(($COUNT - 1)) + rm -rf daemons/db3 + fi + COUNT=$(ls daemons | wc -l) + echo "Running daemons..." + svscan daemons & + SVSCANPID=$! + echo $SVSCANPID > svscan.pid + if ! kill -0 $SVSCANPID; then + echo "Cannot execute svscan." + exit 1 fi - echo "Running supervisord..." - supervisord || ( echo "Supervisord failed executing ($?)" && exit 1 ) - echo "Supervisord is up, starting $COUNT processes..." + echo "Starting $COUNT processes..." for i in $(seq 30); do - RUNNING=$(supervisorctl status | grep RUNNING | wc -l | tr -d ' ') - echo "$RUNNING processes running..." - if [ x$COUNT = x$RUNNING ]; then + UP=$(svstat daemons/* | grep ' up ' | grep -v ' [0-3] seconds' | wc -l) + echo "$UP processes up..." + if [ x$COUNT = x$UP ]; then echo "Running setup.js with mongo..." 
mongo --nodb ../testdb/init.js exit 0 fi sleep 1 done - echo "Failed to start all processes. Check out what's up at $PWD now!" + echo "Failed to start processes. svstat _testdb/daemons/* output:" + echo $LINE + svstat daemons/* + echo $LINE + for DAEMON in daemons/*; do + if $(svstat $DAEMON | grep ' up ' | grep ' [0-3] seconds' > /dev/null); then + echo "Logs for _testdb/$DAEMON:" + echo $LINE + cat $DAEMON/log/log.txt + echo $LINE + fi + done exit 1 } stop() { if [ -d _testdb ]; then - echo "Shutting down test cluster..." - (cd _testdb && supervisorctl shutdown) + cd _testdb + if [ -f svscan.pid ]; then + kill -9 $(cat svscan.pid) 2> /dev/null || true + svc -dx daemons/* daemons/*/log > /dev/null 2>&1 || true + COUNT=$(ls daemons | wc -l) + echo "Shutting down $COUNT processes..." + while true; do + DOWN=$(svstat daemons/* | grep 'supervise not running' | wc -l) + echo "$DOWN processes down..." + if [ x$DOWN = x$COUNT ]; then + break + fi + sleep 1 + done + rm svscan.pid + echo "Done." + fi + cd .. rm -rf _testdb fi } From dae7702eb0cec249ad6dbe930eb9eaba3dc2aad0 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Thu, 3 Mar 2016 00:23:08 -0300 Subject: [PATCH 243/305] Drop supervisord.conf. 
--- testdb/supervisord.conf | 68 ----------------------------------------- 1 file changed, 68 deletions(-) delete mode 100644 testdb/supervisord.conf diff --git a/testdb/supervisord.conf b/testdb/supervisord.conf deleted file mode 100644 index 724eaa79c..000000000 --- a/testdb/supervisord.conf +++ /dev/null @@ -1,68 +0,0 @@ -[supervisord] -logfile = %(here)s/supervisord.log -pidfile = %(here)s/supervisord.pid -directory = %(here)s -#nodaemon = true - -[inet_http_server] -port = 127.0.0.1:9001 - -[supervisorctl] -serverurl = http://127.0.0.1:9001 - -[rpcinterface:supervisor] -supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface - -[program:db1] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --dbpath %(here)s/db1 --bind_ip=127.0.0.1,::1 --port 40001 --ipv6 - -[program:db2] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --dbpath %(here)s/db2 --bind_ip=127.0.0.1 --port 40002 --auth - -[program:db3] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --dbpath %(here)s/db3 --bind_ip=127.0.0.1 --port 40003 --auth --sslMode preferSSL --sslCAFile %(here)s/server.pem --sslPEMKeyFile %(here)s/server.pem - -[program:rs1a] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs1 --dbpath %(here)s/rs1a --bind_ip=127.0.0.1 --port 40011 -[program:rs1b] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs1 --dbpath %(here)s/rs1b --bind_ip=127.0.0.1 --port 40012 -[program:rs1c] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs1 --dbpath %(here)s/rs1c --bind_ip=127.0.0.1 --port 40013 - -[program:rs2a] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles 
--nssize=1 --oplogSize=1 --shardsvr --replSet rs2 --dbpath %(here)s/rs2a --bind_ip=127.0.0.1 --port 40021 -[program:rs2b] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs2 --dbpath %(here)s/rs2b --bind_ip=127.0.0.1 --port 40022 -[program:rs2c] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs2 --dbpath %(here)s/rs2c --bind_ip=127.0.0.1 --port 40023 - -[program:rs3a] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs3 --dbpath %(here)s/rs3a --bind_ip=127.0.0.1 --port 40031 --auth --keyFile=%(here)s/keyfile -[program:rs3b] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs3 --dbpath %(here)s/rs3b --bind_ip=127.0.0.1 --port 40032 --auth --keyFile=%(here)s/keyfile -[program:rs3c] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs3 --dbpath %(here)s/rs3c --bind_ip=127.0.0.1 --port 40033 --auth --keyFile=%(here)s/keyfile - -[program:rs4a] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs4 --dbpath %(here)s/rs4a --bind_ip=127.0.0.1 --port 40041 - -[program:cfg1] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --configsvr --dbpath %(here)s/cfg1 --bind_ip=127.0.0.1 --port 40101 - -[program:cfg2] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --configsvr --dbpath %(here)s/cfg2 --bind_ip=127.0.0.1 --port 40102 - -[program:cfg3] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --configsvr --dbpath %(here)s/cfg3 --bind_ip=127.0.0.1 --port 40103 --auth --keyFile=%(here)s/keyfile - -[program:s1] 
-command = mongos --configdb 127.0.0.1:40101 --bind_ip=127.0.0.1 --port 40201 --chunkSize 1 -startretries = 10 - -[program:s2] -command = mongos --configdb 127.0.0.1:40102 --bind_ip=127.0.0.1 --port 40202 --chunkSize 1 -startretries = 10 - -[program:s3] -command = mongos --configdb 127.0.0.1:40103 --bind_ip=127.0.0.1 --port 40203 --chunkSize 1 --keyFile=%(here)s/keyfile -startretries = 10 From 37d465b92e2309ce15e735d0d4f042a208c09162 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Thu, 3 Mar 2016 00:30:08 -0300 Subject: [PATCH 244/305] Rename testdb to harness. --- Makefile | 4 ++-- auth_test.go | 2 +- {testdb => harness}/client.pem | 0 {testdb => harness}/daemons/.env | 0 {testdb => harness}/daemons/cfg1/db/.empty | 0 {testdb => harness}/daemons/cfg1/log/run | 0 {testdb => harness}/daemons/cfg1/run | 0 {testdb => harness}/daemons/cfg2/db/.empty | 0 {testdb => harness}/daemons/cfg2/log/run | 0 {testdb => harness}/daemons/cfg2/run | 0 {testdb => harness}/daemons/cfg3/db/.empty | 0 {testdb => harness}/daemons/cfg3/log/run | 0 {testdb => harness}/daemons/cfg3/run | 0 {testdb => harness}/daemons/db1/db/.empty | 0 {testdb => harness}/daemons/db1/log/run | 0 {testdb => harness}/daemons/db1/run | 0 {testdb => harness}/daemons/db2/db/.empty | 0 {testdb => harness}/daemons/db2/log/run | 0 {testdb => harness}/daemons/db2/run | 0 {testdb => harness}/daemons/db3/db/.empty | 0 {testdb => harness}/daemons/db3/log/run | 0 {testdb => harness}/daemons/db3/run | 0 {testdb => harness}/daemons/rs1a/db/.empty | 0 {testdb => harness}/daemons/rs1a/log/run | 0 {testdb => harness}/daemons/rs1a/run | 0 {testdb => harness}/daemons/rs1b/db/.empty | 0 {testdb => harness}/daemons/rs1b/log/run | 0 {testdb => harness}/daemons/rs1b/run | 0 {testdb => harness}/daemons/rs1c/db/.empty | 0 {testdb => harness}/daemons/rs1c/log/run | 0 {testdb => harness}/daemons/rs1c/run | 0 {testdb => harness}/daemons/rs2a/db/.empty | 0 {testdb => harness}/daemons/rs2a/log/run | 0 {testdb => 
harness}/daemons/rs2a/run | 0 {testdb => harness}/daemons/rs2b/db/.empty | 0 {testdb => harness}/daemons/rs2b/log/run | 0 {testdb => harness}/daemons/rs2b/run | 0 {testdb => harness}/daemons/rs2c/db/.empty | 0 {testdb => harness}/daemons/rs2c/log/run | 0 {testdb => harness}/daemons/rs2c/run | 0 {testdb => harness}/daemons/rs3a/db/.empty | 0 {testdb => harness}/daemons/rs3a/log/run | 0 {testdb => harness}/daemons/rs3a/run | 0 {testdb => harness}/daemons/rs3b/db/.empty | 0 {testdb => harness}/daemons/rs3b/log/run | 0 {testdb => harness}/daemons/rs3b/run | 0 {testdb => harness}/daemons/rs3c/db/.empty | 0 {testdb => harness}/daemons/rs3c/log/run | 0 {testdb => harness}/daemons/rs3c/run | 0 {testdb => harness}/daemons/rs4a/db/.empty | 0 {testdb => harness}/daemons/rs4a/log/run | 0 {testdb => harness}/daemons/rs4a/run | 0 {testdb => harness}/daemons/s1/log/run | 0 {testdb => harness}/daemons/s1/run | 0 {testdb => harness}/daemons/s2/log/run | 0 {testdb => harness}/daemons/s2/run | 0 {testdb => harness}/daemons/s3/log/run | 0 {testdb => harness}/daemons/s3/run | 0 {testdb => harness}/dropall.js | 0 {testdb => harness}/init.js | 0 {testdb => harness}/server.pem | 0 {testdb => harness}/setup.sh | 20 ++++++++++---------- {testdb => harness}/wait.js | 0 suite_test.go | 8 ++++---- 64 files changed, 17 insertions(+), 17 deletions(-) rename {testdb => harness}/client.pem (100%) rename {testdb => harness}/daemons/.env (100%) rename {testdb => harness}/daemons/cfg1/db/.empty (100%) rename {testdb => harness}/daemons/cfg1/log/run (100%) rename {testdb => harness}/daemons/cfg1/run (100%) rename {testdb => harness}/daemons/cfg2/db/.empty (100%) rename {testdb => harness}/daemons/cfg2/log/run (100%) rename {testdb => harness}/daemons/cfg2/run (100%) rename {testdb => harness}/daemons/cfg3/db/.empty (100%) rename {testdb => harness}/daemons/cfg3/log/run (100%) rename {testdb => harness}/daemons/cfg3/run (100%) rename {testdb => harness}/daemons/db1/db/.empty (100%) rename {testdb => 
harness}/daemons/db1/log/run (100%) rename {testdb => harness}/daemons/db1/run (100%) rename {testdb => harness}/daemons/db2/db/.empty (100%) rename {testdb => harness}/daemons/db2/log/run (100%) rename {testdb => harness}/daemons/db2/run (100%) rename {testdb => harness}/daemons/db3/db/.empty (100%) rename {testdb => harness}/daemons/db3/log/run (100%) rename {testdb => harness}/daemons/db3/run (100%) rename {testdb => harness}/daemons/rs1a/db/.empty (100%) rename {testdb => harness}/daemons/rs1a/log/run (100%) rename {testdb => harness}/daemons/rs1a/run (100%) rename {testdb => harness}/daemons/rs1b/db/.empty (100%) rename {testdb => harness}/daemons/rs1b/log/run (100%) rename {testdb => harness}/daemons/rs1b/run (100%) rename {testdb => harness}/daemons/rs1c/db/.empty (100%) rename {testdb => harness}/daemons/rs1c/log/run (100%) rename {testdb => harness}/daemons/rs1c/run (100%) rename {testdb => harness}/daemons/rs2a/db/.empty (100%) rename {testdb => harness}/daemons/rs2a/log/run (100%) rename {testdb => harness}/daemons/rs2a/run (100%) rename {testdb => harness}/daemons/rs2b/db/.empty (100%) rename {testdb => harness}/daemons/rs2b/log/run (100%) rename {testdb => harness}/daemons/rs2b/run (100%) rename {testdb => harness}/daemons/rs2c/db/.empty (100%) rename {testdb => harness}/daemons/rs2c/log/run (100%) rename {testdb => harness}/daemons/rs2c/run (100%) rename {testdb => harness}/daemons/rs3a/db/.empty (100%) rename {testdb => harness}/daemons/rs3a/log/run (100%) rename {testdb => harness}/daemons/rs3a/run (100%) rename {testdb => harness}/daemons/rs3b/db/.empty (100%) rename {testdb => harness}/daemons/rs3b/log/run (100%) rename {testdb => harness}/daemons/rs3b/run (100%) rename {testdb => harness}/daemons/rs3c/db/.empty (100%) rename {testdb => harness}/daemons/rs3c/log/run (100%) rename {testdb => harness}/daemons/rs3c/run (100%) rename {testdb => harness}/daemons/rs4a/db/.empty (100%) rename {testdb => harness}/daemons/rs4a/log/run (100%) rename {testdb 
=> harness}/daemons/rs4a/run (100%) rename {testdb => harness}/daemons/s1/log/run (100%) rename {testdb => harness}/daemons/s1/run (100%) rename {testdb => harness}/daemons/s2/log/run (100%) rename {testdb => harness}/daemons/s2/run (100%) rename {testdb => harness}/daemons/s3/log/run (100%) rename {testdb => harness}/daemons/s3/run (100%) rename {testdb => harness}/dropall.js (100%) rename {testdb => harness}/init.js (100%) rename {testdb => harness}/server.pem (100%) rename {testdb => harness}/setup.sh (84%) rename {testdb => harness}/wait.js (100%) diff --git a/Makefile b/Makefile index 51bee7322..d1027d450 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,5 @@ startdb: - @testdb/setup.sh start + @harness/setup.sh start stopdb: - @testdb/setup.sh stop + @harness/setup.sh stop diff --git a/auth_test.go b/auth_test.go index cb2777543..9443ad6fb 100644 --- a/auth_test.go +++ b/auth_test.go @@ -904,7 +904,7 @@ func (s *S) TestAuthX509Cred(c *C) { c.Skip("server does not support SSL") } - clientCertPEM, err := ioutil.ReadFile("testdb/client.pem") + clientCertPEM, err := ioutil.ReadFile("harness/client.pem") c.Assert(err, IsNil) clientCert, err := tls.X509KeyPair(clientCertPEM, clientCertPEM) diff --git a/testdb/client.pem b/harness/client.pem similarity index 100% rename from testdb/client.pem rename to harness/client.pem diff --git a/testdb/daemons/.env b/harness/daemons/.env similarity index 100% rename from testdb/daemons/.env rename to harness/daemons/.env diff --git a/testdb/daemons/cfg1/db/.empty b/harness/daemons/cfg1/db/.empty similarity index 100% rename from testdb/daemons/cfg1/db/.empty rename to harness/daemons/cfg1/db/.empty diff --git a/testdb/daemons/cfg1/log/run b/harness/daemons/cfg1/log/run similarity index 100% rename from testdb/daemons/cfg1/log/run rename to harness/daemons/cfg1/log/run diff --git a/testdb/daemons/cfg1/run b/harness/daemons/cfg1/run similarity index 100% rename from testdb/daemons/cfg1/run rename to harness/daemons/cfg1/run diff 
--git a/testdb/daemons/cfg2/db/.empty b/harness/daemons/cfg2/db/.empty similarity index 100% rename from testdb/daemons/cfg2/db/.empty rename to harness/daemons/cfg2/db/.empty diff --git a/testdb/daemons/cfg2/log/run b/harness/daemons/cfg2/log/run similarity index 100% rename from testdb/daemons/cfg2/log/run rename to harness/daemons/cfg2/log/run diff --git a/testdb/daemons/cfg2/run b/harness/daemons/cfg2/run similarity index 100% rename from testdb/daemons/cfg2/run rename to harness/daemons/cfg2/run diff --git a/testdb/daemons/cfg3/db/.empty b/harness/daemons/cfg3/db/.empty similarity index 100% rename from testdb/daemons/cfg3/db/.empty rename to harness/daemons/cfg3/db/.empty diff --git a/testdb/daemons/cfg3/log/run b/harness/daemons/cfg3/log/run similarity index 100% rename from testdb/daemons/cfg3/log/run rename to harness/daemons/cfg3/log/run diff --git a/testdb/daemons/cfg3/run b/harness/daemons/cfg3/run similarity index 100% rename from testdb/daemons/cfg3/run rename to harness/daemons/cfg3/run diff --git a/testdb/daemons/db1/db/.empty b/harness/daemons/db1/db/.empty similarity index 100% rename from testdb/daemons/db1/db/.empty rename to harness/daemons/db1/db/.empty diff --git a/testdb/daemons/db1/log/run b/harness/daemons/db1/log/run similarity index 100% rename from testdb/daemons/db1/log/run rename to harness/daemons/db1/log/run diff --git a/testdb/daemons/db1/run b/harness/daemons/db1/run similarity index 100% rename from testdb/daemons/db1/run rename to harness/daemons/db1/run diff --git a/testdb/daemons/db2/db/.empty b/harness/daemons/db2/db/.empty similarity index 100% rename from testdb/daemons/db2/db/.empty rename to harness/daemons/db2/db/.empty diff --git a/testdb/daemons/db2/log/run b/harness/daemons/db2/log/run similarity index 100% rename from testdb/daemons/db2/log/run rename to harness/daemons/db2/log/run diff --git a/testdb/daemons/db2/run b/harness/daemons/db2/run similarity index 100% rename from testdb/daemons/db2/run rename to 
harness/daemons/db2/run diff --git a/testdb/daemons/db3/db/.empty b/harness/daemons/db3/db/.empty similarity index 100% rename from testdb/daemons/db3/db/.empty rename to harness/daemons/db3/db/.empty diff --git a/testdb/daemons/db3/log/run b/harness/daemons/db3/log/run similarity index 100% rename from testdb/daemons/db3/log/run rename to harness/daemons/db3/log/run diff --git a/testdb/daemons/db3/run b/harness/daemons/db3/run similarity index 100% rename from testdb/daemons/db3/run rename to harness/daemons/db3/run diff --git a/testdb/daemons/rs1a/db/.empty b/harness/daemons/rs1a/db/.empty similarity index 100% rename from testdb/daemons/rs1a/db/.empty rename to harness/daemons/rs1a/db/.empty diff --git a/testdb/daemons/rs1a/log/run b/harness/daemons/rs1a/log/run similarity index 100% rename from testdb/daemons/rs1a/log/run rename to harness/daemons/rs1a/log/run diff --git a/testdb/daemons/rs1a/run b/harness/daemons/rs1a/run similarity index 100% rename from testdb/daemons/rs1a/run rename to harness/daemons/rs1a/run diff --git a/testdb/daemons/rs1b/db/.empty b/harness/daemons/rs1b/db/.empty similarity index 100% rename from testdb/daemons/rs1b/db/.empty rename to harness/daemons/rs1b/db/.empty diff --git a/testdb/daemons/rs1b/log/run b/harness/daemons/rs1b/log/run similarity index 100% rename from testdb/daemons/rs1b/log/run rename to harness/daemons/rs1b/log/run diff --git a/testdb/daemons/rs1b/run b/harness/daemons/rs1b/run similarity index 100% rename from testdb/daemons/rs1b/run rename to harness/daemons/rs1b/run diff --git a/testdb/daemons/rs1c/db/.empty b/harness/daemons/rs1c/db/.empty similarity index 100% rename from testdb/daemons/rs1c/db/.empty rename to harness/daemons/rs1c/db/.empty diff --git a/testdb/daemons/rs1c/log/run b/harness/daemons/rs1c/log/run similarity index 100% rename from testdb/daemons/rs1c/log/run rename to harness/daemons/rs1c/log/run diff --git a/testdb/daemons/rs1c/run b/harness/daemons/rs1c/run similarity index 100% rename from 
testdb/daemons/rs1c/run rename to harness/daemons/rs1c/run diff --git a/testdb/daemons/rs2a/db/.empty b/harness/daemons/rs2a/db/.empty similarity index 100% rename from testdb/daemons/rs2a/db/.empty rename to harness/daemons/rs2a/db/.empty diff --git a/testdb/daemons/rs2a/log/run b/harness/daemons/rs2a/log/run similarity index 100% rename from testdb/daemons/rs2a/log/run rename to harness/daemons/rs2a/log/run diff --git a/testdb/daemons/rs2a/run b/harness/daemons/rs2a/run similarity index 100% rename from testdb/daemons/rs2a/run rename to harness/daemons/rs2a/run diff --git a/testdb/daemons/rs2b/db/.empty b/harness/daemons/rs2b/db/.empty similarity index 100% rename from testdb/daemons/rs2b/db/.empty rename to harness/daemons/rs2b/db/.empty diff --git a/testdb/daemons/rs2b/log/run b/harness/daemons/rs2b/log/run similarity index 100% rename from testdb/daemons/rs2b/log/run rename to harness/daemons/rs2b/log/run diff --git a/testdb/daemons/rs2b/run b/harness/daemons/rs2b/run similarity index 100% rename from testdb/daemons/rs2b/run rename to harness/daemons/rs2b/run diff --git a/testdb/daemons/rs2c/db/.empty b/harness/daemons/rs2c/db/.empty similarity index 100% rename from testdb/daemons/rs2c/db/.empty rename to harness/daemons/rs2c/db/.empty diff --git a/testdb/daemons/rs2c/log/run b/harness/daemons/rs2c/log/run similarity index 100% rename from testdb/daemons/rs2c/log/run rename to harness/daemons/rs2c/log/run diff --git a/testdb/daemons/rs2c/run b/harness/daemons/rs2c/run similarity index 100% rename from testdb/daemons/rs2c/run rename to harness/daemons/rs2c/run diff --git a/testdb/daemons/rs3a/db/.empty b/harness/daemons/rs3a/db/.empty similarity index 100% rename from testdb/daemons/rs3a/db/.empty rename to harness/daemons/rs3a/db/.empty diff --git a/testdb/daemons/rs3a/log/run b/harness/daemons/rs3a/log/run similarity index 100% rename from testdb/daemons/rs3a/log/run rename to harness/daemons/rs3a/log/run diff --git a/testdb/daemons/rs3a/run 
b/harness/daemons/rs3a/run similarity index 100% rename from testdb/daemons/rs3a/run rename to harness/daemons/rs3a/run diff --git a/testdb/daemons/rs3b/db/.empty b/harness/daemons/rs3b/db/.empty similarity index 100% rename from testdb/daemons/rs3b/db/.empty rename to harness/daemons/rs3b/db/.empty diff --git a/testdb/daemons/rs3b/log/run b/harness/daemons/rs3b/log/run similarity index 100% rename from testdb/daemons/rs3b/log/run rename to harness/daemons/rs3b/log/run diff --git a/testdb/daemons/rs3b/run b/harness/daemons/rs3b/run similarity index 100% rename from testdb/daemons/rs3b/run rename to harness/daemons/rs3b/run diff --git a/testdb/daemons/rs3c/db/.empty b/harness/daemons/rs3c/db/.empty similarity index 100% rename from testdb/daemons/rs3c/db/.empty rename to harness/daemons/rs3c/db/.empty diff --git a/testdb/daemons/rs3c/log/run b/harness/daemons/rs3c/log/run similarity index 100% rename from testdb/daemons/rs3c/log/run rename to harness/daemons/rs3c/log/run diff --git a/testdb/daemons/rs3c/run b/harness/daemons/rs3c/run similarity index 100% rename from testdb/daemons/rs3c/run rename to harness/daemons/rs3c/run diff --git a/testdb/daemons/rs4a/db/.empty b/harness/daemons/rs4a/db/.empty similarity index 100% rename from testdb/daemons/rs4a/db/.empty rename to harness/daemons/rs4a/db/.empty diff --git a/testdb/daemons/rs4a/log/run b/harness/daemons/rs4a/log/run similarity index 100% rename from testdb/daemons/rs4a/log/run rename to harness/daemons/rs4a/log/run diff --git a/testdb/daemons/rs4a/run b/harness/daemons/rs4a/run similarity index 100% rename from testdb/daemons/rs4a/run rename to harness/daemons/rs4a/run diff --git a/testdb/daemons/s1/log/run b/harness/daemons/s1/log/run similarity index 100% rename from testdb/daemons/s1/log/run rename to harness/daemons/s1/log/run diff --git a/testdb/daemons/s1/run b/harness/daemons/s1/run similarity index 100% rename from testdb/daemons/s1/run rename to harness/daemons/s1/run diff --git 
a/testdb/daemons/s2/log/run b/harness/daemons/s2/log/run similarity index 100% rename from testdb/daemons/s2/log/run rename to harness/daemons/s2/log/run diff --git a/testdb/daemons/s2/run b/harness/daemons/s2/run similarity index 100% rename from testdb/daemons/s2/run rename to harness/daemons/s2/run diff --git a/testdb/daemons/s3/log/run b/harness/daemons/s3/log/run similarity index 100% rename from testdb/daemons/s3/log/run rename to harness/daemons/s3/log/run diff --git a/testdb/daemons/s3/run b/harness/daemons/s3/run similarity index 100% rename from testdb/daemons/s3/run rename to harness/daemons/s3/run diff --git a/testdb/dropall.js b/harness/dropall.js similarity index 100% rename from testdb/dropall.js rename to harness/dropall.js diff --git a/testdb/init.js b/harness/init.js similarity index 100% rename from testdb/init.js rename to harness/init.js diff --git a/testdb/server.pem b/harness/server.pem similarity index 100% rename from testdb/server.pem rename to harness/server.pem diff --git a/testdb/setup.sh b/harness/setup.sh similarity index 84% rename from testdb/setup.sh rename to harness/setup.sh index a108b5edc..2c164255b 100755 --- a/testdb/setup.sh +++ b/harness/setup.sh @@ -3,12 +3,12 @@ LINE="---------------" start() { - mkdir _testdb - cd _testdb + mkdir _harness + cd _harness echo keyfile > keyfile chmod 600 keyfile - cp ../testdb/server.pem server.pem - cp -a ../testdb/daemons . + cp ../harness/server.pem server.pem + cp -a ../harness/daemons . if ! mongod --help | grep -q -- --ssl; then rm -rf daemons/db3 fi @@ -27,18 +27,18 @@ start() { echo "$UP processes up..." if [ x$COUNT = x$UP ]; then echo "Running setup.js with mongo..." - mongo --nodb ../testdb/init.js + mongo --nodb ../harness/init.js exit 0 fi sleep 1 done - echo "Failed to start processes. svstat _testdb/daemons/* output:" + echo "Failed to start processes. 
svstat _harness/daemons/* output:" echo $LINE svstat daemons/* echo $LINE for DAEMON in daemons/*; do if $(svstat $DAEMON | grep ' up ' | grep ' [0-3] seconds' > /dev/null); then - echo "Logs for _testdb/$DAEMON:" + echo "Logs for _harness/$DAEMON:" echo $LINE cat $DAEMON/log/log.txt echo $LINE @@ -48,8 +48,8 @@ start() { } stop() { - if [ -d _testdb ]; then - cd _testdb + if [ -d _harness ]; then + cd _harness if [ -f svscan.pid ]; then kill -9 $(cat svscan.pid) 2> /dev/null || true svc -dx daemons/* daemons/*/log > /dev/null 2>&1 || true @@ -67,7 +67,7 @@ stop() { echo "Done." fi cd .. - rm -rf _testdb + rm -rf _harness fi } diff --git a/testdb/wait.js b/harness/wait.js similarity index 100% rename from testdb/wait.js rename to harness/wait.js diff --git a/suite_test.go b/suite_test.go index ecb249952..3dba583ad 100644 --- a/suite_test.go +++ b/suite_test.go @@ -93,7 +93,7 @@ func (s *S) SetUpSuite(c *C) { } func (s *S) SetUpTest(c *C) { - err := run("mongo --nodb testdb/dropall.js") + err := run("mongo --nodb harness/dropall.js") if err != nil { panic(err.Error()) } @@ -142,7 +142,7 @@ func (s *S) Stop(host string) { // Give a moment for slaves to sync and avoid getting rollback issues. panicOnWindows() time.Sleep(2 * time.Second) - err := run("svc -d _testdb/daemons/" + supvName(host)) + err := run("svc -d _harness/daemons/" + supvName(host)) if err != nil { panic(err) } @@ -185,8 +185,8 @@ func (s *S) Thaw(host string) { func (s *S) StartAll() { if s.stopped { // Restart any stopped nodes. - run("svc -u _testdb/daemons/*") - err := run("mongo --nodb testdb/wait.js") + run("svc -u _harness/daemons/*") + err := run("mongo --nodb harness/wait.js") if err != nil { panic(err) } From 775152bc59593661ec3107fdf8ed8ae26cf423dd Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Thu, 3 Mar 2016 00:56:09 -0300 Subject: [PATCH 245/305] Reorganized harness/ files. 
--- auth_test.go | 2 +- harness/certs/client.crt | 20 +++++++++++++ harness/certs/client.key | 27 +++++++++++++++++ harness/{ => certs}/client.pem | 0 harness/certs/client.req | 17 +++++++++++ harness/certs/server.crt | 22 ++++++++++++++ harness/certs/server.key | 28 ++++++++++++++++++ harness/{ => certs}/server.pem | 0 .../daemons/cfg1/db/journal/tempLatencyTest | Bin 0 -> 204800 bytes harness/daemons/cfg1/db/mongod.lock | 0 harness/daemons/cfg3/run | 2 +- harness/daemons/db3/run | 4 +-- harness/daemons/rs3a/run | 2 +- harness/daemons/rs3b/run | 2 +- harness/daemons/rs3c/run | 2 +- harness/daemons/s3/run | 2 +- harness/{ => mongojs}/dropall.js | 0 harness/{ => mongojs}/init.js | 0 harness/{ => mongojs}/wait.js | 0 harness/setup.sh | 10 +++---- suite_test.go | 4 +-- 21 files changed, 129 insertions(+), 15 deletions(-) create mode 100644 harness/certs/client.crt create mode 100644 harness/certs/client.key rename harness/{ => certs}/client.pem (100%) create mode 100644 harness/certs/client.req create mode 100644 harness/certs/server.crt create mode 100644 harness/certs/server.key rename harness/{ => certs}/server.pem (100%) create mode 100644 harness/daemons/cfg1/db/journal/tempLatencyTest create mode 100755 harness/daemons/cfg1/db/mongod.lock rename harness/{ => mongojs}/dropall.js (100%) rename harness/{ => mongojs}/init.js (100%) rename harness/{ => mongojs}/wait.js (100%) diff --git a/auth_test.go b/auth_test.go index 9443ad6fb..c3aaaede1 100644 --- a/auth_test.go +++ b/auth_test.go @@ -904,7 +904,7 @@ func (s *S) TestAuthX509Cred(c *C) { c.Skip("server does not support SSL") } - clientCertPEM, err := ioutil.ReadFile("harness/client.pem") + clientCertPEM, err := ioutil.ReadFile("harness/certs/client.pem") c.Assert(err, IsNil) clientCert, err := tls.X509KeyPair(clientCertPEM, clientCertPEM) diff --git a/harness/certs/client.crt b/harness/certs/client.crt new file mode 100644 index 000000000..6143d9254 --- /dev/null +++ b/harness/certs/client.crt @@ -0,0 +1,20 @@ 
+-----BEGIN CERTIFICATE----- +MIIDLjCCAhYCAQcwDQYJKoZIhvcNAQELBQAwXDELMAkGA1UEBhMCR08xDDAKBgNV +BAgMA01HTzEMMAoGA1UEBwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBlNl +cnZlcjESMBAGA1UEAwwJbG9jYWxob3N0MCAXDTE1MDkyOTA4NDAzMFoYDzIxMTUw +OTA1MDg0MDMwWjBcMQswCQYDVQQGEwJHTzEMMAoGA1UECAwDTUdPMQwwCgYDVQQH +DANNR08xDDAKBgNVBAoMA01HTzEPMA0GA1UECwwGQ2xpZW50MRIwEAYDVQQDDAls +b2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC0UiQhmT+H +4IIqrn8SMESDzvcl3rwImwUoRIHlmXkovCIZCbvBCJ1nAu6X5zIN89EPPOjfNrgZ +616wPgVV/YEQXp+D7+jTAsE5s8JepRXFdecResmvh/+0i2DSuI4QFsuyVAPM1O0I +AQ5EKgr0weZZmsX6lhPD4uYehV4DxDE0i/8aTAlDoNgRCAJrYFMharRTDdY7bQzd +7ZYab/pK/3DSmOKxl/AFJ8Enmcj9w1bsvy0fgAgoGEBnBru80PRFpFiqk72TJkXO +Hx7zcYFpegtKPbAreTCModaCnjP//fskCp4XJrkfH5+01NeeX/r1OfEbjgE/wzzx +l8NaWnPCmxNfAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAFwYpje3dCLDOIHYjd+5 +CpFOEb+bJsS4ryqm/NblTjIhCLo58hNpMsBqdJHRbHAFRCOE8fvY8yiWtdHeFZcW +DgVRAXfHONLtN7faZaZQnhy/YzOhLfC/8dUMB0gQA8KXhBCPZqQmexE28AfkEO47 +PwICAxIWINfjm5VnFMkA3b7bDNLHon/pev2m7HqVQ3pRUJQNK3XgFOdDgRrnuXpR +OKAfHORHVGTh1gf1DVwc0oM+0gnkSiJ1VG0n5pE3zhZ24fmZxu6JQ6X515W7APQI +/nKVH+f1Fo+ustyTNLt8Bwxi1XmwT7IXwnkVSE9Ff6VejppXRF01V0aaWsa3kU3r +z3A= +-----END CERTIFICATE----- diff --git a/harness/certs/client.key b/harness/certs/client.key new file mode 100644 index 000000000..892db714f --- /dev/null +++ b/harness/certs/client.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAtFIkIZk/h+CCKq5/EjBEg873Jd68CJsFKESB5Zl5KLwiGQm7 +wQidZwLul+cyDfPRDzzo3za4GetesD4FVf2BEF6fg+/o0wLBObPCXqUVxXXnEXrJ +r4f/tItg0riOEBbLslQDzNTtCAEORCoK9MHmWZrF+pYTw+LmHoVeA8QxNIv/GkwJ +Q6DYEQgCa2BTIWq0Uw3WO20M3e2WGm/6Sv9w0pjisZfwBSfBJ5nI/cNW7L8tH4AI +KBhAZwa7vND0RaRYqpO9kyZFzh8e83GBaXoLSj2wK3kwjKHWgp4z//37JAqeFya5 +Hx+ftNTXnl/69TnxG44BP8M88ZfDWlpzwpsTXwIDAQABAoIBADzCjOAxZkHfuZyu +La0wTHXpkEfXdJ6ltagq5WY7P6MlOYwcRoK152vlhgXzZl9jL6ely4YjRwec0swq +KdwezpV4fOGVPmuTuw45bx47HEnr/49ZQ4p9FgF9EYQPofbz53FQc/NaMACJcogv +bn+osniw+VMFrOVNmGLiZ5p3Smk8zfXE7GRHO8CL5hpWLWO/aK236yytbfWOjM2f 
+Pr76ICb26TPRNzYaYUEThU6DtgdLU8pLnJ6QKKaDsjn+zqQzRa+Nvc0c0K8gvWwA +Afq7t0325+uMSwfpLgCOFldcaZQ5uvteJ0CAVRq1MvStnSHBmMzPlgS+NzsDm6lp +QH5+rIkCgYEA5j3jrWsv7TueTNbk8Hr/Zwywc+fA2Ex0pBURBHlHyc6ahSXWSCqo +DtvRGX0GDoK1lCfaIf1qb/DLlGaoHpkEeqcNhXQ+hHs+bZAxfbfBY9+ikit5ZTtl +QN1tIlhaiyLDnwhkpi/hMw1tiouxJUf84Io61z0sCL4hyZSPCpjn0H0CgYEAyH6F +Mwl+bCD3VDL/Dr5WSoOr2B/M3bF5SfvdStwy2IPcDJ716je1Ud/2qFCnKGgqvWhJ ++HU15c7CjAWo7/pXq2/pEMD8fDKTYww4Hr4p6duEA7DpbOGkwcUX8u3eknxUWT9F +jOSbTCvAxuDOC1K3AElyMxVVTNUrFFe8M84R9gsCgYBXmb6RkdG3WlKde7m5gaLB +K4PLZabq5RQQBe/mmtpkfxYtiLrh1FEC7kG9h+MRDExX5V3KRugDVUOv3+shUSjy +HbM4ToUm1NloyE78PTj4bfMl2CKlEJcyucy3H5S7kWuKi5/31wnA6d/+sa2huKUP +Lai7kgu5+9VRJBPUfV7d5QKBgCnhk/13TDtWH5QtGu5/gBMMskbxTaA5xHZZ8H4E +xXJJCRxx0Dje7jduK145itF8AQGT2W/XPC0HJciOHh4TE2EyfWMMjTF8dyFHmimB +28uIGWmT+Q7Pi9UWUMxkOAwtgIksGGE4F+CvexOQPjpLSwL6VKqrGCh2lwsm0J+Z +ulLFAoGAKlC93c6XEj1A31c1+usdEhUe9BrmTqtSYLYpDNpeMLdZ3VctrAZuOQPZ +4A4gkkQkqqwZGBYYSEqwqiLU6MsBdHPPZ9u3JXLLOQuh1xGeaKylvHj7qx6iT0Xo +I+FkJ6/3JeMgOina/+wlzD4oyQpqR4Mnh+TuLkDfQTgY+Lg0WPk= +-----END RSA PRIVATE KEY----- diff --git a/harness/client.pem b/harness/certs/client.pem similarity index 100% rename from harness/client.pem rename to harness/certs/client.pem diff --git a/harness/certs/client.req b/harness/certs/client.req new file mode 100644 index 000000000..e44feb4e8 --- /dev/null +++ b/harness/certs/client.req @@ -0,0 +1,17 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIICoTCCAYkCAQAwXDELMAkGA1UEBhMCR08xDDAKBgNVBAgMA01HTzEMMAoGA1UE +BwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBkNsaWVudDESMBAGA1UEAwwJ +bG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAtFIkIZk/ +h+CCKq5/EjBEg873Jd68CJsFKESB5Zl5KLwiGQm7wQidZwLul+cyDfPRDzzo3za4 +GetesD4FVf2BEF6fg+/o0wLBObPCXqUVxXXnEXrJr4f/tItg0riOEBbLslQDzNTt +CAEORCoK9MHmWZrF+pYTw+LmHoVeA8QxNIv/GkwJQ6DYEQgCa2BTIWq0Uw3WO20M +3e2WGm/6Sv9w0pjisZfwBSfBJ5nI/cNW7L8tH4AIKBhAZwa7vND0RaRYqpO9kyZF +zh8e83GBaXoLSj2wK3kwjKHWgp4z//37JAqeFya5Hx+ftNTXnl/69TnxG44BP8M8 
+8ZfDWlpzwpsTXwIDAQABoAAwDQYJKoZIhvcNAQELBQADggEBAKbOFblIscxlXalV +sEGNm2oz380RN2QoLhN6nKtAiv0jWm6iKhdAhOIQIeaRPhUP3cyi8bcBvLdMeQ3d +ZYIByB55/R0VSP1vs4qkXJCQegHcpMpyuIzsMV8p3Q4lxzGKyKtPA6Bb5c49p8Sk +ncD+LL4ymrMEia4cBPsHL9hhFOm4gqDacbU8+ETLTpuoSvUZiw7OwngqhE2r+kMv +KDweq5TOPeb+ftKzQKrrfB+XVdBoTKYw6CwARpogbc0/7mvottVcJ/0yAgC1fBbM +vupkohkXwKfjxKl6nKNL3R2GkzHQOh91hglAx5zyybKQn2YMM328Vk4X6csBg+pg +tb1s0MA= +-----END CERTIFICATE REQUEST----- diff --git a/harness/certs/server.crt b/harness/certs/server.crt new file mode 100644 index 000000000..4515f5592 --- /dev/null +++ b/harness/certs/server.crt @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDjTCCAnWgAwIBAgIJAMW+wDfcdzC+MA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV +BAYTAkdPMQwwCgYDVQQIDANNR08xDDAKBgNVBAcMA01HTzEMMAoGA1UECgwDTUdP +MQ8wDQYDVQQLDAZTZXJ2ZXIxEjAQBgNVBAMMCWxvY2FsaG9zdDAgFw0xNTA5Mjkw +ODM0MTBaGA8yMTE1MDkwNTA4MzQxMFowXDELMAkGA1UEBhMCR08xDDAKBgNVBAgM +A01HTzEMMAoGA1UEBwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBlNlcnZl +cjESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA/T5W1vTsAF+2gTXP1JKygjM7T/2BXHiJc6DRKVjlshTtPYuC3rpTddDm +6d86d17LWEo+T2bCT4MzZJhSGAun9peFvehdElRMr57xs7j5V1QYjwadMTBkLQuK +IAg6cISN1KPUzpUTUKsWIsbx97sA0t0wiEPifROb7nfSMIVQsdz/c9LlY2UNYI+5 +GiU88iDGg2wrdsa3U+l2G2KSx/9uE3c5iFki6bdequLiWmBZ6rxfoaLe4gk1INji +fKssNsn2i3uJ4i4Tmr3PUc4kxx0mMKuWK3HdlQsMqtpq++HQmHSvsPrbgcjl9HyP +JiHDsoJ+4O5bbtcE51oQbLh1bZAhYwIDAQABo1AwTjAdBgNVHQ4EFgQUhku/u9Kd +OAc1L0OR649vCCuQT+0wHwYDVR0jBBgwFoAUhku/u9KdOAc1L0OR649vCCuQT+0w +DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAw7Bgw3hlWXWSZjLhnSOu +2mW/UJ2Sj31unHngmgtXwW/04cyzoULb+qmzPe/Z06QMgGIsku1jFBcu0JabQtUG +TyalpfW77tfnvz238CYdImYwE9ZcIGuZGfhs6ySFN9XpW43B8YM7R8wTNPvOcSPw +nfjqU6kueN4TTspQg9cKhDss5DcMTIdgJgLbITXhIsrCu6GlKOgtX3HrdMGpQX7s +UoMXtZVG8pK32vxKWGTZ6DPqESeKjjq74NbYnB3H5U/kDU2dt7LF90C/Umdr9y+C +W2OJb1WBrf6RTcbt8D6d7P9kOfLPOtyn/cbaA/pfXBMQMHqr7XNXzjnaNU+jB7hL +yQ== +-----END CERTIFICATE----- diff --git a/harness/certs/server.key b/harness/certs/server.key 
new file mode 100644 index 000000000..082d093e9 --- /dev/null +++ b/harness/certs/server.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQD9PlbW9OwAX7aB +Nc/UkrKCMztP/YFceIlzoNEpWOWyFO09i4LeulN10Obp3zp3XstYSj5PZsJPgzNk +mFIYC6f2l4W96F0SVEyvnvGzuPlXVBiPBp0xMGQtC4ogCDpwhI3Uo9TOlRNQqxYi +xvH3uwDS3TCIQ+J9E5vud9IwhVCx3P9z0uVjZQ1gj7kaJTzyIMaDbCt2xrdT6XYb +YpLH/24TdzmIWSLpt16q4uJaYFnqvF+hot7iCTUg2OJ8qyw2yfaLe4niLhOavc9R +ziTHHSYwq5Yrcd2VCwyq2mr74dCYdK+w+tuByOX0fI8mIcOygn7g7ltu1wTnWhBs +uHVtkCFjAgMBAAECggEASRAfRc1L+Z+jrAu2doIMdnwJdL6S//bW0UFolyFKw+I9 +wC/sBg6D3c3zkS4SVDZJPKPO7mGbVg1oWnGH3eAfCYoV0ACmOY+QwGp/GXcYmRVu +MHWcDIEFpelaZHt7QNM9iEfsMd3YwMFblZUIYozVZADk66uKQMPTjS2Muur7qRSi +wuVfSmsVZ5afH3B1Tr96BbmPsHrXLjvNpjO44k2wrnnSPQjUL7+YiZPvtnNW8Fby +yuo2uoAyjg3+68PYZftOvvNneMsv1uyGlUs6Bk+DVWaqofIztWFdFZyXbHnK2PTk +eGQt5EsL+RwIck5eoqd5vSE+KyzhhydL0zcpngVQoQKBgQD/Yelvholbz5NQtSy3 +ZoiW1y7hL1BKzvVNHuAMKJ5WOnj5szhjhKxt/wZ+hk0qcAmlV9WAPbf4izbEwPRC +tnMBQzf1uBxqqbLL6WZ4YAyGrcX3UrT7GXsGfVT4zJjz7oYSw8aPircecw5V4exB +xa4NF+ki8IycXSkHwvW2R56fRwKBgQD92xpxXtte/rUnmENbQmr0aKg7JEfMoih6 +MdX+f6mfgjMmqj+L4jPTI8/ql8HEy13SQS1534aDSHO+nBqBK5aHUCRMIgSLnTP9 +Xyx9Ngg03SZIkPfykqxQmnZgWkTPMhYS+K1Ao9FGVs8W5jVi7veyAdhHptAcxhP3 +IuxvrxVTBQKBgQCluMPiu0snaOwP04HRAZhhSgIB3tIbuXE1OnPpb/JPwmH+p25Q +Jig+uN9d+4jXoRyhTv4c2fAoOS6xPwVCxWKbzyLhMTg/fx+ncy4rryhxvRJaDDGl +QEO1Ul9xlFMs9/vI8YJIY5uxBrimwpStmbn4hSukoLSeQ1X802bfglpMwQKBgD8z +GTY4Y20XBIrDAaHquy32EEwJEEcF6AXj+l7N8bDgfVOW9xMgUb6zH8RL29Xeu5Do +4SWCXL66fvZpbr/R1jwB28eIgJExpgvicfUKSqi+lhVi4hfmJDg8/FOopZDf61b1 +ykxZfHSCkDQnRAtJaylKBEpyYUWImtfgPfTgJfLxAoGAc8A/Tl2h/DsdTA+cA5d7 +1e0l64m13ObruSWRczyru4hy8Yq6E/K2rOFw8cYCcFpy24NqNlk+2iXPLRpWm2zt +9R497zAPvhK/bfPXjvm0j/VjB44lvRTC9hby/RRMHy9UJk4o/UQaD+1IodxZovvk +SruEA1+5bfBRMW0P+h7Qfe4= +-----END PRIVATE KEY----- diff --git a/harness/server.pem b/harness/certs/server.pem similarity index 100% rename from harness/server.pem rename to harness/certs/server.pem diff --git 
a/harness/daemons/cfg1/db/journal/tempLatencyTest b/harness/daemons/cfg1/db/journal/tempLatencyTest new file mode 100644 index 0000000000000000000000000000000000000000..52972ec9e05fbb995e2efd1ed651077d4cd226a8 GIT binary patch literal 204800 zcmeIu0Sy2E0K%a6Pi+qe5hx58Fkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj zFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r z3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@ z0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VK zfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5 zV8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM z7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b* z1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd z0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwA zz<>b*1`HT5V8DO@0|pEjFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*1`HT5V8DO@0|pEj fFkrxd0RsjM7%*VKfB^#r3>YwAz<>b*2A&26AT0m^ literal 0 HcmV?d00001 diff --git a/harness/daemons/cfg1/db/mongod.lock b/harness/daemons/cfg1/db/mongod.lock new file mode 100755 index 000000000..e69de29bb diff --git a/harness/daemons/cfg3/run b/harness/daemons/cfg3/run index 62603b98c..bd812fa3e 100755 --- a/harness/daemons/cfg3/run +++ b/harness/daemons/cfg3/run @@ -6,4 +6,4 @@ exec mongod $COMMONCOPTS \ --port 40103 \ --configsvr \ --auth \ - --keyFile=../../keyfile + --keyFile=../../certs/keyfile diff --git a/harness/daemons/db3/run b/harness/daemons/db3/run index 67a8284cf..539da5fb2 100755 --- 
a/harness/daemons/db3/run +++ b/harness/daemons/db3/run @@ -7,6 +7,6 @@ exec mongod $COMMONDOPTS \ --port 40003 \ --auth \ --sslMode preferSSL \ - --sslCAFile ../../server.pem \ - --sslPEMKeyFile ../../server.pem + --sslCAFile ../../certs/server.pem \ + --sslPEMKeyFile ../../certs/server.pem diff --git a/harness/daemons/rs3a/run b/harness/daemons/rs3a/run index 47924e126..002fbaf8e 100755 --- a/harness/daemons/rs3a/run +++ b/harness/daemons/rs3a/run @@ -6,4 +6,4 @@ exec mongod $COMMONDOPTS \ --shardsvr \ --replSet rs3 \ --port 40031 \ - --keyFile=../../keyfile + --keyFile=../../certs/keyfile diff --git a/harness/daemons/rs3b/run b/harness/daemons/rs3b/run index ad596c2ce..69825843e 100755 --- a/harness/daemons/rs3b/run +++ b/harness/daemons/rs3b/run @@ -6,4 +6,4 @@ exec mongod $COMMONDOPTS \ --shardsvr \ --replSet rs3 \ --port 40032 \ - --keyFile=../../keyfile + --keyFile=../../certs/keyfile diff --git a/harness/daemons/rs3c/run b/harness/daemons/rs3c/run index 8f250c95b..97b32c927 100755 --- a/harness/daemons/rs3c/run +++ b/harness/daemons/rs3c/run @@ -6,4 +6,4 @@ exec mongod $COMMONDOPTS \ --shardsvr \ --replSet rs3 \ --port 40033 \ - --keyFile=../../keyfile + --keyFile=../../certs/keyfile diff --git a/harness/daemons/s3/run b/harness/daemons/s3/run index ce03a77cc..fde6e479b 100755 --- a/harness/daemons/s3/run +++ b/harness/daemons/s3/run @@ -5,4 +5,4 @@ exec mongos $COMMONSOPTS \ --port 40203 \ --configdb 127.0.0.1:40103 \ - --keyFile=../../keyfile + --keyFile=../../certs/keyfile diff --git a/harness/dropall.js b/harness/mongojs/dropall.js similarity index 100% rename from harness/dropall.js rename to harness/mongojs/dropall.js diff --git a/harness/init.js b/harness/mongojs/init.js similarity index 100% rename from harness/init.js rename to harness/mongojs/init.js diff --git a/harness/wait.js b/harness/mongojs/wait.js similarity index 100% rename from harness/wait.js rename to harness/mongojs/wait.js diff --git a/harness/setup.sh b/harness/setup.sh index 
2c164255b..2fd36215b 100755 --- a/harness/setup.sh +++ b/harness/setup.sh @@ -3,12 +3,12 @@ LINE="---------------" start() { - mkdir _harness + mkdir -p _harness cd _harness - echo keyfile > keyfile - chmod 600 keyfile - cp ../harness/server.pem server.pem cp -a ../harness/daemons . + cp -a ../harness/certs . + echo keyfile > certs/keyfile + chmod 600 certs/keyfile if ! mongod --help | grep -q -- --ssl; then rm -rf daemons/db3 fi @@ -27,7 +27,7 @@ start() { echo "$UP processes up..." if [ x$COUNT = x$UP ]; then echo "Running setup.js with mongo..." - mongo --nodb ../harness/init.js + mongo --nodb ../harness/mongojs/init.js exit 0 fi sleep 1 diff --git a/suite_test.go b/suite_test.go index 3dba583ad..809f4d97c 100644 --- a/suite_test.go +++ b/suite_test.go @@ -93,7 +93,7 @@ func (s *S) SetUpSuite(c *C) { } func (s *S) SetUpTest(c *C) { - err := run("mongo --nodb harness/dropall.js") + err := run("mongo --nodb harness/mongojs/dropall.js") if err != nil { panic(err.Error()) } @@ -186,7 +186,7 @@ func (s *S) StartAll() { if s.stopped { // Restart any stopped nodes. run("svc -u _harness/daemons/*") - err := run("mongo --nodb harness/wait.js") + err := run("mongo --nodb harness/mongojs/wait.js") if err != nil { panic(err) } From fca8faf4657bffc4313337622b263aa5790da64d Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Thu, 3 Mar 2016 00:57:12 -0300 Subject: [PATCH 246/305] Drop shortlived testserver package (see dbtest). 
--- testserver/export_test.go | 12 --- testserver/testserver.go | 168 ---------------------------------- testserver/testserver_test.go | 108 ---------------------- 3 files changed, 288 deletions(-) delete mode 100644 testserver/export_test.go delete mode 100644 testserver/testserver.go delete mode 100644 testserver/testserver_test.go diff --git a/testserver/export_test.go b/testserver/export_test.go deleted file mode 100644 index 2b2e093fc..000000000 --- a/testserver/export_test.go +++ /dev/null @@ -1,12 +0,0 @@ -package testserver - -import ( - "os" -) - -func (ts *TestServer) ProcessTest() *os.Process { - if ts.server == nil { - return nil - } - return ts.server.Process -} diff --git a/testserver/testserver.go b/testserver/testserver.go deleted file mode 100644 index 7c7b713e4..000000000 --- a/testserver/testserver.go +++ /dev/null @@ -1,168 +0,0 @@ -// WARNING: This package was replaced by mgo.v2/dbtest. -package testserver - -import ( - "bytes" - "fmt" - "net" - "os" - "os/exec" - "strconv" - "time" - - "gopkg.in/mgo.v2-unstable" - "gopkg.in/tomb.v2" -) - -// WARNING: This package was replaced by mgo.v2/dbtest. -type TestServer struct { - session *mgo.Session - output bytes.Buffer - server *exec.Cmd - dbpath string - host string - tomb tomb.Tomb -} - -// WARNING: This package was replaced by mgo.v2/dbtest. 
-func (ts *TestServer) SetPath(dbpath string) { - ts.dbpath = dbpath -} - -func (ts *TestServer) start() { - if ts.server != nil { - panic("TestServer already started") - } - if ts.dbpath == "" { - panic("TestServer.SetPath must be called before using the server") - } - mgo.SetStats(true) - l, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - panic("unable to listen on a local address: " + err.Error()) - } - addr := l.Addr().(*net.TCPAddr) - l.Close() - ts.host = addr.String() - - args := []string{ - "--dbpath", ts.dbpath, - "--bind_ip", "127.0.0.1", - "--port", strconv.Itoa(addr.Port), - "--nssize", "1", - "--noprealloc", - "--smallfiles", - "--nojournal", - } - ts.tomb = tomb.Tomb{} - ts.server = exec.Command("mongod", args...) - ts.server.Stdout = &ts.output - ts.server.Stderr = &ts.output - err = ts.server.Start() - if err != nil { - panic(err) - } - ts.tomb.Go(ts.monitor) - ts.Wipe() -} - -func (ts *TestServer) monitor() error { - ts.server.Process.Wait() - if ts.tomb.Alive() { - // Present some debugging information. - fmt.Fprintf(os.Stderr, "---- mongod process died unexpectedly:\n") - fmt.Fprintf(os.Stderr, "%s", ts.output.Bytes()) - fmt.Fprintf(os.Stderr, "---- mongod processes running right now:\n") - cmd := exec.Command("/bin/sh", "-c", "ps auxw | grep mongod") - cmd.Stdout = os.Stderr - cmd.Stderr = os.Stderr - cmd.Run() - fmt.Fprintf(os.Stderr, "----------------------------------------\n") - - panic("mongod process died unexpectedly") - } - return nil -} - -// WARNING: This package was replaced by mgo.v2/dbtest. -func (ts *TestServer) Stop() { - if ts.session != nil { - ts.checkSessions() - if ts.session != nil { - ts.session.Close() - ts.session = nil - } - } - if ts.server != nil { - ts.tomb.Kill(nil) - ts.server.Process.Kill() - select { - case <-ts.tomb.Dead(): - case <-time.After(5 * time.Second): - panic("timeout waiting for mongod process to die") - } - ts.server = nil - } -} - -// WARNING: This package was replaced by mgo.v2/dbtest. 
-func (ts *TestServer) Session() *mgo.Session { - if ts.server == nil { - ts.start() - } - if ts.session == nil { - mgo.ResetStats() - var err error - ts.session, err = mgo.Dial(ts.host + "/test") - if err != nil { - panic(err) - } - } - return ts.session.Copy() -} - -// WARNING: This package was replaced by mgo.v2/dbtest. -func (ts *TestServer) checkSessions() { - if check := os.Getenv("CHECK_SESSIONS"); check == "0" || ts.server == nil || ts.session == nil { - return - } - ts.session.Close() - ts.session = nil - for i := 0; i < 100; i++ { - stats := mgo.GetStats() - if stats.SocketsInUse == 0 && stats.SocketsAlive == 0 { - return - } - time.Sleep(100 * time.Millisecond) - } - panic("There are mgo sessions still alive.") -} - -// WARNING: This package was replaced by mgo.v2/dbtest. -func (ts *TestServer) Wipe() { - if ts.server == nil || ts.session == nil { - return - } - ts.checkSessions() - sessionUnset := ts.session == nil - session := ts.Session() - defer session.Close() - if sessionUnset { - ts.session.Close() - ts.session = nil - } - names, err := session.DatabaseNames() - if err != nil { - panic(err) - } - for _, name := range names { - switch name { - case "admin", "local", "config": - default: - err = session.DB(name).DropDatabase() - if err != nil { - panic(err) - } - } - } -} diff --git a/testserver/testserver_test.go b/testserver/testserver_test.go deleted file mode 100644 index 4fb3f2837..000000000 --- a/testserver/testserver_test.go +++ /dev/null @@ -1,108 +0,0 @@ -package testserver_test - -import ( - "os" - "testing" - "time" - - . 
"gopkg.in/check.v1" - - "gopkg.in/mgo.v2-unstable" - "gopkg.in/mgo.v2-unstable/testserver" -) - -type M map[string]interface{} - -func TestAll(t *testing.T) { - TestingT(t) -} - -type S struct { - oldCheckSessions string -} - -var _ = Suite(&S{}) - -func (s *S) SetUpTest(c *C) { - s.oldCheckSessions = os.Getenv("CHECK_SESSIONS") - os.Setenv("CHECK_SESSIONS", "") -} - -func (s *S) TearDownTest(c *C) { - os.Setenv("CHECK_SESSIONS", s.oldCheckSessions) -} - -func (s *S) TestWipeData(c *C) { - var server testserver.TestServer - server.SetPath(c.MkDir()) - defer server.Stop() - - session := server.Session() - err := session.DB("mydb").C("mycoll").Insert(M{"a": 1}) - session.Close() - c.Assert(err, IsNil) - - server.Wipe() - - session = server.Session() - names, err := session.DatabaseNames() - session.Close() - c.Assert(err, IsNil) - for _, name := range names { - if name != "local" && name != "admin" { - c.Fatalf("Wipe should have removed this database: %s", name) - } - } -} - -func (s *S) TestStop(c *C) { - var server testserver.TestServer - server.SetPath(c.MkDir()) - defer server.Stop() - - // Server should not be running. - process := server.ProcessTest() - c.Assert(process, IsNil) - - session := server.Session() - addr := session.LiveServers()[0] - session.Close() - - // Server should be running now. - process = server.ProcessTest() - p, err := os.FindProcess(process.Pid) - c.Assert(err, IsNil) - p.Release() - - server.Stop() - - // Server should not be running anymore. 
- session, err = mgo.DialWithTimeout(addr, 500*time.Millisecond) - if session != nil { - session.Close() - c.Fatalf("Stop did not stop the server") - } -} - -func (s *S) TestCheckSessions(c *C) { - var server testserver.TestServer - server.SetPath(c.MkDir()) - defer server.Stop() - - session := server.Session() - defer session.Close() - c.Assert(server.Wipe, PanicMatches, "There are mgo sessions still alive.") -} - -func (s *S) TestCheckSessionsDisabled(c *C) { - var server testserver.TestServer - server.SetPath(c.MkDir()) - defer server.Stop() - - os.Setenv("CHECK_SESSIONS", "0") - - // Should not panic, although it looks to Wipe like this session will leak. - session := server.Session() - defer session.Close() - server.Wipe() -} From 277c0a0be4f4b54853d18cc313bca9f9b42f8465 Mon Sep 17 00:00:00 2001 From: Arno Leist Date: Fri, 4 Mar 2016 14:26:38 +1300 Subject: [PATCH 247/305] Fixed error iteration in writeOp The for-range loop incorrectly iterated over lerr.ecases when indexing into oplerr.ecases, causing out of bounds panics. --- session.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/session.go b/session.go index 8e4d447a7..80cf01e07 100644 --- a/session.go +++ b/session.go @@ -4525,7 +4525,7 @@ func (c *Collection) writeOp(op interface{}, ordered bool) (lerr *LastError, err lerr.N += oplerr.N lerr.modified += oplerr.modified if err != nil { - for ei := range lerr.ecases { + for ei := range oplerr.ecases { oplerr.ecases[ei].Index += i } lerr.ecases = append(lerr.ecases, oplerr.ecases...) From 9d3f72fa080495523f087b148ceb502e1673d6a3 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 14 Mar 2016 09:37:19 -0300 Subject: [PATCH 248/305] Prevent attempt to double-start test daemons. 
--- harness/setup.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/harness/setup.sh b/harness/setup.sh index 2fd36215b..e5db78a78 100755 --- a/harness/setup.sh +++ b/harness/setup.sh @@ -3,6 +3,10 @@ LINE="---------------" start() { + if [ -d _harness ]; then + echo "Daemon setup already in place, stop it first." + exit 1 + fi mkdir -p _harness cd _harness cp -a ../harness/daemons . From 9aff4bc9b9b01ee9a239435335448ee1af7062a5 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Wed, 16 Mar 2016 02:31:40 -0300 Subject: [PATCH 249/305] Fix error on Apply when result contains errmsg. Fixes #221. --- session.go | 4 ---- session_test.go | 14 ++++++++++++++ 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/session.go b/session.go index 8e4d447a7..4d224a5dc 100644 --- a/session.go +++ b/session.go @@ -2333,7 +2333,6 @@ type queryError struct { Assertion string Code int AssertionCode int "assertionCode" - LastError *LastError "lastErrorObject" } type QueryError struct { @@ -2983,9 +2982,6 @@ func checkQueryError(fullname string, d []byte) error { Error: result := &queryError{} bson.Unmarshal(d, result) - if result.LastError != nil { - return result.LastError - } if result.Err == "" && result.ErrMsg == "" { return nil } diff --git a/session_test.go b/session_test.go index 042bcf602..93dcb885e 100644 --- a/session_test.go +++ b/session_test.go @@ -1097,6 +1097,20 @@ func (s *S) TestFindAndModifyBug997828(c *C) { } } +func (s *S) TestFindAndModifyErrmsgDoc(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + err = coll.Insert(M{"errmsg": "an error"}) + + var result M + _, err = coll.Find(M{}).Apply(mgo.Change{Update: M{"$set": M{"n": 1}}}, &result) + c.Assert(err, IsNil) +} + func (s *S) TestCountCollection(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) From b6e2fa371e64216a45e61072a96d4e3859f169da Mon Sep 17 00:00:00 2001 From: 
Livio Soares Date: Tue, 23 Feb 2016 08:38:14 -0500 Subject: [PATCH 250/305] Merge embedded structs fix for Go 1.6 from unstable. --- bson/bson.go | 2 +- bson/bson_test.go | 8 ++++++++ bson/encode.go | 2 +- 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/bson/bson.go b/bson/bson.go index ac1c02c7f..579aec13f 100644 --- a/bson/bson.go +++ b/bson/bson.go @@ -627,7 +627,7 @@ func getStructInfo(st reflect.Type) (*structInfo, error) { inlineMap := -1 for i := 0; i != n; i++ { field := st.Field(i) - if field.PkgPath != "" { + if field.PkgPath != "" && !field.Anonymous { continue // Private field } diff --git a/bson/bson_test.go b/bson/bson_test.go index 306af003c..36d7c6ebd 100644 --- a/bson/bson_test.go +++ b/bson/bson_test.go @@ -1053,6 +1053,13 @@ type inlineDupMap struct { type inlineBadKeyMap struct { M map[int]int ",inline" } +type inlineUnexported struct { + M map[string]interface{} ",inline" + unexported ",inline" +} +type unexported struct { + A int +} type getterSetterD bson.D @@ -1284,6 +1291,7 @@ var twoWayCrossItems = []crossTypeItem{ {&inlineMapInt{A: 1, M: map[string]int{"b": 2}}, map[string]int{"a": 1, "b": 2}}, {&inlineMapInt{A: 1, M: nil}, map[string]int{"a": 1}}, {&inlineMapMyM{A: 1, M: MyM{"b": MyM{"c": 3}}}, map[string]interface{}{"a": 1, "b": map[string]interface{}{"c": 3}}}, + {&inlineUnexported{M: map[string]interface{}{"b": 1}, unexported: unexported{A: 2}}, map[string]interface{}{"b": 1, "a": 2}}, // []byte <=> Binary {&struct{ B []byte }{[]byte("abc")}, map[string]bson.Binary{"b": bson.Binary{Data: []byte("abc")}}}, diff --git a/bson/encode.go b/bson/encode.go index 36eb29ce6..c228e28d3 100644 --- a/bson/encode.go +++ b/bson/encode.go @@ -188,7 +188,7 @@ func isZero(v reflect.Value) bool { return v.Interface().(time.Time).IsZero() } for i := 0; i < v.NumField(); i++ { - if vt.Field(i).PkgPath != "" { + if vt.Field(i).PkgPath != "" && !vt.Field(i).Anonymous { continue // Private field } if !isZero(v.Field(i)) { From 
72aab81a5dece0687b5fc6323b37521741501e1a Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Wed, 23 Mar 2016 18:47:08 -0300 Subject: [PATCH 251/305] Partial Decimal128 implementation. --- bson/bson.go | 102 ++++++++++ bson/bson_test.go | 47 ++++- bson/decimal_test.go | 451 +++++++++++++++++++++++++++++++++++++++++++ bson/decode.go | 5 + 4 files changed, 604 insertions(+), 1 deletion(-) create mode 100644 bson/decimal_test.go diff --git a/bson/bson.go b/bson/bson.go index 579aec13f..c7c96c5a7 100644 --- a/bson/bson.go +++ b/bson/bson.go @@ -389,6 +389,108 @@ type undefined struct{} // Undefined represents the undefined BSON value. var Undefined undefined +// Decimal128 holds decimal128 BSON values. +type Decimal128 struct { + l, h uint64 +} + +func (d Decimal128) String() string { + var pos int // positive sign + var e int // exponent + var h, l uint64 // significand high/low + + if d.h>>63&1 == 0 { + pos = 1 + } + + switch d.h >> 58 & (1<<5 - 1) { + case 0x1F: + return "NaN" + case 0x1E: + return "-Inf"[pos:] + } + + l = d.l + if d.h>>61&3 == 3 { + // Bits: 1*sign 2*ignored 14*exponent 111*significand. + // Implicit 0b100 prefix in significand. + e = int(d.h>>47&(1<<14-1)) - 6176 + //h = 4<<47 | d.h&(1<<47-1) + // Spec says all of these values are out of range. + h, l = 0, 0 + } else { + // Bits: 1*sign 14*exponent 113*significand + e = int(d.h>>49&(1<<14-1)) - 6176 + h = d.h & (1<<49 - 1) + } + + // Would be handled by the logic below, but that's trivial and common. + if h == 0 && l == 0 && e == 0 { + return "-0"[pos:] + } + + var repr [48]byte // Loop 5 times over 9 digits plus dot, negative sign, and leading zero. + var last = len(repr) + var i = len(repr) + var dot = len(repr) + e + var rem uint32 +Loop: + for d9 := 0; d9 < 5; d9++ { + h, l, rem = divmod(h, l, 1e9) + for d1 := 0; d1 < 9; d1++ { + c := '0' + byte(rem%10) + rem /= 10 + i-- + repr[i] = c + // Handle "0E+3", "1E+3", etc. 
+ if l == 0 && h == 0 && rem == 0 && i == len(repr)-1 && (dot < i-6 || e > 0) { + last = i + break Loop + } + if c != '0' { + last = i + } + // Handle "-0.0", "0.00123400", "-1.00E-6", "1.050E+3", etc. + if dot == i || l == 0 && h == 0 && rem > 0 && rem < 10 && (dot < i-6 || e > 0) { + e += len(repr) - i + i-- + repr[i] = '.' + repr[i-1] = '0' + last = i - 1 + dot = len(repr) // Unmark. + } + // Break early. Works without it, but why. + if dot > i && l == 0 && h == 0 && rem == 0 { + break Loop + } + } + } + repr[last-1] = '-' + last-- + + if e != 0 { + return fmt.Sprintf("%sE%+d", repr[last+pos:], e) + } + return string(repr[last+pos:]) +} + +func divmod(h, l uint64, div uint32) (qh, ql uint64, rem uint32) { + div64 := uint64(div) + a := h >> 32 + aq := a / div64 + ar := a % div64 + b := ar<<32 + h&(1<<32-1) + bq := b / div64 + br := b % div64 + c := br<<32 + l>>32 + cq := c / div64 + cr := c % div64 + d := cr<<32 + l&(1<<32-1) + dq := d / div64 + dr := d % div64 + return (aq<<32 | bq), (cq<<32 | dq), uint32(dr) +} + // Binary is a representation for non-standard binary values. 
Any kind should // work, but the following are known as of this writing: // diff --git a/bson/bson_test.go b/bson/bson_test.go index bff0d3ab5..a7f13dd67 100644 --- a/bson/bson_test.go +++ b/bson/bson_test.go @@ -1055,7 +1055,7 @@ type inlineBadKeyMap struct { } type inlineUnexported struct { M map[string]interface{} ",inline" - unexported ",inline" + unexported ",inline" } type unexported struct { A int @@ -1580,6 +1580,9 @@ func (s *S) TestObjectIdJSONMarshaling(c *C) { } } +// -------------------------------------------------------------------------- +// Spec tests + type specTest struct { Description string Documents []struct { @@ -1636,6 +1639,48 @@ func (s *S) TestSpecTests(c *C) { } } +// -------------------------------------------------------------------------- +// Decimal tests + +type decimalTests struct { + Valid []struct { + Description string `json:"description"` + Subject string `json:"subject"` + String string `json:"string"` + ExtJSON struct { + D struct { + NumberDecimal string `json:"$numberDecimal"` + } `json:"d"` + } `json:"extjson"` + } `json:"valid"` +} + +func (s *S) TestDecimalTests(c *C) { + var tests decimalTests + err := json.Unmarshal([]byte(decimalTestsJSON), &tests) + c.Assert(err, IsNil) + + // These also conform to the spec and are used by Go elsewhere. + goStr := map[string]string{ + "Infinity": "Inf", + "-Infinity": "-Inf", + } + + for _, test := range tests.Valid { + c.Logf("Running decimal128 test: %s", test.Description) + subject, err := hex.DecodeString(test.Subject) + var value struct{ D interface{} } + err = bson.Unmarshal(subject, &value) + c.Assert(err, IsNil) + d, isDecimal := value.D.(bson.Decimal128) + c.Assert(isDecimal, Equals, true) + if s, ok := goStr[test.String]; ok { + test.String = s + } + c.Assert(d.String(), Equals, test.String) + } +} + // -------------------------------------------------------------------------- // ObjectId Text encoding.TextUnmarshaler. 
diff --git a/bson/decimal_test.go b/bson/decimal_test.go new file mode 100644 index 000000000..3886c057e --- /dev/null +++ b/bson/decimal_test.go @@ -0,0 +1,451 @@ +package bson_test + +var decimalTestsJSON = ` +{ + "description": "Decimal128", + "valid": [ + { + "description": "Special - Canonical NaN", + "subject": "180000001364000000000000000000000000000000007C00", + "string": "NaN", + "extjson": { + "d": { "$numberDecimal": "NaN" } + } + }, + { + "description": "Special - Negative NaN", + "subject": "18000000136400000000000000000000000000000000FC00", + "string": "NaN" + }, + { + "description": "Special - Canonical SNaN", + "subject": "180000001364000000000000000000000000000000007E00", + "string": "NaN" + }, + { + "description": "Special - Negative SNaN", + "subject": "18000000136400000000000000000000000000000000FE00", + "string": "NaN" + }, + { + "description": "Special - NaN with a payload", + "subject": "180000001364001200000000000000000000000000007E00", + "string": "NaN" + }, + { + "description": "Special - Canonical Positive Infinity", + "subject": "180000001364000000000000000000000000000000007800", + "string": "Infinity", + "extjson": { + "d": { "$numberDecimal": "Infinity" } + } + }, + { + "description": "Special - Canonical Negative Infinity", + "subject": "18000000136400000000000000000000000000000000F800", + "string": "-Infinity", + "extjson": { + "d": { "$numberDecimal": "-Infinity" } + } + }, + { + "description": "Special - Invalid representation treated as 0", + "subject": "180000001364000000000000000000000000000000106C00", + "string": "0" + }, + { + "description": "Special - Invalid representation treated as -0", + "subject": "18000000136400DCBA9876543210DEADBEEF00000010EC00", + "string": "-0" + }, + { + "description": "Special - Invalid representation treated as 0E3", + "subject": "18000000136400FFFFFFFFFFFFFFFFFFFFFFFFFFFF116C00", + "string": "0E+3" + }, + { + "description": "Regular - Smallest", + "subject": 
"18000000136400D204000000000000000000000000343000", + "string": "0.001234", + "extjson": { + "d": { "$numberDecimal": "0.001234" } + } + }, + { + "description": "Regular - Smallest with Trailing Zeros", + "subject": "1800000013640040EF5A07000000000000000000002A3000", + "string": "0.00123400000", + "extjson": { + "d": { "$numberDecimal": "0.00123400000" } + } + }, + { + "description": "Regular - 0.1", + "subject": "1800000013640001000000000000000000000000003E3000", + "string": "0.1", + "extjson": { + "d": { "$numberDecimal": "0.1" } + } + }, + { + "description": "Regular - 0.1234567890123456789012345678901234", + "subject": "18000000136400F2AF967ED05C82DE3297FF6FDE3CFC2F00", + "string": "0.1234567890123456789012345678901234", + "extjson": { + "d": { "$numberDecimal": "0.1234567890123456789012345678901234" } + } + }, + { + "MGO TEST": "MGO TEST", + "description": "Regular - Adjusted Exponent Limit", + "subject": "18000000136400F2AF967ED05C82DE3297FF6FDE3CF22F00", + "string": "0.000001234567890123456789012345678901234", + "extjson": { + "d": { "$numberDecimal": "0.1234567890123456789012345678901234" } + } + }, + { + "MGO TEST": "MGO TEST", + "description": "Scientific - Adjusted Exponent Limit", + "subject": "18000000136400F2AF967ED05C82DE3297FF6FDE3CF02F00", + "string": "1.234567890123456789012345678901234E-7", + "extjson": { + "d": { "$numberDecimal": "0.1234567890123456789012345678901234" } + } + }, + { + "description": "Regular - 0", + "subject": "180000001364000000000000000000000000000000403000", + "string": "0", + "extjson": { + "d": { "$numberDecimal": "0" } + } + }, + { + "description": "Regular - -0", + "subject": "18000000136400000000000000000000000000000040B000", + "string": "-0", + "extjson": { + "d": { "$numberDecimal": "-0" } + } + }, + { + "description": "Regular - -0.0", + "subject": "1800000013640000000000000000000000000000003EB000", + "string": "-0.0", + "extjson": { + "d": { "$numberDecimal": "-0.0" } + } + }, + { + "description": "Regular - 2", + 
"subject": "180000001364000200000000000000000000000000403000", + "string": "2", + "extjson": { + "d": { "$numberDecimal": "2" } + } + }, + { + "description": "Regular - 2.000", + "subject": "18000000136400D0070000000000000000000000003A3000", + "string": "2.000", + "extjson": { + "d": { "$numberDecimal": "2.000" } + } + }, + { + "description": "Regular - Largest", + "subject": "18000000136400141A99BE1C000000000000000000403000", + "string": "123456789012", + "extjson": { + "d": { "$numberDecimal": "123456789012" } + } + }, + { + "description": "Scientific - Tiniest", + "subject": "18000000136400FFFFFFFF638E8D37C087ADBE09ED010000", + "string": "9.999999999999999999999999999999999E-6143", + "extjson": { + "d": { "$numberDecimal": "9.999999999999999999999999999999999E-6143" } + } + }, + { + "description": "Scientific - Tiny", + "subject": "180000001364000100000000000000000000000000000000", + "string": "1E-6176", + "extjson": { + "d": { "$numberDecimal": "1E-6176" } + } + }, + { + "description": "Scientific - Negative Tiny", + "subject": "180000001364000100000000000000000000000000008000", + "string": "-1E-6176", + "extjson": { + "d": { "$numberDecimal": "-1E-6176" } + } + }, + { + "description": "Scientific - Fractional", + "subject": "1800000013640064000000000000000000000000002CB000", + "string": "-1.00E-8", + "extjson": { + "d": { "$numberDecimal": "-1.00E-8" } + } + }, + { + "description": "Scientific - 0 with Exponent", + "subject": "180000001364000000000000000000000000000000205F00", + "string": "0E+6000", + "extjson": { + "d": { "$numberDecimal": "0E+6000" } + } + }, + { + "description": "Scientific - 0 with Negative Exponent", + "subject": "1800000013640000000000000000000000000000007A2B00", + "string": "0E-611", + "extjson": { + "d": { "$numberDecimal": "0E-611" } + } + }, + { + "description": "Scientific - No Decimal with Signed Exponent", + "subject": "180000001364000100000000000000000000000000463000", + "string": "1E+3", + "extjson": { + "d": { "$numberDecimal": 
"1E+3" } + } + }, + { + "description": "Scientific - Trailing Zero", + "subject": "180000001364001A04000000000000000000000000423000", + "string": "1.050E+4", + "extjson": { + "d": { "$numberDecimal": "1.050E+4" } + } + }, + { + "description": "Scientific - With Decimal", + "subject": "180000001364006900000000000000000000000000423000", + "string": "1.05E+3", + "extjson": { + "d": { "$numberDecimal": "1.05E+3" } + } + }, + { + "description": "Scientific - Full", + "subject": "18000000136400FFFFFFFFFFFFFFFFFFFFFFFFFFFF403000", + "_string": "5.192296858534827628530496329220095E+33", + "string": "5192296858534827628530496329220095", + "extjson": { + "d": { "$numberDecimal": "5.192296858534827628530496329220095E+33" } + } + }, + { + "description": "Scientific - Large", + "subject": "18000000136400000000000A5BC138938D44C64D31FE5F00", + "string": "1.000000000000000000000000000000000E+6144", + "extjson": { + "d": { "$numberDecimal": "1.000000000000000000000000000000000E+6144" } + } + }, + { + "description": "Scientific - Largest", + "subject": "18000000136400FFFFFFFF638E8D37C087ADBE09EDFF5F00", + "string": "9.999999999999999999999999999999999E+6144", + "extjson": { + "d": { "$numberDecimal": "9.999999999999999999999999999999999E+6144" } + } + }, + { + "description": "Non-Canonical Parsing - Exponent Normalization", + "subject": "1800000013640064000000000000000000000000002CB000", + "string": "-1.00E-8", + "extjson": { + "d": { "$numberDecimal": "-100E-10" } + } + }, + { + "description": "Non-Canonical Parsing - Unsigned Positive Exponent", + "subject": "180000001364000100000000000000000000000000463000", + "string": "1E+3", + "extjson": { + "d": { "$numberDecimal": "1E3" } + } + }, + { + "description": "Non-Canonical Parsing - Lowercase Exponent Identifier", + "subject": "180000001364000100000000000000000000000000463000", + "string": "1E+3", + "extjson": { + "d": { "$numberDecimal": "1e+3" } + } + }, + { + "description": "Non-Canonical Parsing - Long Significand with 
Exponent", + "subject": "1800000013640079D9E0F9763ADA429D0200000000583000", + "string": "1.2345689012345789012345E+34", + "extjson": { + "d": { "$numberDecimal": "12345689012345789012345E+12" } + } + }, + { + "description": "Non-Canonical Parsing - Positive Sign", + "subject": "18000000136400F2AF967ED05C82DE3297FF6FDE3C403000", + "_string": "1.234567890123456789012345678901234E+33", + "string": "1234567890123456789012345678901234", + "extjson": { + "d": { "$numberDecimal": "+1234567890123456789012345678901234" } + } + }, + { + "description": "Non-Canonical Parsing - Long Significand", + "subject": "18000000136400F2AF967ED05C82DE3297FF6FDE3C403000", + "_string": "1.234567890123456789012345678901234E+33", + "string": "1234567890123456789012345678901234", + "extjson": { + "d": { "$numberDecimal": "1234567890123456789012345678901234" } + } + }, + { + "description": "Non-Canonical Parsing - Long Decimal String", + "subject": "180000001364000100000000000000000000000000722800", + "string": "1E-999", + "extjson": { + "d": { "$numberDecimal": 
".000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001" } + } + } + ], + "parseErrors": [ + { + "description": "Too many significand digits", + "subject": "100000000000000000000000000000000000000000000000000000000001" + }, + { + "description": "Too many significand digits", + "subject": 
"1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }, + { + "description": "Too many significand digits", + "subject": ".100000000000000000000000000000000000000000000000000000000000" + }, + { + "description": "Incomplete Exponent", + "subject": "1e" + }, + { + "description": "Exponent at the beginning", + "subject": "E01" + }, + { + "description": "Invalid NaN specification", + "subject": "nan" + }, + { + "description": "Just a decimal place", + "subject": "." + }, + { + "description": "2 decimal places", + "subject": "..3" + }, + { + "description": "2 decimal places", + "subject": ".13.3" + }, + { + "description": "2 decimal places", + "subject": "1..3" + }, + { + "description": "2 decimal places", + "subject": "1.3.4" + }, + { + "description": "2 decimal places", + "subject": "1.34." 
+ }, + { + "description": "Decimal with no digits", + "subject": ".e" + }, + { + "description": "2 signs", + "subject": "+-32.4" + }, + { + "description": "2 signs", + "subject": "-+32.4" + }, + { + "description": "2 negative signs", + "subject": "--32.4" + }, + { + "description": "2 negative signs", + "subject": "-32.-4" + }, + { + "description": "End in negative sign", + "subject": "32.0-" + }, + { + "description": "2 negative signs", + "subject": "32.4E--21" + }, + { + "description": "2 negative signs", + "subject": "32.4E-2-1" + }, + { + "description": "2 signs", + "subject": "32.4E+-21" + }, + { + "description": "Empty string", + "subject": "" + }, + { + "description": "Invalid", + "subject": "E" + }, + { + "description": "Invalid", + "subject": "invalid" + }, + { + "description": "Invalid", + "subject": "i" + }, + { + "description": "Invalid", + "subject": "in" + }, + { + "description": "Invalid", + "subject": "-in" + }, + { + "description": "Invalid", + "subject": "Na" + }, + { + "description": "Invalid", + "subject": "-Na" + }, + { + "description": "Invalid", + "subject": "1.23abc" + }, + { + "description": "Invalid", + "subject": "1.23abcE+02" + }, + { + "description": "Invalid", + "subject": "1.23E+0aabs2" + } + ] +} +` diff --git a/bson/decode.go b/bson/decode.go index 9bd73f966..7c2d8416a 100644 --- a/bson/decode.go +++ b/bson/decode.go @@ -539,6 +539,11 @@ func (d *decoder) readElemTo(out reflect.Value, kind byte) (good bool) { in = MongoTimestamp(d.readInt64()) case 0x12: // Int64 in = d.readInt64() + case 0x13: // Decimal128 + in = Decimal128{ + l: uint64(d.readInt64()), + h: uint64(d.readInt64()), + } case 0x7F: // Max key in = MaxKey case 0xFF: // Min key From 14a4475d4d32049500ac89e1499c46be62bf7bdf Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Tue, 5 Apr 2016 22:25:33 -0300 Subject: [PATCH 252/305] Complete and optimized Decimal128 support. Parsing in ~170ns, string in ~700ns. 
--- bson/bson.go | 102 ---------- bson/bson_test.go | 42 ---- bson/decimal.go | 268 ++++++++++++++++++++++++++ bson/decimal_test.go | 449 ++++++++++++++++++++++++++++++------------- bson/encode.go | 71 +++---- 5 files changed, 621 insertions(+), 311 deletions(-) create mode 100644 bson/decimal.go diff --git a/bson/bson.go b/bson/bson.go index c7c96c5a7..579aec13f 100644 --- a/bson/bson.go +++ b/bson/bson.go @@ -389,108 +389,6 @@ type undefined struct{} // Undefined represents the undefined BSON value. var Undefined undefined -// Decimal128 holds decimal128 BSON values. -type Decimal128 struct { - l, h uint64 -} - -func (d Decimal128) String() string { - var pos int // positive sign - var e int // exponent - var h, l uint64 // significand high/low - - if d.h>>63&1 == 0 { - pos = 1 - } - - switch d.h >> 58 & (1<<5 - 1) { - case 0x1F: - return "NaN" - case 0x1E: - return "-Inf"[pos:] - } - - l = d.l - if d.h>>61&3 == 3 { - // Bits: 1*sign 2*ignored 14*exponent 111*significand. - // Implicit 0b100 prefix in significand. - e = int(d.h>>47&(1<<14-1)) - 6176 - //h = 4<<47 | d.h&(1<<47-1) - // Spec says all of these values are out of range. - h, l = 0, 0 - } else { - // Bits: 1*sign 14*exponent 113*significand - e = int(d.h>>49&(1<<14-1)) - 6176 - h = d.h & (1<<49 - 1) - } - - // Would be handled by the logic below, but that's trivial and common. - if h == 0 && l == 0 && e == 0 { - return "-0"[pos:] - } - - var repr [48]byte // Loop 5 times over 9 digits plus dot, negative sign, and leading zero. - var last = len(repr) - var i = len(repr) - var dot = len(repr) + e - var rem uint32 -Loop: - for d9 := 0; d9 < 5; d9++ { - h, l, rem = divmod(h, l, 1e9) - for d1 := 0; d1 < 9; d1++ { - c := '0' + byte(rem%10) - rem /= 10 - i-- - repr[i] = c - // Handle "0E+3", "1E+3", etc. - if l == 0 && h == 0 && rem == 0 && i == len(repr)-1 && (dot < i-6 || e > 0) { - last = i - break Loop - } - if c != '0' { - last = i - } - // Handle "-0.0", "0.00123400", "-1.00E-6", "1.050E+3", etc. 
- if dot == i || l == 0 && h == 0 && rem > 0 && rem < 10 && (dot < i-6 || e > 0) { - e += len(repr) - i - i-- - repr[i] = '.' - repr[i-1] = '0' - last = i - 1 - dot = len(repr) // Unmark. - } - // Break early. Works without it, but why. - if dot > i && l == 0 && h == 0 && rem == 0 { - break Loop - } - } - } - repr[last-1] = '-' - last-- - - if e != 0 { - return fmt.Sprintf("%sE%+d", repr[last+pos:], e) - } - return string(repr[last+pos:]) -} - -func divmod(h, l uint64, div uint32) (qh, ql uint64, rem uint32) { - div64 := uint64(div) - a := h >> 32 - aq := a / div64 - ar := a % div64 - b := ar<<32 + h&(1<<32-1) - bq := b / div64 - br := b % div64 - c := br<<32 + l>>32 - cq := c / div64 - cr := c % div64 - d := cr<<32 + l&(1<<32-1) - dq := d / div64 - dr := d % div64 - return (aq<<32 | bq), (cq<<32 | dq), uint32(dr) -} - // Binary is a representation for non-standard binary values. Any kind should // work, but the following are known as of this writing: // diff --git a/bson/bson_test.go b/bson/bson_test.go index a7f13dd67..fb7fc0230 100644 --- a/bson/bson_test.go +++ b/bson/bson_test.go @@ -1639,48 +1639,6 @@ func (s *S) TestSpecTests(c *C) { } } -// -------------------------------------------------------------------------- -// Decimal tests - -type decimalTests struct { - Valid []struct { - Description string `json:"description"` - Subject string `json:"subject"` - String string `json:"string"` - ExtJSON struct { - D struct { - NumberDecimal string `json:"$numberDecimal"` - } `json:"d"` - } `json:"extjson"` - } `json:"valid"` -} - -func (s *S) TestDecimalTests(c *C) { - var tests decimalTests - err := json.Unmarshal([]byte(decimalTestsJSON), &tests) - c.Assert(err, IsNil) - - // These also conform to the spec and are used by Go elsewhere. 
- goStr := map[string]string{ - "Infinity": "Inf", - "-Infinity": "-Inf", - } - - for _, test := range tests.Valid { - c.Logf("Running decimal128 test: %s", test.Description) - subject, err := hex.DecodeString(test.Subject) - var value struct{ D interface{} } - err = bson.Unmarshal(subject, &value) - c.Assert(err, IsNil) - d, isDecimal := value.D.(bson.Decimal128) - c.Assert(isDecimal, Equals, true) - if s, ok := goStr[test.String]; ok { - test.String = s - } - c.Assert(d.String(), Equals, test.String) - } -} - // -------------------------------------------------------------------------- // ObjectId Text encoding.TextUnmarshaler. diff --git a/bson/decimal.go b/bson/decimal.go new file mode 100644 index 000000000..c365e32ff --- /dev/null +++ b/bson/decimal.go @@ -0,0 +1,268 @@ +// BSON library for Go +// +// Copyright (c) 2010-2012 - Gustavo Niemeyer +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package bson + +import ( + "fmt" + "strconv" + "strings" +) + +// Decimal128 holds decimal128 BSON values. +type Decimal128 struct { + h, l uint64 +} + +func (d Decimal128) String() string { + var pos int // positive sign + var e int // exponent + var h, l uint64 // significand high/low + + if d.h>>63&1 == 0 { + pos = 1 + } + + switch d.h >> 58 & (1<<5 - 1) { + case 0x1F: + return "NaN" + case 0x1E: + return "-Inf"[pos:] + } + + l = d.l + if d.h>>61&3 == 3 { + // Bits: 1*sign 2*ignored 14*exponent 111*significand. + // Implicit 0b100 prefix in significand. + e = int(d.h>>47&(1<<14-1)) - 6176 + //h = 4<<47 | d.h&(1<<47-1) + // Spec says all of these values are out of range. + h, l = 0, 0 + } else { + // Bits: 1*sign 14*exponent 113*significand + e = int(d.h>>49&(1<<14-1)) - 6176 + h = d.h & (1<<49 - 1) + } + + // Would be handled by the logic below, but that's trivial and common. + if h == 0 && l == 0 && e == 0 { + return "-0"[pos:] + } + + var repr [48]byte // Loop 5 times over 9 digits plus dot, negative sign, and leading zero. + var last = len(repr) + var i = len(repr) + var dot = len(repr) + e + var rem uint32 +Loop: + for d9 := 0; d9 < 5; d9++ { + h, l, rem = divmod(h, l, 1e9) + for d1 := 0; d1 < 9; d1++ { + c := '0' + byte(rem%10) + rem /= 10 + i-- + repr[i] = c + // Handle "0E+3", "1E+3", etc. 
+ if l == 0 && h == 0 && rem == 0 && i == len(repr)-1 && (dot < i-6 || e > 0) { + last = i + break Loop + } + if c != '0' { + last = i + } + // Handle "-0.0", "0.00123400", "-1.00E-6", "1.050E+3", etc. + if dot == i || l == 0 && h == 0 && rem > 0 && rem < 10 && (dot < i-6 || e > 0) { + e += len(repr) - i + i-- + repr[i] = '.' + repr[i-1] = '0' + last = i - 1 + dot = len(repr) // Unmark. + } + // Break early. Works without it, but why. + if dot > i && l == 0 && h == 0 && rem == 0 { + break Loop + } + } + } + repr[last-1] = '-' + last-- + + if e > 0 { + return string(repr[last+pos:]) + "E+" + strconv.Itoa(e) + } + if e < 0 { + return string(repr[last+pos:]) + "E" + strconv.Itoa(e) + } + return string(repr[last+pos:]) +} + +func divmod(h, l uint64, div uint32) (qh, ql uint64, rem uint32) { + div64 := uint64(div) + a := h >> 32 + aq := a / div64 + ar := a % div64 + b := ar<<32 + h&(1<<32-1) + bq := b / div64 + br := b % div64 + c := br<<32 + l>>32 + cq := c / div64 + cr := c % div64 + d := cr<<32 + l&(1<<32-1) + dq := d / div64 + dr := d % div64 + return (aq<<32 | bq), (cq<<32 | dq), uint32(dr) +} + +var dNaN = Decimal128{0x1F << 58, 0} +var dPosInf = Decimal128{0x1E << 58, 0} +var dNegInf = Decimal128{0x3E << 58, 0} + +func dErr(s string) (Decimal128, error) { + return dNaN, fmt.Errorf("cannot parse %q as a Decimal128", s) +} + +func ParseDecimal128(s string) (Decimal128, error) { + orig := s + if s == "" { + return dErr(orig) + } + neg := s[0] == '-' + if neg || s[0] == '+' { + s = s[1:] + } + + if (len(s) == 3 || len(s) == 8) && (s[0] == 'N' || s[0] == 'n' || s[0] == 'I' || s[0] == 'i') { + if s == "NaN" || s == "nan" || strings.EqualFold(s, "nan") { + return dNaN, nil + } + if s == "Inf" || s == "inf" || strings.EqualFold(s, "inf") || strings.EqualFold(s, "infinity") { + if neg { + return dNegInf, nil + } + return dPosInf, nil + } + return dErr(orig) + } + + var h, l uint64 + var e int + + var add, ovr uint32 + var mul uint32 = 1 + var dot = -1 + var i = 0 + for i 
< len(s) { + c := s[i] + if i == 34 && dot < 0 { + return dErr(orig) + } + if mul == 1e9 { + h, l, ovr = muladd(h, l, mul, add) + mul, add = 1, 0 + if ovr > 0 || h&((1<<15-1)<<49) > 0 { + return dErr(orig) + } + } + if c >= '0' && c <= '9' { + i++ + mul *= 10 + add *= 10 + add += uint32(c - '0') + continue + } + if c == '.' { + i++ + if dot >= 0 || i == len(s) || s[i] < '0' || s[i] > '9' { + return dErr(orig) + } + dot = i + continue + } + break + } + if i == 0 { + return dErr(orig) + } + if mul > 1 { + h, l, ovr = muladd(h, l, mul, add) + if ovr > 0 || h&((1<<15-1)<<49) > 0 { + return dErr(orig) + } + } + if dot >= 0 { + e = dot - i + } + if i+1 < len(s) && (s[i] == 'E' || s[i] == 'e') { + i++ + eneg := s[i] == '-' + if eneg || s[i] == '+' { + i++ + } + n := 0 + for i < len(s) && n < 1e4 { + c := s[i] + i++ + if c < '0' || c > '9' { + break + } + n *= 10 + n += int(c - '0') + } + if eneg { + n = -n + } + e += n + if e < -6176 || e > 6111 { + return dErr(orig) + } + } + + if i < len(s) { + return dErr(orig) + } + + h |= uint64(e+6176) & uint64(1<<14-1) << 49 + if neg { + h |= 1 << 63 + } + return Decimal128{h, l}, nil +} + +func muladd(h, l uint64, mul uint32, add uint32) (resh, resl uint64, overflow uint32) { + mul64 := uint64(mul) + a := mul64 * (l & (1<<32 - 1)) + b := a>>32 + mul64*(l>>32) + c := b>>32 + mul64*(h&(1<<32-1)) + d := c>>32 + mul64*(h>>32) + + a = a&(1<<32-1) + uint64(add) + b = b&(1<<32-1) + a>>32 + c = c&(1<<32-1) + b>>32 + d = d&(1<<32-1) + c>>32 + + return (d<<32 | c&(1<<32-1)), (b<<32 | a&(1<<32-1)), uint32(d >> 32) +} diff --git a/bson/decimal_test.go b/bson/decimal_test.go index 3886c057e..e222c234d 100644 --- a/bson/decimal_test.go +++ b/bson/decimal_test.go @@ -1,320 +1,497 @@ -package bson_test +// BSON library for Go +// +// Copyright (c) 2010-2012 - Gustavo Niemeyer +// +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package bson_test + +import ( + "encoding/hex" + "encoding/json" + + "gopkg.in/mgo.v2-unstable/bson" + + . 
"gopkg.in/check.v1" + "regexp" + "strings" +) + +// -------------------------------------------------------------------------- +// Decimal tests + +type decimalTests struct { + Valid []struct { + Description string `json:"description"` + Subject string `json:"subject"` + String string `json:"string"` + ExtJSON string `json:"extjson"` + } `json:"valid"` + + ParseErrors []struct{ + Description string `json:"description"` + Subject string `json:"subject"` + } `json:"parseErrors"` +} + +func extJSONRepr(s string) string { + var value struct { + D struct { + Repr string `json:"$numberDecimal"` + } `json:"d"` + } + err := json.Unmarshal([]byte(s), &value) + if err != nil { + panic(err) + } + return value.D.Repr +} + +func (s *S) TestDecimalTests(c *C) { + var tests decimalTests + err := json.Unmarshal([]byte(decimalTestsJSON), &tests) + c.Assert(err, IsNil) + + // These also conform to the spec and are used by Go elsewhere. + goStr := map[string]string{ + "Infinity": "Inf", + "-Infinity": "-Inf", + } + + for _, test := range tests.Valid { + c.Logf("Running decimal128 test: %s (string %q)", test.Description, test.String) + subject, err := hex.DecodeString(test.Subject) + var value struct{ D interface{} } + + // Unmarshal value from BSON data. + err = bson.Unmarshal(subject, &value) + c.Assert(err, IsNil) + d, isDecimal := value.D.(bson.Decimal128) + c.Assert(isDecimal, Equals, true) + + wantStr := test.String + if s, ok := goStr[test.String]; ok { + wantStr = s + } + c.Assert(d.String(), Equals, wantStr) + + // Parse Go variant representation (Inf vs. Infinity). + parsed, err := bson.ParseDecimal128(wantStr) + c.Assert(err, IsNil) + c.Assert(parsed.String(), Equals, wantStr) + + // Parse original output representation. + parsed, err = bson.ParseDecimal128(test.String) + c.Assert(err, IsNil) + c.Assert(parsed.String(), Equals, wantStr) + + // Parse non-canonical input representation. 
+ parsed, err = bson.ParseDecimal128(extJSONRepr(test.ExtJSON)) + c.Assert(err, IsNil) + c.Assert(parsed.String(), Equals, wantStr) + + // Marshal back into BSON data. + data, err := bson.Marshal(value) + c.Assert(err, IsNil) + c.Assert(strings.ToUpper(hex.EncodeToString(data)), Equals, test.Subject) + } + + for _, test := range tests.ParseErrors { + c.Logf("Running decimal128 parse error test: %s (string %q)", test.Description, test.Subject) + + _, err := bson.ParseDecimal128(test.Subject) + c.Assert(err, ErrorMatches, `cannot parse "`+regexp.QuoteMeta(test.Subject)+`" as a Decimal128`) + } +} + +const decBenchNum = "9.999999999999999999999999999999999E+6144" + +func (s *S) BenchmarkDecimal128String(c *C) { + d, err := bson.ParseDecimal128(decBenchNum) + c.Assert(err, IsNil) + c.Assert(d.String(), Equals, decBenchNum) + + c.ResetTimer() + for i := 0; i < c.N; i++ { + d.String() + } +} + +func (s *S) BenchmarkDecimal128Parse(c *C) { + var err error + c.ResetTimer() + for i := 0; i < c.N; i++ { + _, err = bson.ParseDecimal128(decBenchNum) + } + if err != nil { + panic(err) + } +} + +// Originally from https://goo.gl/HWFTAw var decimalTestsJSON = ` { "description": "Decimal128", + "bson_type": "0x13", + "test_key": "d", "valid": [ { "description": "Special - Canonical NaN", "subject": "180000001364000000000000000000000000000000007C00", "string": "NaN", - "extjson": { - "d": { "$numberDecimal": "NaN" } - } + "extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}" }, { "description": "Special - Negative NaN", "subject": "18000000136400000000000000000000000000000000FC00", - "string": "NaN" + "string": "NaN", + "from_extjson": false, + "extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}" }, { "description": "Special - Canonical SNaN", "subject": "180000001364000000000000000000000000000000007E00", - "string": "NaN" + "string": "NaN", + "from_extjson": false, + "extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}" }, { "description": "Special - Negative SNaN", "subject": 
"18000000136400000000000000000000000000000000FE00", - "string": "NaN" + "string": "NaN", + "from_extjson": false, + "extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}" }, { "description": "Special - NaN with a payload", "subject": "180000001364001200000000000000000000000000007E00", - "string": "NaN" + "string": "NaN", + "from_extjson": false, + "extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}" }, { "description": "Special - Canonical Positive Infinity", "subject": "180000001364000000000000000000000000000000007800", "string": "Infinity", - "extjson": { - "d": { "$numberDecimal": "Infinity" } - } + "extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}" }, { "description": "Special - Canonical Negative Infinity", "subject": "18000000136400000000000000000000000000000000F800", "string": "-Infinity", - "extjson": { - "d": { "$numberDecimal": "-Infinity" } - } + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}" }, { "description": "Special - Invalid representation treated as 0", "subject": "180000001364000000000000000000000000000000106C00", - "string": "0" + "string": "0", + "from_extjson": false, + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" }, { "description": "Special - Invalid representation treated as -0", "subject": "18000000136400DCBA9876543210DEADBEEF00000010EC00", - "string": "-0" + "string": "-0", + "from_extjson": false, + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}" }, { "description": "Special - Invalid representation treated as 0E3", "subject": "18000000136400FFFFFFFFFFFFFFFFFFFFFFFFFFFF116C00", - "string": "0E+3" + "string": "0E+3", + "from_extjson": false, + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+3\"}}" + }, + { + "description": "Regular - Adjusted Exponent Limit", + "subject": "18000000136400F2AF967ED05C82DE3297FF6FDE3CF22F00", + "string": "0.000001234567890123456789012345678901234", + "extjson": "{\"d\": { \"$numberDecimal\": \"0.000001234567890123456789012345678901234\" }}" }, { "description": "Regular - 
Smallest", "subject": "18000000136400D204000000000000000000000000343000", "string": "0.001234", - "extjson": { - "d": { "$numberDecimal": "0.001234" } - } + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.001234\"}}" }, { "description": "Regular - Smallest with Trailing Zeros", "subject": "1800000013640040EF5A07000000000000000000002A3000", "string": "0.00123400000", - "extjson": { - "d": { "$numberDecimal": "0.00123400000" } - } + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00123400000\"}}" }, { "description": "Regular - 0.1", "subject": "1800000013640001000000000000000000000000003E3000", "string": "0.1", - "extjson": { - "d": { "$numberDecimal": "0.1" } - } + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1\"}}" }, { "description": "Regular - 0.1234567890123456789012345678901234", "subject": "18000000136400F2AF967ED05C82DE3297FF6FDE3CFC2F00", "string": "0.1234567890123456789012345678901234", - "extjson": { - "d": { "$numberDecimal": "0.1234567890123456789012345678901234" } - } - }, - { - "MGO TEST": "MGO TEST", - "description": "Regular - Adjusted Exponent Limit", - "subject": "18000000136400F2AF967ED05C82DE3297FF6FDE3CF22F00", - "string": "0.000001234567890123456789012345678901234", - "extjson": { - "d": { "$numberDecimal": "0.1234567890123456789012345678901234" } - } - }, - { - "MGO TEST": "MGO TEST", - "description": "Scientific - Adjusted Exponent Limit", - "subject": "18000000136400F2AF967ED05C82DE3297FF6FDE3CF02F00", - "string": "1.234567890123456789012345678901234E-7", - "extjson": { - "d": { "$numberDecimal": "0.1234567890123456789012345678901234" } - } + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1234567890123456789012345678901234\"}}" }, { "description": "Regular - 0", "subject": "180000001364000000000000000000000000000000403000", "string": "0", - "extjson": { - "d": { "$numberDecimal": "0" } - } + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" }, { "description": "Regular - -0", "subject": "18000000136400000000000000000000000000000040B000", 
"string": "-0", - "extjson": { - "d": { "$numberDecimal": "-0" } - } + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}" }, { "description": "Regular - -0.0", "subject": "1800000013640000000000000000000000000000003EB000", "string": "-0.0", - "extjson": { - "d": { "$numberDecimal": "-0.0" } - } + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0\"}}" }, { "description": "Regular - 2", "subject": "180000001364000200000000000000000000000000403000", "string": "2", - "extjson": { - "d": { "$numberDecimal": "2" } - } + "extjson": "{\"d\" : {\"$numberDecimal\" : \"2\"}}" }, { "description": "Regular - 2.000", "subject": "18000000136400D0070000000000000000000000003A3000", "string": "2.000", - "extjson": { - "d": { "$numberDecimal": "2.000" } - } + "extjson": "{\"d\" : {\"$numberDecimal\" : \"2.000\"}}" }, { "description": "Regular - Largest", - "subject": "18000000136400141A99BE1C000000000000000000403000", - "string": "123456789012", - "extjson": { - "d": { "$numberDecimal": "123456789012" } - } + "subject": "18000000136400F2AF967ED05C82DE3297FF6FDE3C403000", + "string": "1234567890123456789012345678901234", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1234567890123456789012345678901234\"}}" }, { "description": "Scientific - Tiniest", "subject": "18000000136400FFFFFFFF638E8D37C087ADBE09ED010000", "string": "9.999999999999999999999999999999999E-6143", - "extjson": { - "d": { "$numberDecimal": "9.999999999999999999999999999999999E-6143" } - } + "extjson": "{\"d\" : {\"$numberDecimal\" : \"9.999999999999999999999999999999999E-6143\"}}" }, { "description": "Scientific - Tiny", "subject": "180000001364000100000000000000000000000000000000", "string": "1E-6176", - "extjson": { - "d": { "$numberDecimal": "1E-6176" } - } + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6176\"}}" }, { "description": "Scientific - Negative Tiny", "subject": "180000001364000100000000000000000000000000008000", "string": "-1E-6176", - "extjson": { - "d": { "$numberDecimal": "-1E-6176" } - } + 
"extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6176\"}}" + }, + { + "description": "Scientific - Adjusted Exponent Limit", + "subject": "18000000136400F2AF967ED05C82DE3297FF6FDE3CF02F00", + "string": "1.234567890123456789012345678901234E-7", + "extjson": "{\"d\": { \"$numberDecimal\": \"1.234567890123456789012345678901234E-7\" }}" }, { "description": "Scientific - Fractional", "subject": "1800000013640064000000000000000000000000002CB000", "string": "-1.00E-8", - "extjson": { - "d": { "$numberDecimal": "-1.00E-8" } - } + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.00E-8\"}}" }, { "description": "Scientific - 0 with Exponent", "subject": "180000001364000000000000000000000000000000205F00", "string": "0E+6000", - "extjson": { - "d": { "$numberDecimal": "0E+6000" } - } + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6000\"}}" }, { "description": "Scientific - 0 with Negative Exponent", "subject": "1800000013640000000000000000000000000000007A2B00", "string": "0E-611", - "extjson": { - "d": { "$numberDecimal": "0E-611" } - } + "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-611\"}}" }, { "description": "Scientific - No Decimal with Signed Exponent", "subject": "180000001364000100000000000000000000000000463000", "string": "1E+3", - "extjson": { - "d": { "$numberDecimal": "1E+3" } - } + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+3\"}}" }, { "description": "Scientific - Trailing Zero", "subject": "180000001364001A04000000000000000000000000423000", "string": "1.050E+4", - "extjson": { - "d": { "$numberDecimal": "1.050E+4" } - } + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.050E+4\"}}" }, { "description": "Scientific - With Decimal", "subject": "180000001364006900000000000000000000000000423000", "string": "1.05E+3", - "extjson": { - "d": { "$numberDecimal": "1.05E+3" } - } + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.05E+3\"}}" }, { "description": "Scientific - Full", "subject": "18000000136400FFFFFFFFFFFFFFFFFFFFFFFFFFFF403000", - "_string": 
"5.192296858534827628530496329220095E+33", "string": "5192296858534827628530496329220095", - "extjson": { - "d": { "$numberDecimal": "5.192296858534827628530496329220095E+33" } - } + "extjson": "{\"d\" : {\"$numberDecimal\" : \"5192296858534827628530496329220095\"}}" }, { "description": "Scientific - Large", "subject": "18000000136400000000000A5BC138938D44C64D31FE5F00", "string": "1.000000000000000000000000000000000E+6144", - "extjson": { - "d": { "$numberDecimal": "1.000000000000000000000000000000000E+6144" } - } + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000000E+6144\"}}" }, { "description": "Scientific - Largest", "subject": "18000000136400FFFFFFFF638E8D37C087ADBE09EDFF5F00", "string": "9.999999999999999999999999999999999E+6144", - "extjson": { - "d": { "$numberDecimal": "9.999999999999999999999999999999999E+6144" } - } + "extjson": "{\"d\" : {\"$numberDecimal\" : \"9.999999999999999999999999999999999E+6144\"}}" }, { "description": "Non-Canonical Parsing - Exponent Normalization", "subject": "1800000013640064000000000000000000000000002CB000", "string": "-1.00E-8", - "extjson": { - "d": { "$numberDecimal": "-100E-10" } - } + "to_extjson": false, + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-100E-10\"}}" }, { "description": "Non-Canonical Parsing - Unsigned Positive Exponent", "subject": "180000001364000100000000000000000000000000463000", "string": "1E+3", - "extjson": { - "d": { "$numberDecimal": "1E3" } - } + "to_extjson": false, + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E3\"}}" }, { "description": "Non-Canonical Parsing - Lowercase Exponent Identifier", "subject": "180000001364000100000000000000000000000000463000", "string": "1E+3", - "extjson": { - "d": { "$numberDecimal": "1e+3" } - } + "to_extjson": false, + "extjson": "{\"d\" : {\"$numberDecimal\" : \"1e+3\"}}" }, { "description": "Non-Canonical Parsing - Long Significand with Exponent", "subject": "1800000013640079D9E0F9763ADA429D0200000000583000", "string": 
"1.2345689012345789012345E+34", - "extjson": { - "d": { "$numberDecimal": "12345689012345789012345E+12" } - } + "to_extjson": false, + "extjson": "{\"d\" : {\"$numberDecimal\" : \"12345689012345789012345E+12\"}}" }, { "description": "Non-Canonical Parsing - Positive Sign", "subject": "18000000136400F2AF967ED05C82DE3297FF6FDE3C403000", - "_string": "1.234567890123456789012345678901234E+33", "string": "1234567890123456789012345678901234", - "extjson": { - "d": { "$numberDecimal": "+1234567890123456789012345678901234" } - } - }, - { - "description": "Non-Canonical Parsing - Long Significand", - "subject": "18000000136400F2AF967ED05C82DE3297FF6FDE3C403000", - "_string": "1.234567890123456789012345678901234E+33", - "string": "1234567890123456789012345678901234", - "extjson": { - "d": { "$numberDecimal": "1234567890123456789012345678901234" } - } + "to_extjson": false, + "extjson": "{\"d\" : {\"$numberDecimal\" : \"+1234567890123456789012345678901234\"}}" }, { "description": "Non-Canonical Parsing - Long Decimal String", "subject": "180000001364000100000000000000000000000000722800", "string": "1E-999", - "extjson": { - "d": { "$numberDecimal": 
".000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001" } - } + "to_extjson": false, + "extjson": "{\"d\" : {\"$numberDecimal\" : 
\".000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001\"}}" + }, + { + "description": "Non-Canonical Parsing - nan", + "subject": "180000001364000000000000000000000000000000007C00", + "string": "NaN", + "to_extjson": false, + "extjson": "{\"d\" : {\"$numberDecimal\" : \"nan\"}}" + }, + { + "description": "Non-Canonical Parsing - nAn", + "subject": "180000001364000000000000000000000000000000007C00", + "string": "NaN", + "to_extjson": false, + "extjson": "{\"d\" : {\"$numberDecimal\" : \"nAn\"}}" + }, + { + "description": "Non-Canonical Parsing - +infinity", + "subject": "180000001364000000000000000000000000000000007800", + "string": "Infinity", + "to_extjson": false, + "extjson": "{\"d\" : {\"$numberDecimal\" : \"+infinity\"}}" + }, + { + "description": "Non-Canonical Parsing - infinity", + "subject": "180000001364000000000000000000000000000000007800", + "string": "Infinity", + "to_extjson": false, + "extjson": "{\"d\" : {\"$numberDecimal\" : \"infinity\"}}" + }, + { + "description": "Non-Canonical Parsing - infiniTY", + "subject": 
"180000001364000000000000000000000000000000007800", + "string": "Infinity", + "to_extjson": false, + "extjson": "{\"d\" : {\"$numberDecimal\" : \"infiniTY\"}}" + }, + { + "description": "Non-Canonical Parsing - inf", + "subject": "180000001364000000000000000000000000000000007800", + "string": "Infinity", + "to_extjson": false, + "extjson": "{\"d\" : {\"$numberDecimal\" : \"inf\"}}" + }, + { + "description": "Non-Canonical Parsing - inF", + "subject": "180000001364000000000000000000000000000000007800", + "string": "Infinity", + "to_extjson": false, + "extjson": "{\"d\" : {\"$numberDecimal\" : \"inF\"}}" + }, + { + "description": "Non-Canonical Parsing - -infinity", + "subject": "18000000136400000000000000000000000000000000F800", + "string": "-Infinity", + "to_extjson": false, + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-infinity\"}}" + }, + { + "description": "Non-Canonical Parsing - -infiniTy", + "subject": "18000000136400000000000000000000000000000000F800", + "string": "-Infinity", + "to_extjson": false, + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-infiniTy\"}}" + }, + { + "description": "Non-Canonical Parsing - -Inf", + "subject": "18000000136400000000000000000000000000000000F800", + "string": "-Infinity", + "to_extjson": false, + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-Inf\"}}" + }, + { + "description": "Non-Canonical Parsing - -inf", + "subject": "18000000136400000000000000000000000000000000F800", + "string": "-Infinity", + "to_extjson": false, + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-inf\"}}" + }, + { + "description": "Non-Canonical Parsing - -inF", + "subject": "18000000136400000000000000000000000000000000F800", + "string": "-Infinity", + "to_extjson": false, + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-inF\"}}" } ], "parseErrors": [ @@ -339,8 +516,12 @@ var decimalTestsJSON = ` "subject": "E01" }, { - "description": "Invalid NaN specification", - "subject": "nan" + "description": "Exponent too large", + "subject": "1E6112" + }, + { + 
"description": "Exponent too small", + "subject": "1E-6177" }, { "description": "Just a decimal place", diff --git a/bson/encode.go b/bson/encode.go index c228e28d3..add39e865 100644 --- a/bson/encode.go +++ b/bson/encode.go @@ -247,7 +247,7 @@ func (e *encoder) addElemName(kind byte, name string) { func (e *encoder) addElem(name string, v reflect.Value, minSize bool) { if !v.IsValid() { - e.addElemName('\x0A', name) + e.addElemName(0x0A, name) return } @@ -276,29 +276,29 @@ func (e *encoder) addElem(name string, v reflect.Value, minSize bool) { panic("ObjectIDs must be exactly 12 bytes long (got " + strconv.Itoa(len(s)) + ")") } - e.addElemName('\x07', name) + e.addElemName(0x07, name) e.addBytes([]byte(s)...) case typeSymbol: - e.addElemName('\x0E', name) + e.addElemName(0x0E, name) e.addStr(s) case typeJSONNumber: n := v.Interface().(json.Number) if i, err := n.Int64(); err == nil { - e.addElemName('\x12', name) + e.addElemName(0x12, name) e.addInt64(i) } else if f, err := n.Float64(); err == nil { - e.addElemName('\x01', name) + e.addElemName(0x01, name) e.addFloat64(f) } else { panic("failed to convert json.Number to a number: " + s) } default: - e.addElemName('\x02', name) + e.addElemName(0x02, name) e.addStr(s) } case reflect.Float32, reflect.Float64: - e.addElemName('\x01', name) + e.addElemName(0x01, name) e.addFloat64(v.Float()) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: @@ -306,40 +306,40 @@ func (e *encoder) addElem(name string, v reflect.Value, minSize bool) { if int64(u) < 0 { panic("BSON has no uint64 type, and value is too large to fit correctly in an int64") } else if u <= math.MaxInt32 && (minSize || v.Kind() <= reflect.Uint32) { - e.addElemName('\x10', name) + e.addElemName(0x10, name) e.addInt32(int32(u)) } else { - e.addElemName('\x12', name) + e.addElemName(0x12, name) e.addInt64(int64(u)) } case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: switch v.Type() { 
case typeMongoTimestamp: - e.addElemName('\x11', name) + e.addElemName(0x11, name) e.addInt64(v.Int()) case typeOrderKey: if v.Int() == int64(MaxKey) { - e.addElemName('\x7F', name) + e.addElemName(0x7F, name) } else { - e.addElemName('\xFF', name) + e.addElemName(0xFF, name) } default: i := v.Int() if (minSize || v.Type().Kind() != reflect.Int64) && i >= math.MinInt32 && i <= math.MaxInt32 { // It fits into an int32, encode as such. - e.addElemName('\x10', name) + e.addElemName(0x10, name) e.addInt32(int32(i)) } else { - e.addElemName('\x12', name) + e.addElemName(0x12, name) e.addInt64(i) } } case reflect.Bool: - e.addElemName('\x08', name) + e.addElemName(0x08, name) if v.Bool() { e.addBytes(1) } else { @@ -347,40 +347,40 @@ func (e *encoder) addElem(name string, v reflect.Value, minSize bool) { } case reflect.Map: - e.addElemName('\x03', name) + e.addElemName(0x03, name) e.addDoc(v) case reflect.Slice: vt := v.Type() et := vt.Elem() if et.Kind() == reflect.Uint8 { - e.addElemName('\x05', name) - e.addBinary('\x00', v.Bytes()) + e.addElemName(0x05, name) + e.addBinary(0x00, v.Bytes()) } else if et == typeDocElem || et == typeRawDocElem { - e.addElemName('\x03', name) + e.addElemName(0x03, name) e.addDoc(v) } else { - e.addElemName('\x04', name) + e.addElemName(0x04, name) e.addDoc(v) } case reflect.Array: et := v.Type().Elem() if et.Kind() == reflect.Uint8 { - e.addElemName('\x05', name) + e.addElemName(0x05, name) if v.CanAddr() { - e.addBinary('\x00', v.Slice(0, v.Len()).Interface().([]byte)) + e.addBinary(0x00, v.Slice(0, v.Len()).Interface().([]byte)) } else { n := v.Len() e.addInt32(int32(n)) - e.addBytes('\x00') + e.addBytes(0x00) for i := 0; i < n; i++ { el := v.Index(i) e.addBytes(byte(el.Uint())) } } } else { - e.addElemName('\x04', name) + e.addElemName(0x04, name) e.addDoc(v) } @@ -399,11 +399,16 @@ func (e *encoder) addElem(name string, v reflect.Value, minSize bool) { e.addBytes(s.Data...) 
case Binary: - e.addElemName('\x05', name) + e.addElemName(0x05, name) e.addBinary(s.Kind, s.Data) + case Decimal128: + e.addElemName(0x13, name) + e.addInt64(int64(s.l)) + e.addInt64(int64(s.h)) + case DBPointer: - e.addElemName('\x0C', name) + e.addElemName(0x0C, name) e.addStr(s.Namespace) if len(s.Id) != 12 { panic("ObjectIDs must be exactly 12 bytes long (got " + @@ -412,16 +417,16 @@ func (e *encoder) addElem(name string, v reflect.Value, minSize bool) { e.addBytes([]byte(s.Id)...) case RegEx: - e.addElemName('\x0B', name) + e.addElemName(0x0B, name) e.addCStr(s.Pattern) e.addCStr(s.Options) case JavaScript: if s.Scope == nil { - e.addElemName('\x0D', name) + e.addElemName(0x0D, name) e.addStr(s.Code) } else { - e.addElemName('\x0F', name) + e.addElemName(0x0F, name) start := e.reserveInt32() e.addStr(s.Code) e.addDoc(reflect.ValueOf(s.Scope)) @@ -430,18 +435,18 @@ func (e *encoder) addElem(name string, v reflect.Value, minSize bool) { case time.Time: // MongoDB handles timestamps as milliseconds. - e.addElemName('\x09', name) + e.addElemName(0x09, name) e.addInt64(s.Unix()*1000 + int64(s.Nanosecond()/1e6)) case url.URL: - e.addElemName('\x02', name) + e.addElemName(0x02, name) e.addStr(s.String()) case undefined: - e.addElemName('\x06', name) + e.addElemName(0x06, name) default: - e.addElemName('\x03', name) + e.addElemName(0x03, name) e.addDoc(v) } From f9669fa0ae9e319774e7b392aed73554486d425f Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Wed, 6 Apr 2016 15:09:30 -0300 Subject: [PATCH 253/305] Update JSON decimal tests from upstream. 
--- bson/decimal_test.go | 42 +++++++++++++++++++++++++++--------------- 1 file changed, 27 insertions(+), 15 deletions(-) diff --git a/bson/decimal_test.go b/bson/decimal_test.go index e222c234d..c5a043103 100644 --- a/bson/decimal_test.go +++ b/bson/decimal_test.go @@ -193,14 +193,14 @@ var decimalTestsJSON = ` { "description": "Special - Canonical Positive Infinity", "subject": "180000001364000000000000000000000000000000007800", - "string": "Infinity", - "extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}" + "string": "Inf", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"Inf\"}}" }, { "description": "Special - Canonical Negative Infinity", "subject": "18000000136400000000000000000000000000000000F800", - "string": "-Infinity", - "extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}" + "string": "-Inf", + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-Inf\"}}" }, { "description": "Special - Invalid representation treated as 0", @@ -426,70 +426,70 @@ var decimalTestsJSON = ` { "description": "Non-Canonical Parsing - +infinity", "subject": "180000001364000000000000000000000000000000007800", - "string": "Infinity", + "string": "Inf", "to_extjson": false, "extjson": "{\"d\" : {\"$numberDecimal\" : \"+infinity\"}}" }, { "description": "Non-Canonical Parsing - infinity", "subject": "180000001364000000000000000000000000000000007800", - "string": "Infinity", + "string": "Inf", "to_extjson": false, "extjson": "{\"d\" : {\"$numberDecimal\" : \"infinity\"}}" }, { "description": "Non-Canonical Parsing - infiniTY", "subject": "180000001364000000000000000000000000000000007800", - "string": "Infinity", + "string": "Inf", "to_extjson": false, "extjson": "{\"d\" : {\"$numberDecimal\" : \"infiniTY\"}}" }, { "description": "Non-Canonical Parsing - inf", "subject": "180000001364000000000000000000000000000000007800", - "string": "Infinity", + "string": "Inf", "to_extjson": false, "extjson": "{\"d\" : {\"$numberDecimal\" : \"inf\"}}" }, { "description": "Non-Canonical Parsing 
- inF", "subject": "180000001364000000000000000000000000000000007800", - "string": "Infinity", + "string": "Inf", "to_extjson": false, "extjson": "{\"d\" : {\"$numberDecimal\" : \"inF\"}}" }, { "description": "Non-Canonical Parsing - -infinity", "subject": "18000000136400000000000000000000000000000000F800", - "string": "-Infinity", + "string": "-Inf", "to_extjson": false, "extjson": "{\"d\" : {\"$numberDecimal\" : \"-infinity\"}}" }, { "description": "Non-Canonical Parsing - -infiniTy", "subject": "18000000136400000000000000000000000000000000F800", - "string": "-Infinity", + "string": "-Inf", "to_extjson": false, "extjson": "{\"d\" : {\"$numberDecimal\" : \"-infiniTy\"}}" }, { "description": "Non-Canonical Parsing - -Inf", "subject": "18000000136400000000000000000000000000000000F800", - "string": "-Infinity", + "string": "-Inf", "to_extjson": false, - "extjson": "{\"d\" : {\"$numberDecimal\" : \"-Inf\"}}" + "extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}" }, { "description": "Non-Canonical Parsing - -inf", "subject": "18000000136400000000000000000000000000000000F800", - "string": "-Infinity", + "string": "-Inf", "to_extjson": false, "extjson": "{\"d\" : {\"$numberDecimal\" : \"-inf\"}}" }, { "description": "Non-Canonical Parsing - -inF", "subject": "18000000136400000000000000000000000000000000F800", - "string": "-Infinity", + "string": "-Inf", "to_extjson": false, "extjson": "{\"d\" : {\"$numberDecimal\" : \"-inF\"}}" } @@ -587,6 +587,18 @@ var decimalTestsJSON = ` "description": "Empty string", "subject": "" }, + { + "description": "leading white space positive number", + "subject": " 1" + }, + { + "description": "leading white space negative number", + "subject": " -1" + }, + { + "description": "trailing white space", + "subject": "1 " + }, { "description": "Invalid", "subject": "E" From 827127599980e87abccea6a90309604be8539f03 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Wed, 6 Apr 2016 15:13:28 -0300 Subject: [PATCH 254/305] Decimal JSON 
tests now use Inf as well. --- bson/decimal_test.go | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/bson/decimal_test.go b/bson/decimal_test.go index c5a043103..6e112c690 100644 --- a/bson/decimal_test.go +++ b/bson/decimal_test.go @@ -72,12 +72,6 @@ func (s *S) TestDecimalTests(c *C) { err := json.Unmarshal([]byte(decimalTestsJSON), &tests) c.Assert(err, IsNil) - // These also conform to the spec and are used by Go elsewhere. - goStr := map[string]string{ - "Infinity": "Inf", - "-Infinity": "-Inf", - } - for _, test := range tests.Valid { c.Logf("Running decimal128 test: %s (string %q)", test.Description, test.String) subject, err := hex.DecodeString(test.Subject) @@ -89,26 +83,23 @@ func (s *S) TestDecimalTests(c *C) { d, isDecimal := value.D.(bson.Decimal128) c.Assert(isDecimal, Equals, true) - wantStr := test.String - if s, ok := goStr[test.String]; ok { - wantStr = s - } - c.Assert(d.String(), Equals, wantStr) + // Generate canonical representation. + c.Assert(d.String(), Equals, test.String) // Parse Go variant representation (Inf vs. Infinity). - parsed, err := bson.ParseDecimal128(wantStr) + parsed, err := bson.ParseDecimal128(test.String) c.Assert(err, IsNil) - c.Assert(parsed.String(), Equals, wantStr) + c.Assert(parsed.String(), Equals, test.String) // Parse original output representation. parsed, err = bson.ParseDecimal128(test.String) c.Assert(err, IsNil) - c.Assert(parsed.String(), Equals, wantStr) + c.Assert(parsed.String(), Equals, test.String) // Parse non-canonical input representation. parsed, err = bson.ParseDecimal128(extJSONRepr(test.ExtJSON)) c.Assert(err, IsNil) - c.Assert(parsed.String(), Equals, wantStr) + c.Assert(parsed.String(), Equals, test.String) // Marshal back into BSON data. 
data, err := bson.Marshal(value) From 2451555cc2651b3754ebb21df4b34f47af971482 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Sun, 24 Apr 2016 19:58:36 -0300 Subject: [PATCH 255/305] Fix Secondary mode over mongos. Reported by Gabriel Russell. --- cluster.go | 5 ++- cluster_test.go | 112 ++++++++++++++++++++++++++++++++++++++++++++++++ server.go | 11 +++++ 3 files changed, 127 insertions(+), 1 deletion(-) diff --git a/cluster.go b/cluster.go index f97320e8d..4ed29bea1 100644 --- a/cluster.go +++ b/cluster.go @@ -588,7 +588,10 @@ func (cluster *mongoCluster) AcquireSocket(mode Mode, slaveOk bool, syncTimeout mastersLen := cluster.masters.Len() slavesLen := cluster.servers.Len() - mastersLen debugf("Cluster has %d known masters and %d known slaves.", mastersLen, slavesLen) - if !(slaveOk && mode == Secondary) && mastersLen > 0 || slaveOk && slavesLen > 0 { + if mastersLen > 0 && !(slaveOk && mode == Secondary) || slavesLen > 0 && slaveOk { + break + } + if mastersLen > 0 && mode == Secondary && cluster.masters.HasMongos() { break } if started.IsZero() { diff --git a/cluster_test.go b/cluster_test.go index 3fbb6a8ef..524acbc93 100644 --- a/cluster_test.go +++ b/cluster_test.go @@ -1366,6 +1366,118 @@ func (s *S) TestMonotonicSlaveOkFlagWithMongos(c *C) { c.Check(slaveDelta, Equals, 5) // The counting for both, plus 5 queries above. } +func (s *S) TestSecondaryModeWithMongos(c *C) { + session, err := mgo.Dial("localhost:40021") + c.Assert(err, IsNil) + defer session.Close() + + ssresult := &struct{ Host string }{} + imresult := &struct{ IsMaster bool }{} + + // Figure the master while still using the strong session. + err = session.Run("serverStatus", ssresult) + c.Assert(err, IsNil) + err = session.Run("isMaster", imresult) + c.Assert(err, IsNil) + master := ssresult.Host + c.Assert(imresult.IsMaster, Equals, true, Commentf("%s is not the master", master)) + + // Ensure mongos is aware about the current topology. 
+ s.Stop(":40201") + s.StartAll() + + mongos, err := mgo.Dial("localhost:40202") + c.Assert(err, IsNil) + defer mongos.Close() + + mongos.SetSyncTimeout(5 * time.Second) + + // Insert some data as otherwise 3.2+ doesn't seem to run the query at all. + err = mongos.DB("mydb").C("mycoll").Insert(bson.M{"n": 1}) + c.Assert(err, IsNil) + + // Wait until all servers see the data. + for _, addr := range []string{"localhost:40021", "localhost:40022", "localhost:40023"} { + session, err := mgo.Dial(addr + "?connect=direct") + c.Assert(err, IsNil) + defer session.Close() + session.SetMode(mgo.Monotonic, true) + for i := 300; i >= 0; i-- { + n, err := session.DB("mydb").C("mycoll").Find(nil).Count() + c.Assert(err, IsNil) + if n == 1 { + break + } + if i == 0 { + c.Fatalf("Inserted data never reached " + addr) + } + time.Sleep(100 * time.Millisecond) + } + } + + // Collect op counters for everyone. + q21a := s.countQueries(c, "localhost:40021") + q22a := s.countQueries(c, "localhost:40022") + q23a := s.countQueries(c, "localhost:40023") + + // Do a Secondary query through MongoS + + mongos.SetMode(mgo.Secondary, true) + + coll := mongos.DB("mydb").C("mycoll") + var result struct{ N int } + for i := 0; i != 5; i++ { + err = coll.Find(nil).One(&result) + c.Assert(err, IsNil) + c.Assert(result.N, Equals, 1) + } + + // Collect op counters for everyone again. + q21b := s.countQueries(c, "localhost:40021") + q22b := s.countQueries(c, "localhost:40022") + q23b := s.countQueries(c, "localhost:40023") + + var masterDelta, slaveDelta int + switch hostPort(master) { + case "40021": + masterDelta = q21b - q21a + slaveDelta = (q22b - q22a) + (q23b - q23a) + case "40022": + masterDelta = q22b - q22a + slaveDelta = (q21b - q21a) + (q23b - q23a) + case "40023": + masterDelta = q23b - q23a + slaveDelta = (q21b - q21a) + (q22b - q22a) + default: + c.Fatal("Uh?") + } + + c.Check(masterDelta, Equals, 0) // Just the counting itself. 
+ c.Check(slaveDelta, Equals, 5) // The counting for both, plus 5 queries above. +} + +func (s *S) TestSecondaryModeWithMongosInsert(c *C) { + if *fast { + c.Skip("-fast") + } + + session, err := mgo.Dial("localhost:40202") + c.Assert(err, IsNil) + defer session.Close() + + session.SetMode(mgo.Secondary, true) + session.SetSyncTimeout(4 * time.Second) + + coll := session.DB("mydb").C("mycoll") + err = coll.Insert(M{"a": 1}) + c.Assert(err, IsNil) + + var result struct{ A int } + coll.Find(nil).One(&result) + c.Assert(result.A, Equals, 1) +} + + func (s *S) TestRemovalOfClusterMember(c *C) { if *fast { c.Skip("-fast") diff --git a/server.go b/server.go index fcac1e7de..ba0480e58 100644 --- a/server.go +++ b/server.go @@ -402,6 +402,15 @@ func (servers *mongoServers) Empty() bool { return len(servers.slice) == 0 } +func (servers *mongoServers) HasMongos() bool { + for _, s := range servers.slice { + if s.Info().Mongos { + return true + } + } + return false +} + // BestFit returns the best guess of what would be the most interesting // server to perform operations on at this point in time. func (servers *mongoServers) BestFit(mode Mode, serverTags []bson.D) *mongoServer { @@ -421,6 +430,8 @@ func (servers *mongoServers) BestFit(mode Mode, serverTags []bson.D) *mongoServe switch { case serverTags != nil && !next.info.Mongos && !next.hasTags(serverTags): // Must have requested tags. + case mode == Secondary && next.info.Master && !next.info.Mongos: + // Must be a secondary or mongos. case next.info.Master != best.info.Master && mode != Nearest: // Prefer slaves, unless the mode is PrimaryPreferred. swap = (mode == PrimaryPreferred) != best.info.Master From 860dc8c56b29492c6281a1e6c7884475046d3797 Mon Sep 17 00:00:00 2001 From: "Jay R. 
Wren" Date: Thu, 28 Apr 2016 14:21:13 -0400 Subject: [PATCH 256/305] fix doc comment --- session.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/session.go b/session.go index 8312924e3..a8ad115ef 100644 --- a/session.go +++ b/session.go @@ -3003,7 +3003,7 @@ Error: // unmarshalled into by gobson. This function blocks until either a result // is available or an error happens. For example: // -// err := collection.Find(bson.M{"a", 1}).One(&result) +// err := collection.Find(bson.M{"a": 1}).One(&result) // // In case the resulting document includes a field named $err or errmsg, which // are standard ways for MongoDB to return query errors, the returned err will From ab443792a5ca8357163bc3ff58d524501caa59e0 Mon Sep 17 00:00:00 2001 From: Gustavo Niemeyer Date: Mon, 2 May 2016 09:16:27 -0300 Subject: [PATCH 257/305] Import json package from Go under internal. --- internal/json/LICENSE | 27 + internal/json/bench_test.go | 223 ++++ internal/json/decode.go | 1183 +++++++++++++++++++++ internal/json/decode_test.go | 1512 +++++++++++++++++++++++++++ internal/json/encode.go | 1239 ++++++++++++++++++++++ internal/json/encode_test.go | 613 +++++++++++ internal/json/example_test.go | 252 +++++ internal/json/fold.go | 143 +++ internal/json/fold_test.go | 116 ++ internal/json/indent.go | 141 +++ internal/json/number_test.go | 133 +++ internal/json/scanner.go | 623 +++++++++++ internal/json/scanner_test.go | 316 ++++++ internal/json/stream.go | 507 +++++++++ internal/json/stream_test.go | 417 ++++++++ internal/json/tagkey_test.go | 115 ++ internal/json/tags.go | 44 + internal/json/tags_test.go | 28 + internal/json/testdata/code.json.gz | Bin 0 -> 120432 bytes 19 files changed, 7632 insertions(+) create mode 100644 internal/json/LICENSE create mode 100644 internal/json/bench_test.go create mode 100644 internal/json/decode.go create mode 100644 internal/json/decode_test.go create mode 100644 internal/json/encode.go create mode 100644 internal/json/encode_test.go 
create mode 100644 internal/json/example_test.go create mode 100644 internal/json/fold.go create mode 100644 internal/json/fold_test.go create mode 100644 internal/json/indent.go create mode 100644 internal/json/number_test.go create mode 100644 internal/json/scanner.go create mode 100644 internal/json/scanner_test.go create mode 100644 internal/json/stream.go create mode 100644 internal/json/stream_test.go create mode 100644 internal/json/tagkey_test.go create mode 100644 internal/json/tags.go create mode 100644 internal/json/tags_test.go create mode 100644 internal/json/testdata/code.json.gz diff --git a/internal/json/LICENSE b/internal/json/LICENSE new file mode 100644 index 000000000..744875676 --- /dev/null +++ b/internal/json/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/internal/json/bench_test.go b/internal/json/bench_test.go new file mode 100644 index 000000000..cd7380b1e --- /dev/null +++ b/internal/json/bench_test.go @@ -0,0 +1,223 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Large data benchmark. +// The JSON data is a summary of agl's changes in the +// go, webkit, and chromium open source projects. +// We benchmark converting between the JSON form +// and in-memory data structures. 
+ +package json + +import ( + "bytes" + "compress/gzip" + "io/ioutil" + "os" + "strings" + "testing" +) + +type codeResponse struct { + Tree *codeNode `json:"tree"` + Username string `json:"username"` +} + +type codeNode struct { + Name string `json:"name"` + Kids []*codeNode `json:"kids"` + CLWeight float64 `json:"cl_weight"` + Touches int `json:"touches"` + MinT int64 `json:"min_t"` + MaxT int64 `json:"max_t"` + MeanT int64 `json:"mean_t"` +} + +var codeJSON []byte +var codeStruct codeResponse + +func codeInit() { + f, err := os.Open("testdata/code.json.gz") + if err != nil { + panic(err) + } + defer f.Close() + gz, err := gzip.NewReader(f) + if err != nil { + panic(err) + } + data, err := ioutil.ReadAll(gz) + if err != nil { + panic(err) + } + + codeJSON = data + + if err := Unmarshal(codeJSON, &codeStruct); err != nil { + panic("unmarshal code.json: " + err.Error()) + } + + if data, err = Marshal(&codeStruct); err != nil { + panic("marshal code.json: " + err.Error()) + } + + if !bytes.Equal(data, codeJSON) { + println("different lengths", len(data), len(codeJSON)) + for i := 0; i < len(data) && i < len(codeJSON); i++ { + if data[i] != codeJSON[i] { + println("re-marshal: changed at byte", i) + println("orig: ", string(codeJSON[i-10:i+10])) + println("new: ", string(data[i-10:i+10])) + break + } + } + panic("re-marshal code.json: different result") + } +} + +func BenchmarkCodeEncoder(b *testing.B) { + if codeJSON == nil { + b.StopTimer() + codeInit() + b.StartTimer() + } + enc := NewEncoder(ioutil.Discard) + for i := 0; i < b.N; i++ { + if err := enc.Encode(&codeStruct); err != nil { + b.Fatal("Encode:", err) + } + } + b.SetBytes(int64(len(codeJSON))) +} + +func BenchmarkCodeMarshal(b *testing.B) { + if codeJSON == nil { + b.StopTimer() + codeInit() + b.StartTimer() + } + for i := 0; i < b.N; i++ { + if _, err := Marshal(&codeStruct); err != nil { + b.Fatal("Marshal:", err) + } + } + b.SetBytes(int64(len(codeJSON))) +} + +func BenchmarkCodeDecoder(b *testing.B) 
{ + if codeJSON == nil { + b.StopTimer() + codeInit() + b.StartTimer() + } + var buf bytes.Buffer + dec := NewDecoder(&buf) + var r codeResponse + for i := 0; i < b.N; i++ { + buf.Write(codeJSON) + // hide EOF + buf.WriteByte('\n') + buf.WriteByte('\n') + buf.WriteByte('\n') + if err := dec.Decode(&r); err != nil { + b.Fatal("Decode:", err) + } + } + b.SetBytes(int64(len(codeJSON))) +} + +func BenchmarkDecoderStream(b *testing.B) { + b.StopTimer() + var buf bytes.Buffer + dec := NewDecoder(&buf) + buf.WriteString(`"` + strings.Repeat("x", 1000000) + `"` + "\n\n\n") + var x interface{} + if err := dec.Decode(&x); err != nil { + b.Fatal("Decode:", err) + } + ones := strings.Repeat(" 1\n", 300000) + "\n\n\n" + b.StartTimer() + for i := 0; i < b.N; i++ { + if i%300000 == 0 { + buf.WriteString(ones) + } + x = nil + if err := dec.Decode(&x); err != nil || x != 1.0 { + b.Fatalf("Decode: %v after %d", err, i) + } + } +} + +func BenchmarkCodeUnmarshal(b *testing.B) { + if codeJSON == nil { + b.StopTimer() + codeInit() + b.StartTimer() + } + for i := 0; i < b.N; i++ { + var r codeResponse + if err := Unmarshal(codeJSON, &r); err != nil { + b.Fatal("Unmarshal:", err) + } + } + b.SetBytes(int64(len(codeJSON))) +} + +func BenchmarkCodeUnmarshalReuse(b *testing.B) { + if codeJSON == nil { + b.StopTimer() + codeInit() + b.StartTimer() + } + var r codeResponse + for i := 0; i < b.N; i++ { + if err := Unmarshal(codeJSON, &r); err != nil { + b.Fatal("Unmarshal:", err) + } + } +} + +func BenchmarkUnmarshalString(b *testing.B) { + data := []byte(`"hello, world"`) + var s string + + for i := 0; i < b.N; i++ { + if err := Unmarshal(data, &s); err != nil { + b.Fatal("Unmarshal:", err) + } + } +} + +func BenchmarkUnmarshalFloat64(b *testing.B) { + var f float64 + data := []byte(`3.14`) + + for i := 0; i < b.N; i++ { + if err := Unmarshal(data, &f); err != nil { + b.Fatal("Unmarshal:", err) + } + } +} + +func BenchmarkUnmarshalInt64(b *testing.B) { + var x int64 + data := []byte(`3`) + + 
for i := 0; i < b.N; i++ { + if err := Unmarshal(data, &x); err != nil { + b.Fatal("Unmarshal:", err) + } + } +} + +func BenchmarkIssue10335(b *testing.B) { + b.ReportAllocs() + var s struct{} + j := []byte(`{"a":{ }}`) + for n := 0; n < b.N; n++ { + if err := Unmarshal(j, &s); err != nil { + b.Fatal(err) + } + } +} diff --git a/internal/json/decode.go b/internal/json/decode.go new file mode 100644 index 000000000..434edf8ea --- /dev/null +++ b/internal/json/decode.go @@ -0,0 +1,1183 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Represents JSON data structure using native Go types: booleans, floats, +// strings, arrays, and maps. + +package json + +import ( + "bytes" + "encoding" + "encoding/base64" + "errors" + "fmt" + "reflect" + "runtime" + "strconv" + "unicode" + "unicode/utf16" + "unicode/utf8" +) + +// Unmarshal parses the JSON-encoded data and stores the result +// in the value pointed to by v. +// +// Unmarshal uses the inverse of the encodings that +// Marshal uses, allocating maps, slices, and pointers as necessary, +// with the following additional rules: +// +// To unmarshal JSON into a pointer, Unmarshal first handles the case of +// the JSON being the JSON literal null. In that case, Unmarshal sets +// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into +// the value pointed at by the pointer. If the pointer is nil, Unmarshal +// allocates a new value for it to point to. +// +// To unmarshal JSON into a struct, Unmarshal matches incoming object +// keys to the keys used by Marshal (either the struct field name or its tag), +// preferring an exact match but also accepting a case-insensitive match. +// Unmarshal will only set exported fields of the struct. 
+// +// To unmarshal JSON into an interface value, +// Unmarshal stores one of these in the interface value: +// +// bool, for JSON booleans +// float64, for JSON numbers +// string, for JSON strings +// []interface{}, for JSON arrays +// map[string]interface{}, for JSON objects +// nil for JSON null +// +// To unmarshal a JSON array into a slice, Unmarshal resets the slice length +// to zero and then appends each element to the slice. +// As a special case, to unmarshal an empty JSON array into a slice, +// Unmarshal replaces the slice with a new empty slice. +// +// To unmarshal a JSON array into a Go array, Unmarshal decodes +// JSON array elements into corresponding Go array elements. +// If the Go array is smaller than the JSON array, +// the additional JSON array elements are discarded. +// If the JSON array is smaller than the Go array, +// the additional Go array elements are set to zero values. +// +// To unmarshal a JSON object into a map, Unmarshal first establishes a map to +// use, If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal +// reuses the existing map, keeping existing entries. Unmarshal then stores key- +// value pairs from the JSON object into the map. The map's key type must +// either be a string or implement encoding.TextUnmarshaler. +// +// If a JSON value is not appropriate for a given target type, +// or if a JSON number overflows the target type, Unmarshal +// skips that field and completes the unmarshaling as best it can. +// If no more serious errors are encountered, Unmarshal returns +// an UnmarshalTypeError describing the earliest such error. +// +// The JSON null value unmarshals into an interface, map, pointer, or slice +// by setting that Go value to nil. Because null is often used in JSON to mean +// ``not present,'' unmarshaling a JSON null into any other Go type has no effect +// on the value and produces no error. 
+// +// When unmarshaling quoted strings, invalid UTF-8 or +// invalid UTF-16 surrogate pairs are not treated as an error. +// Instead, they are replaced by the Unicode replacement +// character U+FFFD. +// +func Unmarshal(data []byte, v interface{}) error { + // Check for well-formedness. + // Avoids filling out half a data structure + // before discovering a JSON syntax error. + var d decodeState + err := checkValid(data, &d.scan) + if err != nil { + return err + } + + d.init(data) + return d.unmarshal(v) +} + +// Unmarshaler is the interface implemented by types +// that can unmarshal a JSON description of themselves. +// The input can be assumed to be a valid encoding of +// a JSON value. UnmarshalJSON must copy the JSON data +// if it wishes to retain the data after returning. +type Unmarshaler interface { + UnmarshalJSON([]byte) error +} + +// An UnmarshalTypeError describes a JSON value that was +// not appropriate for a value of a specific Go type. +type UnmarshalTypeError struct { + Value string // description of JSON value - "bool", "array", "number -5" + Type reflect.Type // type of Go value it could not be assigned to + Offset int64 // error occurred after reading Offset bytes +} + +func (e *UnmarshalTypeError) Error() string { + return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() +} + +// An UnmarshalFieldError describes a JSON object key that +// led to an unexported (and therefore unwritable) struct field. +// (No longer used; kept for compatibility.) +type UnmarshalFieldError struct { + Key string + Type reflect.Type + Field reflect.StructField +} + +func (e *UnmarshalFieldError) Error() string { + return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() +} + +// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. +// (The argument to Unmarshal must be a non-nil pointer.) 
+type InvalidUnmarshalError struct { + Type reflect.Type +} + +func (e *InvalidUnmarshalError) Error() string { + if e.Type == nil { + return "json: Unmarshal(nil)" + } + + if e.Type.Kind() != reflect.Ptr { + return "json: Unmarshal(non-pointer " + e.Type.String() + ")" + } + return "json: Unmarshal(nil " + e.Type.String() + ")" +} + +func (d *decodeState) unmarshal(v interface{}) (err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + err = r.(error) + } + }() + + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr || rv.IsNil() { + return &InvalidUnmarshalError{reflect.TypeOf(v)} + } + + d.scan.reset() + // We decode rv not rv.Elem because the Unmarshaler interface + // test must be applied at the top level of the value. + d.value(rv) + return d.savedError +} + +// A Number represents a JSON number literal. +type Number string + +// String returns the literal text of the number. +func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. +func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + +// isValidNumber reports whether s is a valid JSON number literal. +func isValidNumber(s string) bool { + // This function implements the JSON numbers grammar. + // See https://tools.ietf.org/html/rfc7159#section-6 + // and http://json.org/number.gif + + if s == "" { + return false + } + + // Optional - + if s[0] == '-' { + s = s[1:] + if s == "" { + return false + } + } + + // Digits + switch { + default: + return false + + case s[0] == '0': + s = s[1:] + + case '1' <= s[0] && s[0] <= '9': + s = s[1:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // . followed by 1 or more digits. + if len(s) >= 2 && s[0] == '.' 
&& '0' <= s[1] && s[1] <= '9' { + s = s[2:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // e or E followed by an optional - or + and + // 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + s = s[1:] + if s[0] == '+' || s[0] == '-' { + s = s[1:] + if s == "" { + return false + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // Make sure we are at the end. + return s == "" +} + +// decodeState represents the state while decoding a JSON value. +type decodeState struct { + data []byte + off int // read offset in data + scan scanner + nextscan scanner // for calls to nextValue + savedError error + useNumber bool +} + +// errPhase is used for errors that should not happen unless +// there is a bug in the JSON decoder or something is editing +// the data slice while the decoder executes. +var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?") + +func (d *decodeState) init(data []byte) *decodeState { + d.data = data + d.off = 0 + d.savedError = nil + return d +} + +// error aborts the decoding by panicking with err. +func (d *decodeState) error(err error) { + panic(err) +} + +// saveError saves the first err it is called with, +// for reporting at the end of the unmarshal. +func (d *decodeState) saveError(err error) { + if d.savedError == nil { + d.savedError = err + } +} + +// next cuts off and returns the next full JSON value in d.data[d.off:]. +// The next value is known to be an object or array, not a literal. +func (d *decodeState) next() []byte { + c := d.data[d.off] + item, rest, err := nextValue(d.data[d.off:], &d.nextscan) + if err != nil { + d.error(err) + } + d.off = len(d.data) - len(rest) + + // Our scanner has seen the opening brace/bracket + // and thinks we're still in the middle of the object. + // invent a closing brace/bracket to get it out. 
+ if c == '{' { + d.scan.step(&d.scan, '}') + } else { + d.scan.step(&d.scan, ']') + } + + return item +} + +// scanWhile processes bytes in d.data[d.off:] until it +// receives a scan code not equal to op. +// It updates d.off and returns the new scan code. +func (d *decodeState) scanWhile(op int) int { + var newOp int + for { + if d.off >= len(d.data) { + newOp = d.scan.eof() + d.off = len(d.data) + 1 // mark processed EOF with len+1 + } else { + c := d.data[d.off] + d.off++ + newOp = d.scan.step(&d.scan, c) + } + if newOp != op { + break + } + } + return newOp +} + +// value decodes a JSON value from d.data[d.off:] into the value. +// it updates d.off to point past the decoded value. +func (d *decodeState) value(v reflect.Value) { + if !v.IsValid() { + _, rest, err := nextValue(d.data[d.off:], &d.nextscan) + if err != nil { + d.error(err) + } + d.off = len(d.data) - len(rest) + + // d.scan thinks we're still at the beginning of the item. + // Feed in an empty string - the shortest, simplest value - + // so that it knows we got to the end of the value. + if d.scan.redo { + // rewind. + d.scan.redo = false + d.scan.step = stateBeginValue + } + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '"') + + n := len(d.scan.parseState) + if n > 0 && d.scan.parseState[n-1] == parseObjectKey { + // d.scan thinks we just read an object key; finish the object + d.scan.step(&d.scan, ':') + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '}') + } + + return + } + + switch op := d.scanWhile(scanSkipSpace); op { + default: + d.error(errPhase) + + case scanBeginArray: + d.array(v) + + case scanBeginObject: + d.object(v) + + case scanBeginLiteral: + d.literal(v) + } +} + +type unquotedValue struct{} + +// valueQuoted is like value but decodes a +// quoted string literal or literal null into an interface value. +// If it finds anything other than a quoted string literal or null, +// valueQuoted returns unquotedValue{}. 
+func (d *decodeState) valueQuoted() interface{} { + switch op := d.scanWhile(scanSkipSpace); op { + default: + d.error(errPhase) + + case scanBeginArray: + d.array(reflect.Value{}) + + case scanBeginObject: + d.object(reflect.Value{}) + + case scanBeginLiteral: + switch v := d.literalInterface().(type) { + case nil, string: + return v + } + } + return unquotedValue{} +} + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// if it encounters an Unmarshaler, indirect stops and returns that. +// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. +func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. + if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + v = e + continue + } + } + + if v.Kind() != reflect.Ptr { + break + } + + if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { + break + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + if v.Type().NumMethod() > 0 { + if u, ok := v.Interface().(Unmarshaler); ok { + return u, nil, reflect.Value{} + } + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, reflect.Value{} + } + } + v = v.Elem() + } + return nil, nil, v +} + +// array consumes an array from d.data[d.off-1:], decoding into the value v. +// the first byte of the array ('[') has been read already. +func (d *decodeState) array(v reflect.Value) { + // Check for unmarshaler. 
+ u, ut, pv := d.indirect(v, false) + if u != nil { + d.off-- + err := u.UnmarshalJSON(d.next()) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) + d.off-- + d.next() + return + } + + v = pv + + // Check type of target. + switch v.Kind() { + case reflect.Interface: + if v.NumMethod() == 0 { + // Decoding into nil interface? Switch to non-reflect code. + v.Set(reflect.ValueOf(d.arrayInterface())) + return + } + // Otherwise it's invalid. + fallthrough + default: + d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) + d.off-- + d.next() + return + case reflect.Array: + case reflect.Slice: + break + } + + i := 0 + for { + // Look ahead for ] - can only happen on first iteration. + op := d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + + // Back up so d.value can have the byte we just read. + d.off-- + d.scan.undo(op) + + // Get element of array, growing if necessary. + if v.Kind() == reflect.Slice { + // Grow slice if necessary + if i >= v.Cap() { + newcap := v.Cap() + v.Cap()/2 + if newcap < 4 { + newcap = 4 + } + newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) + reflect.Copy(newv, v) + v.Set(newv) + } + if i >= v.Len() { + v.SetLen(i + 1) + } + } + + if i < v.Len() { + // Decode into element. + d.value(v.Index(i)) + } else { + // Ran out of fixed array: skip. + d.value(reflect.Value{}) + } + i++ + + // Next token must be , or ]. + op = d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + if op != scanArrayValue { + d.error(errPhase) + } + } + + if i < v.Len() { + if v.Kind() == reflect.Array { + // Array. Zero the rest. 
+ z := reflect.Zero(v.Type().Elem()) + for ; i < v.Len(); i++ { + v.Index(i).Set(z) + } + } else { + v.SetLen(i) + } + } + if i == 0 && v.Kind() == reflect.Slice { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + } +} + +var nullLiteral = []byte("null") +var textUnmarshalerType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem() + +// object consumes an object from d.data[d.off-1:], decoding into the value v. +// the first byte ('{') of the object has been read already. +func (d *decodeState) object(v reflect.Value) { + // Check for unmarshaler. + u, ut, pv := d.indirect(v, false) + if u != nil { + d.off-- + err := u.UnmarshalJSON(d.next()) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + v = pv + + // Decoding into nil interface? Switch to non-reflect code. + if v.Kind() == reflect.Interface && v.NumMethod() == 0 { + v.Set(reflect.ValueOf(d.objectInterface())) + return + } + + // Check type of target: + // struct or + // map[string]T or map[encoding.TextUnmarshaler]T + switch v.Kind() { + case reflect.Map: + // Map key must either have string kind or be an encoding.TextUnmarshaler. + t := v.Type() + if t.Key().Kind() != reflect.String && + !reflect.PtrTo(t.Key()).Implements(textUnmarshalerType) { + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + if v.IsNil() { + v.Set(reflect.MakeMap(t)) + } + case reflect.Struct: + + default: + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + + var mapElem reflect.Value + + for { + // Read opening " of string key or closing }. + op := d.scanWhile(scanSkipSpace) + if op == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if op != scanBeginLiteral { + d.error(errPhase) + } + + // Read key. 
+ start := d.off - 1 + op = d.scanWhile(scanContinue) + item := d.data[start : d.off-1] + key, ok := unquoteBytes(item) + if !ok { + d.error(errPhase) + } + + // Figure out field corresponding to key. + var subv reflect.Value + destring := false // whether the value is wrapped in a string to be decoded first + + if v.Kind() == reflect.Map { + elemType := v.Type().Elem() + if !mapElem.IsValid() { + mapElem = reflect.New(elemType).Elem() + } else { + mapElem.Set(reflect.Zero(elemType)) + } + subv = mapElem + } else { + var f *field + fields := cachedTypeFields(v.Type()) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, key) { + f = ff + break + } + if f == nil && ff.equalFold(ff.nameBytes, key) { + f = ff + } + } + if f != nil { + subv = v + destring = f.quoted + for _, i := range f.index { + if subv.Kind() == reflect.Ptr { + if subv.IsNil() { + subv.Set(reflect.New(subv.Type().Elem())) + } + subv = subv.Elem() + } + subv = subv.Field(i) + } + } + } + + // Read : before value. + if op == scanSkipSpace { + op = d.scanWhile(scanSkipSpace) + } + if op != scanObjectKey { + d.error(errPhase) + } + + // Read value. + if destring { + switch qv := d.valueQuoted().(type) { + case nil: + d.literalStore(nullLiteral, subv, false) + case string: + d.literalStore([]byte(qv), subv, true) + default: + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) + } + } else { + d.value(subv) + } + + // Write value back to map; + // if using struct, subv points into struct already. 
+ if v.Kind() == reflect.Map { + kt := v.Type().Key() + var kv reflect.Value + switch { + case kt.Kind() == reflect.String: + kv = reflect.ValueOf(key).Convert(v.Type().Key()) + case reflect.PtrTo(kt).Implements(textUnmarshalerType): + kv = reflect.New(v.Type().Key()) + d.literalStore(item, kv, true) + kv = kv.Elem() + default: + panic("json: Unexpected key type") // should never occur + } + v.SetMapIndex(kv, subv) + } + + // Next token must be , or }. + op = d.scanWhile(scanSkipSpace) + if op == scanEndObject { + break + } + if op != scanObjectValue { + d.error(errPhase) + } + } +} + +// literal consumes a literal from d.data[d.off-1:], decoding into the value v. +// The first byte of the literal has been read already +// (that's how the caller knows it's a literal). +func (d *decodeState) literal(v reflect.Value) { + // All bytes inside literal return scanContinue op code. + start := d.off - 1 + op := d.scanWhile(scanContinue) + + // Scan read one byte too far; back up. + d.off-- + d.scan.undo(op) + + d.literalStore(d.data[start:d.off], v, false) +} + +// convertNumber converts the number literal s to a float64 or a Number +// depending on the setting of d.useNumber. +func (d *decodeState) convertNumber(s string) (interface{}, error) { + if d.useNumber { + return Number(s), nil + } + f, err := strconv.ParseFloat(s, 64) + if err != nil { + return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} + } + return f, nil +} + +var numberType = reflect.TypeOf(Number("")) + +// literalStore decodes a literal stored in item into v. +// +// fromQuoted indicates whether this literal came from unwrapping a +// string from the ",string" struct tag option. this is used only to +// produce more helpful error messages. +func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) { + // Check for unmarshaler. 
+ if len(item) == 0 { + //Empty string given + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + return + } + wantptr := item[0] == 'n' // null + u, ut, pv := d.indirect(v, wantptr) + if u != nil { + err := u.UnmarshalJSON(item) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + if item[0] != '"' { + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + } + return + } + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + err := ut.UnmarshalText(s) + if err != nil { + d.error(err) + } + return + } + + v = pv + + switch c := item[0]; c { + case 'n': // null + switch v.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + v.Set(reflect.Zero(v.Type())) + // otherwise, ignore null for primitives/string + } + case 't', 'f': // true, false + value := c == 't' + switch v.Kind() { + default: + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) + } + case reflect.Bool: + v.SetBool(value) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(value)) + } else { + d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) + } + } + + case '"': // string + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + switch v.Kind() { + default: + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + case 
reflect.Slice: + if v.Type().Elem().Kind() != reflect.Uint8 { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + break + } + b := make([]byte, base64.StdEncoding.DecodedLen(len(s))) + n, err := base64.StdEncoding.Decode(b, s) + if err != nil { + d.saveError(err) + break + } + v.SetBytes(b[:n]) + case reflect.String: + v.SetString(string(s)) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(string(s))) + } else { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + } + } + + default: // number + if c != '-' && (c < '0' || c > '9') { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + s := string(item) + switch v.Kind() { + default: + if v.Kind() == reflect.String && v.Type() == numberType { + v.SetString(s) + if !isValidNumber(s) { + d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item)) + } + break + } + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) + } + case reflect.Interface: + n, err := d.convertNumber(s) + if err != nil { + d.saveError(err) + break + } + if v.NumMethod() != 0 { + d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) + break + } + v.Set(reflect.ValueOf(n)) + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || v.OverflowInt(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetInt(n) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n, err := strconv.ParseUint(s, 10, 64) + if err != nil || v.OverflowUint(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), 
int64(d.off)}) + break + } + v.SetUint(n) + + case reflect.Float32, reflect.Float64: + n, err := strconv.ParseFloat(s, v.Type().Bits()) + if err != nil || v.OverflowFloat(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetFloat(n) + } + } +} + +// The xxxInterface routines build up a value to be stored +// in an empty interface. They are not strictly necessary, +// but they avoid the weight of reflection in this common case. + +// valueInterface is like value but returns interface{} +func (d *decodeState) valueInterface() interface{} { + switch d.scanWhile(scanSkipSpace) { + default: + d.error(errPhase) + panic("unreachable") + case scanBeginArray: + return d.arrayInterface() + case scanBeginObject: + return d.objectInterface() + case scanBeginLiteral: + return d.literalInterface() + } +} + +// arrayInterface is like array but returns []interface{}. +func (d *decodeState) arrayInterface() []interface{} { + var v = make([]interface{}, 0) + for { + // Look ahead for ] - can only happen on first iteration. + op := d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + + // Back up so d.value can have the byte we just read. + d.off-- + d.scan.undo(op) + + v = append(v, d.valueInterface()) + + // Next token must be , or ]. + op = d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + if op != scanArrayValue { + d.error(errPhase) + } + } + return v +} + +// objectInterface is like object but returns map[string]interface{}. +func (d *decodeState) objectInterface() map[string]interface{} { + m := make(map[string]interface{}) + for { + // Read opening " of string key or closing }. + op := d.scanWhile(scanSkipSpace) + if op == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if op != scanBeginLiteral { + d.error(errPhase) + } + + // Read string key. 
+ start := d.off - 1 + op = d.scanWhile(scanContinue) + item := d.data[start : d.off-1] + key, ok := unquote(item) + if !ok { + d.error(errPhase) + } + + // Read : before value. + if op == scanSkipSpace { + op = d.scanWhile(scanSkipSpace) + } + if op != scanObjectKey { + d.error(errPhase) + } + + // Read value. + m[key] = d.valueInterface() + + // Next token must be , or }. + op = d.scanWhile(scanSkipSpace) + if op == scanEndObject { + break + } + if op != scanObjectValue { + d.error(errPhase) + } + } + return m +} + +// literalInterface is like literal but returns an interface value. +func (d *decodeState) literalInterface() interface{} { + // All bytes inside literal return scanContinue op code. + start := d.off - 1 + op := d.scanWhile(scanContinue) + + // Scan read one byte too far; back up. + d.off-- + d.scan.undo(op) + item := d.data[start:d.off] + + switch c := item[0]; c { + case 'n': // null + return nil + + case 't', 'f': // true, false + return c == 't' + + case '"': // string + s, ok := unquote(item) + if !ok { + d.error(errPhase) + } + return s + + default: // number + if c != '-' && (c < '0' || c > '9') { + d.error(errPhase) + } + n, err := d.convertNumber(string(item)) + if err != nil { + d.saveError(err) + } + return n + } +} + +// getu4 decodes \uXXXX from the beginning of s, returning the hex value, +// or it returns -1. +func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + r, err := strconv.ParseUint(string(s[2:6]), 16, 64) + if err != nil { + return -1 + } + return rune(r) +} + +// unquote converts a quoted JSON string literal s into an actual string t. +// The rules are different than for Go, so cannot use strconv.Unquote. +func unquote(s []byte) (t string, ok bool) { + s, ok = unquoteBytes(s) + t = string(s) + return +} + +func unquoteBytes(s []byte) (t []byte, ok bool) { + if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { + return + } + s = s[1 : len(s)-1] + + // Check for unusual characters. 
If there are none, + // then no unquoting is needed, so return a slice of the + // original bytes. + r := 0 + for r < len(s) { + c := s[r] + if c == '\\' || c == '"' || c < ' ' { + break + } + if c < utf8.RuneSelf { + r++ + continue + } + rr, size := utf8.DecodeRune(s[r:]) + if rr == utf8.RuneError && size == 1 { + break + } + r += size + } + if r == len(s) { + return s, true + } + + b := make([]byte, len(s)+2*utf8.UTFMax) + w := copy(b, s[0:r]) + for r < len(s) { + // Out of room? Can only happen if s is full of + // malformed UTF-8 and we're replacing each + // byte with RuneError. + if w >= len(b)-2*utf8.UTFMax { + nb := make([]byte, (len(b)+utf8.UTFMax)*2) + copy(nb, b[0:w]) + b = nb + } + switch c := s[r]; { + case c == '\\': + r++ + if r >= len(s) { + return + } + switch s[r] { + default: + return + case '"', '\\', '/', '\'': + b[w] = s[r] + r++ + w++ + case 'b': + b[w] = '\b' + r++ + w++ + case 'f': + b[w] = '\f' + r++ + w++ + case 'n': + b[w] = '\n' + r++ + w++ + case 'r': + b[w] = '\r' + r++ + w++ + case 't': + b[w] = '\t' + r++ + w++ + case 'u': + r-- + rr := getu4(s[r:]) + if rr < 0 { + return + } + r += 6 + if utf16.IsSurrogate(rr) { + rr1 := getu4(s[r:]) + if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { + // A valid pair; consume. + r += 6 + w += utf8.EncodeRune(b[w:], dec) + break + } + // Invalid surrogate; fall back to replacement rune. + rr = unicode.ReplacementChar + } + w += utf8.EncodeRune(b[w:], rr) + } + + // Quote, control characters are invalid. + case c == '"', c < ' ': + return + + // ASCII + case c < utf8.RuneSelf: + b[w] = c + r++ + w++ + + // Coerce to well-formed UTF-8. + default: + rr, size := utf8.DecodeRune(s[r:]) + r += size + w += utf8.EncodeRune(b[w:], rr) + } + } + return b[0:w], true +} diff --git a/internal/json/decode_test.go b/internal/json/decode_test.go new file mode 100644 index 000000000..30e46ca44 --- /dev/null +++ b/internal/json/decode_test.go @@ -0,0 +1,1512 @@ +// Copyright 2010 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "encoding" + "errors" + "fmt" + "image" + "net" + "reflect" + "strings" + "testing" + "time" +) + +type T struct { + X string + Y int + Z int `json:"-"` +} + +type U struct { + Alphabet string `json:"alpha"` +} + +type V struct { + F1 interface{} + F2 int32 + F3 Number +} + +// ifaceNumAsFloat64/ifaceNumAsNumber are used to test unmarshaling with and +// without UseNumber +var ifaceNumAsFloat64 = map[string]interface{}{ + "k1": float64(1), + "k2": "s", + "k3": []interface{}{float64(1), float64(2.0), float64(3e-3)}, + "k4": map[string]interface{}{"kk1": "s", "kk2": float64(2)}, +} + +var ifaceNumAsNumber = map[string]interface{}{ + "k1": Number("1"), + "k2": "s", + "k3": []interface{}{Number("1"), Number("2.0"), Number("3e-3")}, + "k4": map[string]interface{}{"kk1": "s", "kk2": Number("2")}, +} + +type tx struct { + x int +} + +// A type that can unmarshal itself. + +type unmarshaler struct { + T bool +} + +func (u *unmarshaler) UnmarshalJSON(b []byte) error { + *u = unmarshaler{true} // All we need to see that UnmarshalJSON is called. 
+ return nil +} + +type ustruct struct { + M unmarshaler +} + +type unmarshalerText struct { + A, B string +} + +// needed for re-marshaling tests +func (u unmarshalerText) MarshalText() ([]byte, error) { + return []byte(u.A + ":" + u.B), nil +} + +func (u *unmarshalerText) UnmarshalText(b []byte) error { + pos := bytes.Index(b, []byte(":")) + if pos == -1 { + return errors.New("missing separator") + } + u.A, u.B = string(b[:pos]), string(b[pos+1:]) + return nil +} + +var _ encoding.TextUnmarshaler = (*unmarshalerText)(nil) + +type ustructText struct { + M unmarshalerText +} + +var ( + um0, um1 unmarshaler // target2 of unmarshaling + ump = &um1 + umtrue = unmarshaler{true} + umslice = []unmarshaler{{true}} + umslicep = new([]unmarshaler) + umstruct = ustruct{unmarshaler{true}} + + um0T, um1T unmarshalerText // target2 of unmarshaling + umpType = &um1T + umtrueXY = unmarshalerText{"x", "y"} + umsliceXY = []unmarshalerText{{"x", "y"}} + umslicepType = new([]unmarshalerText) + umstructType = new(ustructText) + umstructXY = ustructText{unmarshalerText{"x", "y"}} + + ummapType = map[unmarshalerText]bool{} + ummapXY = map[unmarshalerText]bool{unmarshalerText{"x", "y"}: true} +) + +// Test data structures for anonymous fields. 
+ +type Point struct { + Z int +} + +type Top struct { + Level0 int + Embed0 + *Embed0a + *Embed0b `json:"e,omitempty"` // treated as named + Embed0c `json:"-"` // ignored + Loop + Embed0p // has Point with X, Y, used + Embed0q // has Point with Z, used + embed // contains exported field +} + +type Embed0 struct { + Level1a int // overridden by Embed0a's Level1a with json tag + Level1b int // used because Embed0a's Level1b is renamed + Level1c int // used because Embed0a's Level1c is ignored + Level1d int // annihilated by Embed0a's Level1d + Level1e int `json:"x"` // annihilated by Embed0a.Level1e +} + +type Embed0a struct { + Level1a int `json:"Level1a,omitempty"` + Level1b int `json:"LEVEL1B,omitempty"` + Level1c int `json:"-"` + Level1d int // annihilated by Embed0's Level1d + Level1f int `json:"x"` // annihilated by Embed0's Level1e +} + +type Embed0b Embed0 + +type Embed0c Embed0 + +type Embed0p struct { + image.Point +} + +type Embed0q struct { + Point +} + +type embed struct { + Q int +} + +type Loop struct { + Loop1 int `json:",omitempty"` + Loop2 int `json:",omitempty"` + *Loop +} + +// From reflect test: +// The X in S6 and S7 annihilate, but they also block the X in S8.S9. +type S5 struct { + S6 + S7 + S8 +} + +type S6 struct { + X int +} + +type S7 S6 + +type S8 struct { + S9 +} + +type S9 struct { + X int + Y int +} + +// From reflect test: +// The X in S11.S6 and S12.S6 annihilate, but they also block the X in S13.S8.S9. +type S10 struct { + S11 + S12 + S13 +} + +type S11 struct { + S6 +} + +type S12 struct { + S6 +} + +type S13 struct { + S8 +} + +type unmarshalTest struct { + in string + ptr interface{} + out interface{} + err error + useNumber bool +} + +type Ambig struct { + // Given "hello", the first match should win. 
+ First int `json:"HELLO"` + Second int `json:"Hello"` +} + +type XYZ struct { + X interface{} + Y interface{} + Z interface{} +} + +func sliceAddr(x []int) *[]int { return &x } +func mapAddr(x map[string]int) *map[string]int { return &x } + +var unmarshalTests = []unmarshalTest{ + // basic types + {in: `true`, ptr: new(bool), out: true}, + {in: `1`, ptr: new(int), out: 1}, + {in: `1.2`, ptr: new(float64), out: 1.2}, + {in: `-5`, ptr: new(int16), out: int16(-5)}, + {in: `2`, ptr: new(Number), out: Number("2"), useNumber: true}, + {in: `2`, ptr: new(Number), out: Number("2")}, + {in: `2`, ptr: new(interface{}), out: float64(2.0)}, + {in: `2`, ptr: new(interface{}), out: Number("2"), useNumber: true}, + {in: `"a\u1234"`, ptr: new(string), out: "a\u1234"}, + {in: `"http:\/\/"`, ptr: new(string), out: "http://"}, + {in: `"g-clef: \uD834\uDD1E"`, ptr: new(string), out: "g-clef: \U0001D11E"}, + {in: `"invalid: \uD834x\uDD1E"`, ptr: new(string), out: "invalid: \uFFFDx\uFFFD"}, + {in: "null", ptr: new(interface{}), out: nil}, + {in: `{"X": [1,2,3], "Y": 4}`, ptr: new(T), out: T{Y: 4}, err: &UnmarshalTypeError{"array", reflect.TypeOf(""), 7}}, + {in: `{"x": 1}`, ptr: new(tx), out: tx{}}, + {in: `{"F1":1,"F2":2,"F3":3}`, ptr: new(V), out: V{F1: float64(1), F2: int32(2), F3: Number("3")}}, + {in: `{"F1":1,"F2":2,"F3":3}`, ptr: new(V), out: V{F1: Number("1"), F2: int32(2), F3: Number("3")}, useNumber: true}, + {in: `{"k1":1,"k2":"s","k3":[1,2.0,3e-3],"k4":{"kk1":"s","kk2":2}}`, ptr: new(interface{}), out: ifaceNumAsFloat64}, + {in: `{"k1":1,"k2":"s","k3":[1,2.0,3e-3],"k4":{"kk1":"s","kk2":2}}`, ptr: new(interface{}), out: ifaceNumAsNumber, useNumber: true}, + + // raw values with whitespace + {in: "\n true ", ptr: new(bool), out: true}, + {in: "\t 1 ", ptr: new(int), out: 1}, + {in: "\r 1.2 ", ptr: new(float64), out: 1.2}, + {in: "\t -5 \n", ptr: new(int16), out: int16(-5)}, + {in: "\t \"a\\u1234\" \n", ptr: new(string), out: "a\u1234"}, + + // Z has a "-" tag. 
+ {in: `{"Y": 1, "Z": 2}`, ptr: new(T), out: T{Y: 1}}, + + {in: `{"alpha": "abc", "alphabet": "xyz"}`, ptr: new(U), out: U{Alphabet: "abc"}}, + {in: `{"alpha": "abc"}`, ptr: new(U), out: U{Alphabet: "abc"}}, + {in: `{"alphabet": "xyz"}`, ptr: new(U), out: U{}}, + + // syntax errors + {in: `{"X": "foo", "Y"}`, err: &SyntaxError{"invalid character '}' after object key", 17}}, + {in: `[1, 2, 3+]`, err: &SyntaxError{"invalid character '+' after array element", 9}}, + {in: `{"X":12x}`, err: &SyntaxError{"invalid character 'x' after object key:value pair", 8}, useNumber: true}, + + // raw value errors + {in: "\x01 42", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}}, + {in: " 42 \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 5}}, + {in: "\x01 true", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}}, + {in: " false \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 8}}, + {in: "\x01 1.2", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}}, + {in: " 3.4 \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 6}}, + {in: "\x01 \"string\"", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}}, + {in: " \"string\" \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 11}}, + + // array tests + {in: `[1, 2, 3]`, ptr: new([3]int), out: [3]int{1, 2, 3}}, + {in: `[1, 2, 3]`, ptr: new([1]int), out: [1]int{1}}, + {in: `[1, 2, 3]`, ptr: new([5]int), out: [5]int{1, 2, 3, 0, 0}}, + + // empty array to interface test + {in: `[]`, ptr: new([]interface{}), out: []interface{}{}}, + {in: `null`, ptr: new([]interface{}), out: []interface{}(nil)}, + {in: `{"T":[]}`, ptr: new(map[string]interface{}), out: map[string]interface{}{"T": []interface{}{}}}, + {in: `{"T":null}`, ptr: new(map[string]interface{}), out: map[string]interface{}{"T": interface{}(nil)}}, + + // composite 
tests + {in: allValueIndent, ptr: new(All), out: allValue}, + {in: allValueCompact, ptr: new(All), out: allValue}, + {in: allValueIndent, ptr: new(*All), out: &allValue}, + {in: allValueCompact, ptr: new(*All), out: &allValue}, + {in: pallValueIndent, ptr: new(All), out: pallValue}, + {in: pallValueCompact, ptr: new(All), out: pallValue}, + {in: pallValueIndent, ptr: new(*All), out: &pallValue}, + {in: pallValueCompact, ptr: new(*All), out: &pallValue}, + + // unmarshal interface test + {in: `{"T":false}`, ptr: &um0, out: umtrue}, // use "false" so test will fail if custom unmarshaler is not called + {in: `{"T":false}`, ptr: &ump, out: &umtrue}, + {in: `[{"T":false}]`, ptr: &umslice, out: umslice}, + {in: `[{"T":false}]`, ptr: &umslicep, out: &umslice}, + {in: `{"M":{"T":"x:y"}}`, ptr: &umstruct, out: umstruct}, + + // UnmarshalText interface test + {in: `"x:y"`, ptr: &um0T, out: umtrueXY}, + {in: `"x:y"`, ptr: &umpType, out: &umtrueXY}, + {in: `["x:y"]`, ptr: &umsliceXY, out: umsliceXY}, + {in: `["x:y"]`, ptr: &umslicepType, out: &umsliceXY}, + {in: `{"M":"x:y"}`, ptr: umstructType, out: umstructXY}, + + // Map keys can be encoding.TextUnmarshalers + {in: `{"x:y":true}`, ptr: &ummapType, out: ummapXY}, + // If multiple values for the same key exists, only the most recent value is used. + {in: `{"x:y":false,"x:y":true}`, ptr: &ummapType, out: ummapXY}, + + // Overwriting of data. + // This is different from package xml, but it's what we've always done. + // Now documented and tested. 
+ {in: `[2]`, ptr: sliceAddr([]int{1}), out: []int{2}}, + {in: `{"key": 2}`, ptr: mapAddr(map[string]int{"old": 0, "key": 1}), out: map[string]int{"key": 2}}, + + { + in: `{ + "Level0": 1, + "Level1b": 2, + "Level1c": 3, + "x": 4, + "Level1a": 5, + "LEVEL1B": 6, + "e": { + "Level1a": 8, + "Level1b": 9, + "Level1c": 10, + "Level1d": 11, + "x": 12 + }, + "Loop1": 13, + "Loop2": 14, + "X": 15, + "Y": 16, + "Z": 17, + "Q": 18 + }`, + ptr: new(Top), + out: Top{ + Level0: 1, + Embed0: Embed0{ + Level1b: 2, + Level1c: 3, + }, + Embed0a: &Embed0a{ + Level1a: 5, + Level1b: 6, + }, + Embed0b: &Embed0b{ + Level1a: 8, + Level1b: 9, + Level1c: 10, + Level1d: 11, + Level1e: 12, + }, + Loop: Loop{ + Loop1: 13, + Loop2: 14, + }, + Embed0p: Embed0p{ + Point: image.Point{X: 15, Y: 16}, + }, + Embed0q: Embed0q{ + Point: Point{Z: 17}, + }, + embed: embed{ + Q: 18, + }, + }, + }, + { + in: `{"hello": 1}`, + ptr: new(Ambig), + out: Ambig{First: 1}, + }, + + { + in: `{"X": 1,"Y":2}`, + ptr: new(S5), + out: S5{S8: S8{S9: S9{Y: 2}}}, + }, + { + in: `{"X": 1,"Y":2}`, + ptr: new(S10), + out: S10{S13: S13{S8: S8{S9: S9{Y: 2}}}}, + }, + + // invalid UTF-8 is coerced to valid UTF-8. + { + in: "\"hello\xffworld\"", + ptr: new(string), + out: "hello\ufffdworld", + }, + { + in: "\"hello\xc2\xc2world\"", + ptr: new(string), + out: "hello\ufffd\ufffdworld", + }, + { + in: "\"hello\xc2\xffworld\"", + ptr: new(string), + out: "hello\ufffd\ufffdworld", + }, + { + in: "\"hello\\ud800world\"", + ptr: new(string), + out: "hello\ufffdworld", + }, + { + in: "\"hello\\ud800\\ud800world\"", + ptr: new(string), + out: "hello\ufffd\ufffdworld", + }, + { + in: "\"hello\\ud800\\ud800world\"", + ptr: new(string), + out: "hello\ufffd\ufffdworld", + }, + { + in: "\"hello\xed\xa0\x80\xed\xb0\x80world\"", + ptr: new(string), + out: "hello\ufffd\ufffd\ufffd\ufffd\ufffd\ufffdworld", + }, + + // Used to be issue 8305, but time.Time implements encoding.TextUnmarshaler so this works now. 
+ { + in: `{"2009-11-10T23:00:00Z": "hello world"}`, + ptr: &map[time.Time]string{}, + out: map[time.Time]string{time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC): "hello world"}, + }, + + // issue 8305 + { + in: `{"2009-11-10T23:00:00Z": "hello world"}`, + ptr: &map[Point]string{}, + err: &UnmarshalTypeError{"object", reflect.TypeOf(map[Point]string{}), 1}, + }, + { + in: `{"asdf": "hello world"}`, + ptr: &map[unmarshaler]string{}, + err: &UnmarshalTypeError{"object", reflect.TypeOf(map[unmarshaler]string{}), 1}, + }, +} + +func TestMarshal(t *testing.T) { + b, err := Marshal(allValue) + if err != nil { + t.Fatalf("Marshal allValue: %v", err) + } + if string(b) != allValueCompact { + t.Errorf("Marshal allValueCompact") + diff(t, b, []byte(allValueCompact)) + return + } + + b, err = Marshal(pallValue) + if err != nil { + t.Fatalf("Marshal pallValue: %v", err) + } + if string(b) != pallValueCompact { + t.Errorf("Marshal pallValueCompact") + diff(t, b, []byte(pallValueCompact)) + return + } +} + +var badUTF8 = []struct { + in, out string +}{ + {"hello\xffworld", `"hello\ufffdworld"`}, + {"", `""`}, + {"\xff", `"\ufffd"`}, + {"\xff\xff", `"\ufffd\ufffd"`}, + {"a\xffb", `"a\ufffdb"`}, + {"\xe6\x97\xa5\xe6\x9c\xac\xff\xaa\x9e", `"日本\ufffd\ufffd\ufffd"`}, +} + +func TestMarshalBadUTF8(t *testing.T) { + for _, tt := range badUTF8 { + b, err := Marshal(tt.in) + if string(b) != tt.out || err != nil { + t.Errorf("Marshal(%q) = %#q, %v, want %#q, nil", tt.in, b, err, tt.out) + } + } +} + +func TestMarshalNumberZeroVal(t *testing.T) { + var n Number + out, err := Marshal(n) + if err != nil { + t.Fatal(err) + } + outStr := string(out) + if outStr != "0" { + t.Fatalf("Invalid zero val for Number: %q", outStr) + } +} + +func TestMarshalEmbeds(t *testing.T) { + top := &Top{ + Level0: 1, + Embed0: Embed0{ + Level1b: 2, + Level1c: 3, + }, + Embed0a: &Embed0a{ + Level1a: 5, + Level1b: 6, + }, + Embed0b: &Embed0b{ + Level1a: 8, + Level1b: 9, + Level1c: 10, + Level1d: 11, + Level1e: 12, + 
}, + Loop: Loop{ + Loop1: 13, + Loop2: 14, + }, + Embed0p: Embed0p{ + Point: image.Point{X: 15, Y: 16}, + }, + Embed0q: Embed0q{ + Point: Point{Z: 17}, + }, + embed: embed{ + Q: 18, + }, + } + b, err := Marshal(top) + if err != nil { + t.Fatal(err) + } + want := "{\"Level0\":1,\"Level1b\":2,\"Level1c\":3,\"Level1a\":5,\"LEVEL1B\":6,\"e\":{\"Level1a\":8,\"Level1b\":9,\"Level1c\":10,\"Level1d\":11,\"x\":12},\"Loop1\":13,\"Loop2\":14,\"X\":15,\"Y\":16,\"Z\":17,\"Q\":18}" + if string(b) != want { + t.Errorf("Wrong marshal result.\n got: %q\nwant: %q", b, want) + } +} + +func TestUnmarshal(t *testing.T) { + for i, tt := range unmarshalTests { + var scan scanner + in := []byte(tt.in) + if err := checkValid(in, &scan); err != nil { + if !reflect.DeepEqual(err, tt.err) { + t.Errorf("#%d: checkValid: %#v", i, err) + continue + } + } + if tt.ptr == nil { + continue + } + + // v = new(right-type) + v := reflect.New(reflect.TypeOf(tt.ptr).Elem()) + dec := NewDecoder(bytes.NewReader(in)) + if tt.useNumber { + dec.UseNumber() + } + if err := dec.Decode(v.Interface()); !reflect.DeepEqual(err, tt.err) { + t.Errorf("#%d: %v, want %v", i, err, tt.err) + continue + } else if err != nil { + continue + } + if !reflect.DeepEqual(v.Elem().Interface(), tt.out) { + t.Errorf("#%d: mismatch\nhave: %#+v\nwant: %#+v", i, v.Elem().Interface(), tt.out) + data, _ := Marshal(v.Elem().Interface()) + println(string(data)) + data, _ = Marshal(tt.out) + println(string(data)) + continue + } + + // Check round trip. 
+ if tt.err == nil { + enc, err := Marshal(v.Interface()) + if err != nil { + t.Errorf("#%d: error re-marshaling: %v", i, err) + continue + } + vv := reflect.New(reflect.TypeOf(tt.ptr).Elem()) + dec = NewDecoder(bytes.NewReader(enc)) + if tt.useNumber { + dec.UseNumber() + } + if err := dec.Decode(vv.Interface()); err != nil { + t.Errorf("#%d: error re-unmarshaling %#q: %v", i, enc, err) + continue + } + if !reflect.DeepEqual(v.Elem().Interface(), vv.Elem().Interface()) { + t.Errorf("#%d: mismatch\nhave: %#+v\nwant: %#+v", i, v.Elem().Interface(), vv.Elem().Interface()) + t.Errorf(" In: %q", strings.Map(noSpace, string(in))) + t.Errorf("Marshal: %q", strings.Map(noSpace, string(enc))) + continue + } + } + } +} + +func TestUnmarshalMarshal(t *testing.T) { + initBig() + var v interface{} + if err := Unmarshal(jsonBig, &v); err != nil { + t.Fatalf("Unmarshal: %v", err) + } + b, err := Marshal(v) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + if !bytes.Equal(jsonBig, b) { + t.Errorf("Marshal jsonBig") + diff(t, b, jsonBig) + return + } +} + +var numberTests = []struct { + in string + i int64 + intErr string + f float64 + floatErr string +}{ + {in: "-1.23e1", intErr: "strconv.ParseInt: parsing \"-1.23e1\": invalid syntax", f: -1.23e1}, + {in: "-12", i: -12, f: -12.0}, + {in: "1e1000", intErr: "strconv.ParseInt: parsing \"1e1000\": invalid syntax", floatErr: "strconv.ParseFloat: parsing \"1e1000\": value out of range"}, +} + +// Independent of Decode, basic coverage of the accessors in Number +func TestNumberAccessors(t *testing.T) { + for _, tt := range numberTests { + n := Number(tt.in) + if s := n.String(); s != tt.in { + t.Errorf("Number(%q).String() is %q", tt.in, s) + } + if i, err := n.Int64(); err == nil && tt.intErr == "" && i != tt.i { + t.Errorf("Number(%q).Int64() is %d", tt.in, i) + } else if (err == nil && tt.intErr != "") || (err != nil && err.Error() != tt.intErr) { + t.Errorf("Number(%q).Int64() wanted error %q but got: %v", tt.in, tt.intErr, 
err) + } + if f, err := n.Float64(); err == nil && tt.floatErr == "" && f != tt.f { + t.Errorf("Number(%q).Float64() is %g", tt.in, f) + } else if (err == nil && tt.floatErr != "") || (err != nil && err.Error() != tt.floatErr) { + t.Errorf("Number(%q).Float64() wanted error %q but got: %v", tt.in, tt.floatErr, err) + } + } +} + +func TestLargeByteSlice(t *testing.T) { + s0 := make([]byte, 2000) + for i := range s0 { + s0[i] = byte(i) + } + b, err := Marshal(s0) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + var s1 []byte + if err := Unmarshal(b, &s1); err != nil { + t.Fatalf("Unmarshal: %v", err) + } + if !bytes.Equal(s0, s1) { + t.Errorf("Marshal large byte slice") + diff(t, s0, s1) + } +} + +type Xint struct { + X int +} + +func TestUnmarshalInterface(t *testing.T) { + var xint Xint + var i interface{} = &xint + if err := Unmarshal([]byte(`{"X":1}`), &i); err != nil { + t.Fatalf("Unmarshal: %v", err) + } + if xint.X != 1 { + t.Fatalf("Did not write to xint") + } +} + +func TestUnmarshalPtrPtr(t *testing.T) { + var xint Xint + pxint := &xint + if err := Unmarshal([]byte(`{"X":1}`), &pxint); err != nil { + t.Fatalf("Unmarshal: %v", err) + } + if xint.X != 1 { + t.Fatalf("Did not write to xint") + } +} + +func TestEscape(t *testing.T) { + const input = `"foobar"` + " [\u2028 \u2029]" + const expected = `"\"foobar\"\u003chtml\u003e [\u2028 \u2029]"` + b, err := Marshal(input) + if err != nil { + t.Fatalf("Marshal error: %v", err) + } + if s := string(b); s != expected { + t.Errorf("Encoding of [%s]:\n got [%s]\nwant [%s]", input, s, expected) + } +} + +// WrongString is a struct that's misusing the ,string modifier. 
+type WrongString struct { + Message string `json:"result,string"` +} + +type wrongStringTest struct { + in, err string +} + +var wrongStringTests = []wrongStringTest{ + {`{"result":"x"}`, `json: invalid use of ,string struct tag, trying to unmarshal "x" into string`}, + {`{"result":"foo"}`, `json: invalid use of ,string struct tag, trying to unmarshal "foo" into string`}, + {`{"result":"123"}`, `json: invalid use of ,string struct tag, trying to unmarshal "123" into string`}, + {`{"result":123}`, `json: invalid use of ,string struct tag, trying to unmarshal unquoted value into string`}, +} + +// If people misuse the ,string modifier, the error message should be +// helpful, telling the user that they're doing it wrong. +func TestErrorMessageFromMisusedString(t *testing.T) { + for n, tt := range wrongStringTests { + r := strings.NewReader(tt.in) + var s WrongString + err := NewDecoder(r).Decode(&s) + got := fmt.Sprintf("%v", err) + if got != tt.err { + t.Errorf("%d. got err = %q, want %q", n, got, tt.err) + } + } +} + +func noSpace(c rune) rune { + if isSpace(byte(c)) { //only used for ascii + return -1 + } + return c +} + +type All struct { + Bool bool + Int int + Int8 int8 + Int16 int16 + Int32 int32 + Int64 int64 + Uint uint + Uint8 uint8 + Uint16 uint16 + Uint32 uint32 + Uint64 uint64 + Uintptr uintptr + Float32 float32 + Float64 float64 + + Foo string `json:"bar"` + Foo2 string `json:"bar2,dummyopt"` + + IntStr int64 `json:",string"` + + PBool *bool + PInt *int + PInt8 *int8 + PInt16 *int16 + PInt32 *int32 + PInt64 *int64 + PUint *uint + PUint8 *uint8 + PUint16 *uint16 + PUint32 *uint32 + PUint64 *uint64 + PUintptr *uintptr + PFloat32 *float32 + PFloat64 *float64 + + String string + PString *string + + Map map[string]Small + MapP map[string]*Small + PMap *map[string]Small + PMapP *map[string]*Small + + EmptyMap map[string]Small + NilMap map[string]Small + + Slice []Small + SliceP []*Small + PSlice *[]Small + PSliceP *[]*Small + + EmptySlice []Small + NilSlice 
[]Small + + StringSlice []string + ByteSlice []byte + + Small Small + PSmall *Small + PPSmall **Small + + Interface interface{} + PInterface *interface{} + + unexported int +} + +type Small struct { + Tag string +} + +var allValue = All{ + Bool: true, + Int: 2, + Int8: 3, + Int16: 4, + Int32: 5, + Int64: 6, + Uint: 7, + Uint8: 8, + Uint16: 9, + Uint32: 10, + Uint64: 11, + Uintptr: 12, + Float32: 14.1, + Float64: 15.1, + Foo: "foo", + Foo2: "foo2", + IntStr: 42, + String: "16", + Map: map[string]Small{ + "17": {Tag: "tag17"}, + "18": {Tag: "tag18"}, + }, + MapP: map[string]*Small{ + "19": {Tag: "tag19"}, + "20": nil, + }, + EmptyMap: map[string]Small{}, + Slice: []Small{{Tag: "tag20"}, {Tag: "tag21"}}, + SliceP: []*Small{{Tag: "tag22"}, nil, {Tag: "tag23"}}, + EmptySlice: []Small{}, + StringSlice: []string{"str24", "str25", "str26"}, + ByteSlice: []byte{27, 28, 29}, + Small: Small{Tag: "tag30"}, + PSmall: &Small{Tag: "tag31"}, + Interface: 5.2, +} + +var pallValue = All{ + PBool: &allValue.Bool, + PInt: &allValue.Int, + PInt8: &allValue.Int8, + PInt16: &allValue.Int16, + PInt32: &allValue.Int32, + PInt64: &allValue.Int64, + PUint: &allValue.Uint, + PUint8: &allValue.Uint8, + PUint16: &allValue.Uint16, + PUint32: &allValue.Uint32, + PUint64: &allValue.Uint64, + PUintptr: &allValue.Uintptr, + PFloat32: &allValue.Float32, + PFloat64: &allValue.Float64, + PString: &allValue.String, + PMap: &allValue.Map, + PMapP: &allValue.MapP, + PSlice: &allValue.Slice, + PSliceP: &allValue.SliceP, + PPSmall: &allValue.PSmall, + PInterface: &allValue.Interface, +} + +var allValueIndent = `{ + "Bool": true, + "Int": 2, + "Int8": 3, + "Int16": 4, + "Int32": 5, + "Int64": 6, + "Uint": 7, + "Uint8": 8, + "Uint16": 9, + "Uint32": 10, + "Uint64": 11, + "Uintptr": 12, + "Float32": 14.1, + "Float64": 15.1, + "bar": "foo", + "bar2": "foo2", + "IntStr": "42", + "PBool": null, + "PInt": null, + "PInt8": null, + "PInt16": null, + "PInt32": null, + "PInt64": null, + "PUint": null, + "PUint8": 
null, + "PUint16": null, + "PUint32": null, + "PUint64": null, + "PUintptr": null, + "PFloat32": null, + "PFloat64": null, + "String": "16", + "PString": null, + "Map": { + "17": { + "Tag": "tag17" + }, + "18": { + "Tag": "tag18" + } + }, + "MapP": { + "19": { + "Tag": "tag19" + }, + "20": null + }, + "PMap": null, + "PMapP": null, + "EmptyMap": {}, + "NilMap": null, + "Slice": [ + { + "Tag": "tag20" + }, + { + "Tag": "tag21" + } + ], + "SliceP": [ + { + "Tag": "tag22" + }, + null, + { + "Tag": "tag23" + } + ], + "PSlice": null, + "PSliceP": null, + "EmptySlice": [], + "NilSlice": null, + "StringSlice": [ + "str24", + "str25", + "str26" + ], + "ByteSlice": "Gxwd", + "Small": { + "Tag": "tag30" + }, + "PSmall": { + "Tag": "tag31" + }, + "PPSmall": null, + "Interface": 5.2, + "PInterface": null +}` + +var allValueCompact = strings.Map(noSpace, allValueIndent) + +var pallValueIndent = `{ + "Bool": false, + "Int": 0, + "Int8": 0, + "Int16": 0, + "Int32": 0, + "Int64": 0, + "Uint": 0, + "Uint8": 0, + "Uint16": 0, + "Uint32": 0, + "Uint64": 0, + "Uintptr": 0, + "Float32": 0, + "Float64": 0, + "bar": "", + "bar2": "", + "IntStr": "0", + "PBool": true, + "PInt": 2, + "PInt8": 3, + "PInt16": 4, + "PInt32": 5, + "PInt64": 6, + "PUint": 7, + "PUint8": 8, + "PUint16": 9, + "PUint32": 10, + "PUint64": 11, + "PUintptr": 12, + "PFloat32": 14.1, + "PFloat64": 15.1, + "String": "", + "PString": "16", + "Map": null, + "MapP": null, + "PMap": { + "17": { + "Tag": "tag17" + }, + "18": { + "Tag": "tag18" + } + }, + "PMapP": { + "19": { + "Tag": "tag19" + }, + "20": null + }, + "EmptyMap": null, + "NilMap": null, + "Slice": null, + "SliceP": null, + "PSlice": [ + { + "Tag": "tag20" + }, + { + "Tag": "tag21" + } + ], + "PSliceP": [ + { + "Tag": "tag22" + }, + null, + { + "Tag": "tag23" + } + ], + "EmptySlice": null, + "NilSlice": null, + "StringSlice": null, + "ByteSlice": null, + "Small": { + "Tag": "" + }, + "PSmall": null, + "PPSmall": { + "Tag": "tag31" + }, + "Interface": null, + 
"PInterface": 5.2 +}` + +var pallValueCompact = strings.Map(noSpace, pallValueIndent) + +func TestRefUnmarshal(t *testing.T) { + type S struct { + // Ref is defined in encode_test.go. + R0 Ref + R1 *Ref + R2 RefText + R3 *RefText + } + want := S{ + R0: 12, + R1: new(Ref), + R2: 13, + R3: new(RefText), + } + *want.R1 = 12 + *want.R3 = 13 + + var got S + if err := Unmarshal([]byte(`{"R0":"ref","R1":"ref","R2":"ref","R3":"ref"}`), &got); err != nil { + t.Fatalf("Unmarshal: %v", err) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } +} + +// Test that the empty string doesn't panic decoding when ,string is specified +// Issue 3450 +func TestEmptyString(t *testing.T) { + type T2 struct { + Number1 int `json:",string"` + Number2 int `json:",string"` + } + data := `{"Number1":"1", "Number2":""}` + dec := NewDecoder(strings.NewReader(data)) + var t2 T2 + err := dec.Decode(&t2) + if err == nil { + t.Fatal("Decode: did not return error") + } + if t2.Number1 != 1 { + t.Fatal("Decode: did not set Number1") + } +} + +// Test that a null for ,string is not replaced with the previous quoted string (issue 7046). +// It should also not be an error (issue 2540, issue 8587). 
+func TestNullString(t *testing.T) { + type T struct { + A int `json:",string"` + B int `json:",string"` + C *int `json:",string"` + } + data := []byte(`{"A": "1", "B": null, "C": null}`) + var s T + s.B = 1 + s.C = new(int) + *s.C = 2 + err := Unmarshal(data, &s) + if err != nil { + t.Fatalf("Unmarshal: %v", err) + } + if s.B != 1 || s.C != nil { + t.Fatalf("after Unmarshal, s.B=%d, s.C=%p, want 1, nil", s.B, s.C) + } +} + +func intp(x int) *int { + p := new(int) + *p = x + return p +} + +func intpp(x *int) **int { + pp := new(*int) + *pp = x + return pp +} + +var interfaceSetTests = []struct { + pre interface{} + json string + post interface{} +}{ + {"foo", `"bar"`, "bar"}, + {"foo", `2`, 2.0}, + {"foo", `true`, true}, + {"foo", `null`, nil}, + + {nil, `null`, nil}, + {new(int), `null`, nil}, + {(*int)(nil), `null`, nil}, + {new(*int), `null`, new(*int)}, + {(**int)(nil), `null`, nil}, + {intp(1), `null`, nil}, + {intpp(nil), `null`, intpp(nil)}, + {intpp(intp(1)), `null`, intpp(nil)}, +} + +func TestInterfaceSet(t *testing.T) { + for _, tt := range interfaceSetTests { + b := struct{ X interface{} }{tt.pre} + blob := `{"X":` + tt.json + `}` + if err := Unmarshal([]byte(blob), &b); err != nil { + t.Errorf("Unmarshal %#q: %v", blob, err) + continue + } + if !reflect.DeepEqual(b.X, tt.post) { + t.Errorf("Unmarshal %#q into %#v: X=%#v, want %#v", blob, tt.pre, b.X, tt.post) + } + } +} + +// JSON null values should be ignored for primitives and string values instead of resulting in an error. 
+// Issue 2540 +func TestUnmarshalNulls(t *testing.T) { + jsonData := []byte(`{ + "Bool" : null, + "Int" : null, + "Int8" : null, + "Int16" : null, + "Int32" : null, + "Int64" : null, + "Uint" : null, + "Uint8" : null, + "Uint16" : null, + "Uint32" : null, + "Uint64" : null, + "Float32" : null, + "Float64" : null, + "String" : null}`) + + nulls := All{ + Bool: true, + Int: 2, + Int8: 3, + Int16: 4, + Int32: 5, + Int64: 6, + Uint: 7, + Uint8: 8, + Uint16: 9, + Uint32: 10, + Uint64: 11, + Float32: 12.1, + Float64: 13.1, + String: "14"} + + err := Unmarshal(jsonData, &nulls) + if err != nil { + t.Errorf("Unmarshal of null values failed: %v", err) + } + if !nulls.Bool || nulls.Int != 2 || nulls.Int8 != 3 || nulls.Int16 != 4 || nulls.Int32 != 5 || nulls.Int64 != 6 || + nulls.Uint != 7 || nulls.Uint8 != 8 || nulls.Uint16 != 9 || nulls.Uint32 != 10 || nulls.Uint64 != 11 || + nulls.Float32 != 12.1 || nulls.Float64 != 13.1 || nulls.String != "14" { + + t.Errorf("Unmarshal of null values affected primitives") + } +} + +func TestStringKind(t *testing.T) { + type stringKind string + + var m1, m2 map[stringKind]int + m1 = map[stringKind]int{ + "foo": 42, + } + + data, err := Marshal(m1) + if err != nil { + t.Errorf("Unexpected error marshaling: %v", err) + } + + err = Unmarshal(data, &m2) + if err != nil { + t.Errorf("Unexpected error unmarshaling: %v", err) + } + + if !reflect.DeepEqual(m1, m2) { + t.Error("Items should be equal after encoding and then decoding") + } +} + +// Custom types with []byte as underlying type could not be marshalled +// and then unmarshalled. +// Issue 8962. +func TestByteKind(t *testing.T) { + type byteKind []byte + + a := byteKind("hello") + + data, err := Marshal(a) + if err != nil { + t.Error(err) + } + var b byteKind + err = Unmarshal(data, &b) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(a, b) { + t.Errorf("expected %v == %v", a, b) + } +} + +// The fix for issue 8962 introduced a regression. +// Issue 12921. 
+func TestSliceOfCustomByte(t *testing.T) { + type Uint8 uint8 + + a := []Uint8("hello") + + data, err := Marshal(a) + if err != nil { + t.Fatal(err) + } + var b []Uint8 + err = Unmarshal(data, &b) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(a, b) { + t.Fatalf("expected %v == %v", a, b) + } +} + +var decodeTypeErrorTests = []struct { + dest interface{} + src string +}{ + {new(string), `{"user": "name"}`}, // issue 4628. + {new(error), `{}`}, // issue 4222 + {new(error), `[]`}, + {new(error), `""`}, + {new(error), `123`}, + {new(error), `true`}, +} + +func TestUnmarshalTypeError(t *testing.T) { + for _, item := range decodeTypeErrorTests { + err := Unmarshal([]byte(item.src), item.dest) + if _, ok := err.(*UnmarshalTypeError); !ok { + t.Errorf("expected type error for Unmarshal(%q, type %T): got %T", + item.src, item.dest, err) + } + } +} + +var unmarshalSyntaxTests = []string{ + "tru", + "fals", + "nul", + "123e", + `"hello`, + `[1,2,3`, + `{"key":1`, + `{"key":1,`, +} + +func TestUnmarshalSyntax(t *testing.T) { + var x interface{} + for _, src := range unmarshalSyntaxTests { + err := Unmarshal([]byte(src), &x) + if _, ok := err.(*SyntaxError); !ok { + t.Errorf("expected syntax error for Unmarshal(%q): got %T", src, err) + } + } +} + +// Test handling of unexported fields that should be ignored. +// Issue 4660 +type unexportedFields struct { + Name string + m map[string]interface{} `json:"-"` + m2 map[string]interface{} `json:"abcd"` +} + +func TestUnmarshalUnexported(t *testing.T) { + input := `{"Name": "Bob", "m": {"x": 123}, "m2": {"y": 456}, "abcd": {"z": 789}}` + want := &unexportedFields{Name: "Bob"} + + out := &unexportedFields{} + err := Unmarshal([]byte(input), out) + if err != nil { + t.Errorf("got error %v, expected nil", err) + } + if !reflect.DeepEqual(out, want) { + t.Errorf("got %q, want %q", out, want) + } +} + +// Time3339 is a time.Time which encodes to and from JSON +// as an RFC 3339 time in UTC. 
+type Time3339 time.Time + +func (t *Time3339) UnmarshalJSON(b []byte) error { + if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { + return fmt.Errorf("types: failed to unmarshal non-string value %q as an RFC 3339 time", b) + } + tm, err := time.Parse(time.RFC3339, string(b[1:len(b)-1])) + if err != nil { + return err + } + *t = Time3339(tm) + return nil +} + +func TestUnmarshalJSONLiteralError(t *testing.T) { + var t3 Time3339 + err := Unmarshal([]byte(`"0000-00-00T00:00:00Z"`), &t3) + if err == nil { + t.Fatalf("expected error; got time %v", time.Time(t3)) + } + if !strings.Contains(err.Error(), "range") { + t.Errorf("got err = %v; want out of range error", err) + } +} + +// Test that extra object elements in an array do not result in a +// "data changing underfoot" error. +// Issue 3717 +func TestSkipArrayObjects(t *testing.T) { + json := `[{}]` + var dest [0]interface{} + + err := Unmarshal([]byte(json), &dest) + if err != nil { + t.Errorf("got error %q, want nil", err) + } +} + +// Test semantics of pre-filled struct fields and pre-filled map fields. +// Issue 4900. +func TestPrefilled(t *testing.T) { + ptrToMap := func(m map[string]interface{}) *map[string]interface{} { return &m } + + // Values here change, cannot reuse table across runs. 
+ var prefillTests = []struct { + in string + ptr interface{} + out interface{} + }{ + { + in: `{"X": 1, "Y": 2}`, + ptr: &XYZ{X: float32(3), Y: int16(4), Z: 1.5}, + out: &XYZ{X: float64(1), Y: float64(2), Z: 1.5}, + }, + { + in: `{"X": 1, "Y": 2}`, + ptr: ptrToMap(map[string]interface{}{"X": float32(3), "Y": int16(4), "Z": 1.5}), + out: ptrToMap(map[string]interface{}{"X": float64(1), "Y": float64(2), "Z": 1.5}), + }, + } + + for _, tt := range prefillTests { + ptrstr := fmt.Sprintf("%v", tt.ptr) + err := Unmarshal([]byte(tt.in), tt.ptr) // tt.ptr edited here + if err != nil { + t.Errorf("Unmarshal: %v", err) + } + if !reflect.DeepEqual(tt.ptr, tt.out) { + t.Errorf("Unmarshal(%#q, %s): have %v, want %v", tt.in, ptrstr, tt.ptr, tt.out) + } + } +} + +var invalidUnmarshalTests = []struct { + v interface{} + want string +}{ + {nil, "json: Unmarshal(nil)"}, + {struct{}{}, "json: Unmarshal(non-pointer struct {})"}, + {(*int)(nil), "json: Unmarshal(nil *int)"}, +} + +func TestInvalidUnmarshal(t *testing.T) { + buf := []byte(`{"a":"1"}`) + for _, tt := range invalidUnmarshalTests { + err := Unmarshal(buf, tt.v) + if err == nil { + t.Errorf("Unmarshal expecting error, got nil") + continue + } + if got := err.Error(); got != tt.want { + t.Errorf("Unmarshal = %q; want %q", got, tt.want) + } + } +} + +var invalidUnmarshalTextTests = []struct { + v interface{} + want string +}{ + {nil, "json: Unmarshal(nil)"}, + {struct{}{}, "json: Unmarshal(non-pointer struct {})"}, + {(*int)(nil), "json: Unmarshal(nil *int)"}, + {new(net.IP), "json: cannot unmarshal string into Go value of type *net.IP"}, +} + +func TestInvalidUnmarshalText(t *testing.T) { + buf := []byte(`123`) + for _, tt := range invalidUnmarshalTextTests { + err := Unmarshal(buf, tt.v) + if err == nil { + t.Errorf("Unmarshal expecting error, got nil") + continue + } + if got := err.Error(); got != tt.want { + t.Errorf("Unmarshal = %q; want %q", got, tt.want) + } + } +} + +// Test that string option is ignored for invalid 
types. +// Issue 9812. +func TestInvalidStringOption(t *testing.T) { + num := 0 + item := struct { + T time.Time `json:",string"` + M map[string]string `json:",string"` + S []string `json:",string"` + A [1]string `json:",string"` + I interface{} `json:",string"` + P *int `json:",string"` + }{M: make(map[string]string), S: make([]string, 0), I: num, P: &num} + + data, err := Marshal(item) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + + err = Unmarshal(data, &item) + if err != nil { + t.Fatalf("Unmarshal: %v", err) + } +} diff --git a/internal/json/encode.go b/internal/json/encode.go new file mode 100644 index 000000000..d8c779869 --- /dev/null +++ b/internal/json/encode.go @@ -0,0 +1,1239 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package json implements encoding and decoding of JSON as defined in +// RFC 4627. The mapping between JSON and Go values is described +// in the documentation for the Marshal and Unmarshal functions. +// +// See "JSON and Go" for an introduction to this package: +// https://golang.org/doc/articles/json_and_go.html +package json + +import ( + "bytes" + "encoding" + "encoding/base64" + "fmt" + "math" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// Marshal returns the JSON encoding of v. +// +// Marshal traverses the value v recursively. +// If an encountered value implements the Marshaler interface +// and is not a nil pointer, Marshal calls its MarshalJSON method +// to produce JSON. If no MarshalJSON method is present but the +// value implements encoding.TextMarshaler instead, Marshal calls +// its MarshalText method. +// The nil pointer exception is not strictly necessary +// but mimics a similar, necessary exception in the behavior of +// UnmarshalJSON. 
+// +// Otherwise, Marshal uses the following type-dependent default encodings: +// +// Boolean values encode as JSON booleans. +// +// Floating point, integer, and Number values encode as JSON numbers. +// +// String values encode as JSON strings coerced to valid UTF-8, +// replacing invalid bytes with the Unicode replacement rune. +// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e" +// to keep some browsers from misinterpreting JSON output as HTML. +// Ampersand "&" is also escaped to "\u0026" for the same reason. +// This escaping can be disabled using an Encoder with DisableHTMLEscaping. +// +// Array and slice values encode as JSON arrays, except that +// []byte encodes as a base64-encoded string, and a nil slice +// encodes as the null JSON value. +// +// Struct values encode as JSON objects. Each exported struct field +// becomes a member of the object unless +// - the field's tag is "-", or +// - the field is empty and its tag specifies the "omitempty" option. +// The empty values are false, 0, any +// nil pointer or interface value, and any array, slice, map, or string of +// length zero. The object's default key string is the struct field name +// but can be specified in the struct field's tag value. The "json" key in +// the struct field's tag value is the key name, followed by an optional comma +// and options. Examples: +// +// // Field is ignored by this package. +// Field int `json:"-"` +// +// // Field appears in JSON as key "myName". +// Field int `json:"myName"` +// +// // Field appears in JSON as key "myName" and +// // the field is omitted from the object if its value is empty, +// // as defined above. +// Field int `json:"myName,omitempty"` +// +// // Field appears in JSON as key "Field" (the default), but +// // the field is skipped if empty. +// // Note the leading comma. +// Field int `json:",omitempty"` +// +// The "string" option signals that a field is stored as JSON inside a +// JSON-encoded string. 
It applies only to fields of string, floating point, +// integer, or boolean types. This extra level of encoding is sometimes used +// when communicating with JavaScript programs: +// +// Int64String int64 `json:",string"` +// +// The key name will be used if it's a non-empty string consisting of +// only Unicode letters, digits, dollar signs, percent signs, hyphens, +// underscores and slashes. +// +// Anonymous struct fields are usually marshaled as if their inner exported fields +// were fields in the outer struct, subject to the usual Go visibility rules amended +// as described in the next paragraph. +// An anonymous struct field with a name given in its JSON tag is treated as +// having that name, rather than being anonymous. +// An anonymous struct field of interface type is treated the same as having +// that type as its name, rather than being anonymous. +// +// The Go visibility rules for struct fields are amended for JSON when +// deciding which field to marshal or unmarshal. If there are +// multiple fields at the same level, and that level is the least +// nested (and would therefore be the nesting level selected by the +// usual Go rules), the following extra rules apply: +// +// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered, +// even if there are multiple untagged fields that would otherwise conflict. +// 2) If there is exactly one field (tagged or not according to the first rule), that is selected. +// 3) Otherwise there are multiple fields, and all are ignored; no error occurs. +// +// Handling of anonymous struct fields is new in Go 1.1. +// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of +// an anonymous struct field in both current and earlier versions, give the field +// a JSON tag of "-". +// +// Map values encode as JSON objects. The map's key type must either be a string +// or implement encoding.TextMarshaler. 
The map keys are used as JSON object +// keys, subject to the UTF-8 coercion described for string values above. +// +// Pointer values encode as the value pointed to. +// A nil pointer encodes as the null JSON value. +// +// Interface values encode as the value contained in the interface. +// A nil interface value encodes as the null JSON value. +// +// Channel, complex, and function values cannot be encoded in JSON. +// Attempting to encode such a value causes Marshal to return +// an UnsupportedTypeError. +// +// JSON cannot represent cyclic data structures and Marshal does not +// handle them. Passing cyclic structures to Marshal will result in +// an infinite recursion. +// +func Marshal(v interface{}) ([]byte, error) { + e := &encodeState{} + err := e.marshal(v, encOpts{escapeHTML: true}) + if err != nil { + return nil, err + } + return e.Bytes(), nil +} + +// MarshalIndent is like Marshal but applies Indent to format the output. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + b, err := Marshal(v) + if err != nil { + return nil, err + } + var buf bytes.Buffer + err = Indent(&buf, b, prefix, indent) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029 +// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029 +// so that the JSON will be safe to embed inside HTML