forked from m4ksio/testingdock
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathsuite.go
174 lines (152 loc) · 4.72 KB
/
suite.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
// Package testingdock simplifies integration testing with docker.
//
// Note: this library spawns containers and networks under the label
// 'owner=testingdock', which may be subject to aggressive manipulation
// and cleanup.
//
// Testingdock also makes use of the 'flag' package to set global variables.
// Run `flag.Parse()` in your test suite main function. Possible flags are:
//
// -testingdock.sequential (spawn containers sequentially instead of parallel)
// -testingdock.verbose (verbose logging)
package testingdock
import (
"context"
"flag"
"testing"
"github.com/docker/docker/client"
"github.com/docker/docker/daemon/logger"
)
func init() {
registry = make(map[string]*Suite)
flag.BoolVar(&SpawnSequential, "testingdock.sequential", false, "Spawn containers sequentially instead of parallel (useful for debugging)")
flag.BoolVar(&Verbose, "testingdock.verbose", false, "Verbose logging")
}
// registry holds every suite created through GetOrCreateSuite, keyed by
// suite name. It is populated lazily and emptied by UnregisterAll.
// NOTE(review): access is not synchronized — confirm callers only touch
// the registry from a single goroutine.
var registry map[string]*Suite

// SpawnSequential controls whether to spawn child containers in parallel
// or sequentially. This doesn't spawn
// all containers in parallel, only the ones that are on the same hierarchy level, e.g.:
//
// // c1 and c2 are started in parallel after the network
// network.After(c1)
// network.After(c2)
// // c3 and c4 are started in parallel after c1
// c1.After(c3)
// c1.After(c4)
//
// Set via the -testingdock.sequential flag (see init).
var SpawnSequential bool

// Verbose enables verbose logging, including the daemon log pump started
// by Suite.Start. Set via the -testingdock.verbose flag (see init).
var Verbose bool
// SuiteOpts is an option struct for getting or creating a suite in GetOrCreateSuite.
type SuiteOpts struct {
	// Client is an optional docker client, if one already exists;
	// when nil, GetOrCreateSuite builds one from the environment.
	Client *client.Client
	// Skip controls failure mode on client instantiation errors:
	// true skips the test, false fails it.
	Skip bool
}
// Suite represents a testing suite with a docker setup.
type Suite struct {
	// name is the registry key this suite was registered under.
	name string
	// t is the test (or benchmark) this suite reports failures to.
	t testing.TB
	// cli is the docker client used for all container/network operations.
	cli *client.Client
	// network is the docker network created via Network; nil until then.
	network *Network
	// logWatcher pumps docker daemon logs when Verbose is set; nil until
	// Start is first called with Verbose enabled.
	logWatcher *logger.LogWatcher
}
// GetOrCreateSuite returns the suite registered under name, creating and
// registering a fresh one when none exists yet. The boolean result is true
// when an already-registered suite was returned, false for a new one.
//
// If opts.Client is nil a docker client is built from the environment; on
// failure the test is skipped (opts.Skip) or failed outright.
func GetOrCreateSuite(t testing.TB, name string, opts SuiteOpts) (*Suite, bool) {
	if existing, found := registry[name]; found {
		return existing, true
	}

	cli := opts.Client
	if cli == nil {
		created, err := client.NewClientWithOpts(client.FromEnv)
		if err != nil {
			if opts.Skip {
				t.Skipf("docker client instantiation failure: %s", err.Error())
			} else {
				t.Fatalf("testingdock: docker client instantiation failure: %s", err.Error())
			}
		}
		cli = created
	}

	suite := &Suite{
		name: name,
		t:    t,
		cli:  cli,
	}
	registry[name] = suite
	return suite, false
}
// UnregisterAll closes and removes every registered suite. A failure to
// close one suite is logged and does not prevent the rest from being
// unregistered. (Deleting from the map while ranging over it is safe in Go.)
func UnregisterAll() {
	printf("(unregi) start")
	for name, suite := range registry {
		err := suite.Close()
		if err == nil {
			printf("(unregi) %-25s (%-64s) - suite unregistered", name, "")
		} else {
			printf("(unregi) %-25s (%-64s) - suite unregister failure: %s", name, "", err.Error())
		}
		delete(registry, name)
	}
	printf("(unregi) finished")
}
// Container creates a new docker container configuration with the given
// options, bound to this suite's test handle and docker client.
func (s *Suite) Container(opts ContainerOpts) *Container {
	c := newContainer(s.t, s.cli, opts)
	return c
}
// Network creates a new docker network configuration with the given options
// and remembers it on the suite so Start/Reset/Close/Remove operate on it.
// NOTE(review): a second call overwrites the previous network reference.
func (s *Suite) Network(opts NetworkOpts) *Network {
	n := newNetwork(s.t, s.cli, opts)
	s.network = n
	return n
}
// Reset "resets" the underlying docker containers in the network. This
// calls the ResetFunc and HealthCheckFunc for each of them. These can be passed in
// ContainerOpts when creating a container.
//
// The context is passed explicitly to ResetFunc, where it can be used and
// implicitly to HealthCheckFunc where it may cancel the blocking health
// check loop.
//
// A no-op when no network has been created yet.
func (s *Suite) Reset(ctx context.Context) {
	if s.network == nil {
		return
	}
	s.network.reset(ctx)
}
// Start starts the suite. This starts all networks in the suite and the underlying containers,
// as well as the daemon logger, if Verbosity is enabled.
func (s *Suite) Start(ctx context.Context) {
	// Lazily spin up a single daemon-log pump goroutine; once logWatcher
	// is non-nil, later Start calls skip this branch entirely.
	if s.logWatcher == nil && Verbose {
		printf("(daemon) starting logging")
		s.logWatcher = logger.NewLogWatcher()
		go func() {
			for {
				select {
				case <-ctx.Done():
					printf("(daemon) stopping logging")
					// NOTE(review): the watcher is deliberately(?) not closed
					// here — a Close() call was removed — and s.logWatcher is
					// never reset to nil, so logging cannot restart under a
					// new context. Confirm whether this is intentional.
					return
				case msg := <-s.logWatcher.Msg:
					printf("(daemon) %s", msg.Line)
				case err := <-s.logWatcher.Err:
					printf("(d err ) %s", err)
				}
			}
		}()
	}
	if s.network != nil {
		s.network.start(ctx)
	}
}
// Close stops the suites. This stops all networks in the suite and the underlying containers.
// Returns nil when no network was ever created.
func (s *Suite) Close() error {
	if s.network == nil {
		return nil
	}
	return s.network.close()
}
// Remove removes all the containers in the network.
// Returns nil when no network was ever created.
func (s *Suite) Remove() error {
	if s.network == nil {
		return nil
	}
	return s.network.remove()
}