@@ -32,27 +32,34 @@ import (
 // in the backup chain that need to be compacted. It sends updates from the
-// BulkProcessor to the provided progress channel. It is the caller's
-// responsibility to close the progress channel.
+// BulkProcessor to the provided progress channel and closes the channel when
+// planning is complete.
-func (c *compactionChain) runCompactionPlan(
+func runCompactionPlan(
 	ctx context.Context,
 	execCtx sql.JobExecContext,
 	jobID jobspb.JobID,
-	manifest *backuppb.BackupManifest,
 	details jobspb.BackupDetails,
+	compactChain compactionChain,
+	manifest *backuppb.BackupManifest,
 	defaultStore cloud.ExternalStorage,
 	kmsEnv cloud.KMSEnv,
 	progCh chan *execinfrapb.RemoteProducerMetadata_BulkProcessorProgress,
 ) error {
+	defer close(progCh)
 	log.Infof(
 		ctx, "planning compaction of %d backups: %s",
-		len(c.chainToCompact), util.Map(c.chainToCompact, func(m backuppb.BackupManifest) string {
+		len(compactChain.chainToCompact),
+		util.Map(compactChain.chainToCompact, func(m backuppb.BackupManifest) string {
 			return m.ID.String()
 		}),
 	)
-	backupLocalityMap, err := makeBackupLocalityMap(c.compactedLocalityInfo, execCtx.User())
+	backupLocalityMap, err := makeBackupLocalityMap(
+		compactChain.compactedLocalityInfo, execCtx.User(),
+	)
 	if err != nil {
 		return err
 	}
-	introducedSpanFrontier, err := createIntroducedSpanFrontier(c.backupChain, manifest.EndTime)
+	introducedSpanFrontier, err := createIntroducedSpanFrontier(
+		compactChain.backupChain, manifest.EndTime,
+	)
 	if err != nil {
 		return err
 	}
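With the added defer close(progCh), the planning function now closes the progress channel itself once it returns, so the goroutine that launched it can simply range over the channel until it drains. Below is a minimal sketch of that handoff; the names, the string payload standing in for BulkProcessor progress, and the error channel are all hypothetical, not the job's actual wiring.

package main

import "fmt"

// plan stands in for the planning function: it owns closing the channel it
// writes to, so the consumer's range loop ends once planning is done.
func plan(progCh chan<- string) error {
	defer close(progCh)
	for i := 0; i < 3; i++ {
		progCh <- fmt.Sprintf("planned span group %d", i)
	}
	return nil
}

func main() {
	progCh := make(chan string)
	errCh := make(chan error, 1)
	go func() { errCh <- plan(progCh) }()
	for update := range progCh { // exits when plan closes progCh
		fmt.Println(update)
	}
	if err := <-errCh; err != nil {
		fmt.Println("planning failed:", err)
	}
}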
@@ -72,18 +79,17 @@ func (c *compactionChain) runCompactionPlan(
 	}
 
 	spansToCompact, err := getSpansToCompact(
-		ctx, execCtx, manifest, c.chainToCompact, details, defaultStore, kmsEnv,
+		ctx, execCtx, manifest, compactChain.chainToCompact, details, defaultStore, kmsEnv,
 	)
 	if err != nil {
 		return err
 	}
 	genSpan := func(ctx context.Context, spanCh chan execinfrapb.RestoreSpanEntry) error {
-		defer close(spanCh)
 		return errors.Wrap(generateAndSendImportSpans(
 			ctx,
 			spansToCompact,
-			c.chainToCompact,
-			c.compactedIterFactory,
+			compactChain.chainToCompact,
+			compactChain.compactedIterFactory,
 			backupLocalityMap,
 			filter,
 			fsc,
@@ -105,9 +111,10 @@ func (c *compactionChain) runCompactionPlan(
 		}
 		return nil
 	}
+	rowResultWriter := sql.NewRowResultWriter(nil)
 	recv := sql.MakeDistSQLReceiver(
 		ctx,
-		sql.NewMetadataCallbackWriter(nil, metaFn),
+		sql.NewMetadataCallbackWriter(rowResultWriter, metaFn),
 		tree.Rows,
 		nil, /* rangeCache */
 		nil, /* txn */
@@ -125,7 +132,7 @@ func (c *compactionChain) runCompactionPlan(
 	return nil
 }
 
-// createCompactionPlan creates an unfinalized physical plan that will
+// createCompactionPlan creates an un-finalized physical plan that will
 // distribute spans from a generator across the cluster for compaction.
 func createCompactionPlan(
 	ctx context.Context,
@@ -184,6 +191,7 @@ func countRestoreSpanEntries(
 			return nil
 		},
 		func(ctx context.Context) error {
+			defer close(countSpansCh)
 			return genSpan(ctx, countSpansCh)
 		},
 	}
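Removing defer close(spanCh) from genSpan and adding the close at each call site keeps channel creation and closure together: the task that makes a channel is the one that closes it, and the generator can be handed any channel without owning its lifetime. A rough sketch of that shape, assuming an errgroup in place of the job's task runner and integer "spans" in place of RestoreSpanEntry:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// genSpan stands in for the span generator: it only writes to the channel it
// is given and never closes it.
func genSpan(ctx context.Context, spanCh chan<- int) error {
	for i := 0; i < 5; i++ {
		select {
		case spanCh <- i:
		case <-ctx.Done():
			return ctx.Err()
		}
	}
	return nil
}

func main() {
	g, ctx := errgroup.WithContext(context.Background())
	spanCh := make(chan int, 10)
	g.Go(func() error {
		defer close(spanCh) // the task that made the channel closes it
		return genSpan(ctx, spanCh)
	})
	g.Go(func() error {
		count := 0
		for range spanCh {
			count++
		}
		fmt.Println("counted", count, "spans")
		return nil
	})
	if err := g.Wait(); err != nil {
		fmt.Println(err)
	}
}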
@@ -194,7 +202,7 @@ func countRestoreSpanEntries(
 }
 
 // createCompactionCorePlacements takes spans from a generator and evenly
-// distributes them across nodes in the cluster, returning the core core placements
+// distributes them across nodes in the cluster, returning the core placements
 // reflecting that distribution.
 func createCompactionCorePlacements(
 	ctx context.Context,
@@ -209,17 +217,32 @@ func createCompactionCorePlacements(
 ) ([]physicalplan.ProcessorCorePlacement, error) {
 	numNodes := len(sqlInstanceIDs)
 	corePlacements := make([]physicalplan.ProcessorCorePlacement, numNodes)
+	for i := range corePlacements {
+		corePlacements[i].SQLInstanceID = sqlInstanceIDs[i]
+		corePlacements[i].Core.CompactBackups = &execinfrapb.CompactBackupsSpec{
+			JobID:       int64(jobID),
+			DefaultURI:  details.URI,
+			Destination: details.Destination,
+			Encryption:  details.EncryptionOptions,
+			StartTime:   details.StartTime,
+			EndTime:     details.EndTime,
+			ElideMode:   elideMode,
+			UserProto:   user.EncodeProto(),
+			Spans:       spansToCompact,
+		}
+	}
 
 	spanEntryCh := make(chan execinfrapb.RestoreSpanEntry, 1000)
 	var tasks []func(ctx context.Context) error
 	tasks = append(tasks, func(ctx context.Context) error {
+		defer close(spanEntryCh)
 		return genSpan(ctx, spanEntryCh)
 	})
 	tasks = append(tasks, func(ctx context.Context) error {
 		numEntriesPerNode := numEntries / numNodes
 		leftoverEntries := numEntries % numNodes
 		getTargetNumEntries := func(nodeIdx int) int {
-			if nodeIdx <= leftoverEntries {
+			if nodeIdx < leftoverEntries {
 				// This more evenly distributes the leftover entries across the nodes
 				// after doing integer division to assign the entries to the nodes.
 				return numEntriesPerNode + 1
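The change from <= to < matters because node indices are zero-based: exactly numEntries % numNodes nodes should receive one extra entry, namely indices 0 through leftoverEntries-1. A small standalone check of the corrected arithmetic with made-up numbers (10 entries over 3 nodes):

package main

import "fmt"

func main() {
	numEntries, numNodes := 10, 3
	numEntriesPerNode := numEntries / numNodes // 3
	leftoverEntries := numEntries % numNodes   // 1
	total := 0
	for nodeIdx := 0; nodeIdx < numNodes; nodeIdx++ {
		target := numEntriesPerNode
		if nodeIdx < leftoverEntries { // with <=, nodes 0 and 1 would both get an extra entry
			target++
		}
		fmt.Printf("node %d gets %d entries\n", nodeIdx, target)
		total += target
	}
	fmt.Println("total planned:", total) // 10, matching numEntries
}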
@@ -231,29 +254,21 @@ func createCompactionCorePlacements(
 		targetNumEntries := getTargetNumEntries(currNode)
 
 		for entry := range spanEntryCh {
+			currEntries = append(currEntries, entry)
 			if len(currEntries) == targetNumEntries {
 				corePlacements[currNode].SQLInstanceID = sqlInstanceIDs[currNode]
-				corePlacements[currNode].Core.CompactBackups = &execinfrapb.CompactBackupsSpec{
-					JobID:         int64(jobID),
-					Destination:   details.Destination,
-					Encryption:    details.EncryptionOptions,
-					StartTime:     details.StartTime,
-					EndTime:       details.EndTime,
-					ElideMode:     elideMode,
-					UserProto:     user.EncodeProto(),
-					Spans:         spansToCompact,
-					AssignedSpans: util.Map(currEntries, func(entry execinfrapb.RestoreSpanEntry) roachpb.Span {
+				corePlacements[currNode].Core.CompactBackups.AssignedSpans = util.Map(
+					currEntries,
+					func(entry execinfrapb.RestoreSpanEntry) roachpb.Span {
 						return entry.Span
-					}),
-				}
+					})
 				currNode++
 				targetNumEntries = getTargetNumEntries(currNode)
 				currEntries = currEntries[:0]
 			}
 			if currNode == numNodes {
 				return nil
 			}
-			currEntries = append(currEntries, entry)
 		}
 		return nil
 	})
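Moving the append to the top of the range loop makes each entry count toward the current batch before the size check, so a batch is handed off the moment it reaches its target instead of one entry late. A simplified stand-in for the consumer loop, using ints and hard-coded per-node targets rather than RestoreSpanEntry and getTargetNumEntries:

package main

import "fmt"

func main() {
	entryCh := make(chan int, 16)
	for i := 0; i < 10; i++ {
		entryCh <- i
	}
	close(entryCh)

	numNodes := 3
	targets := []int{4, 3, 3} // per-node targets, as getTargetNumEntries would compute
	var batches [][]int
	var curr []int
	node := 0
	for entry := range entryCh {
		curr = append(curr, entry) // count the entry before checking the target
		if len(curr) == targets[node] {
			batches = append(batches, append([]int(nil), curr...))
			node++
			curr = curr[:0]
		}
		if node == numNodes {
			break
		}
	}
	fmt.Println(batches) // [[0 1 2 3] [4 5 6] [7 8 9]]
}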