@@ -88,7 +88,7 @@ func (o *ScaleUpOrchestrator) ScaleUp(
 	nodeInfos map[string]*schedulerframework.NodeInfo,
 ) (*status.ScaleUpStatus, errors.AutoscalerError) {
 	if !o.initialized {
-		return scaleUpError(&status.ScaleUpStatus{}, errors.NewAutoscalerError(errors.InternalError, "ScaleUpOrchestrator is not initialized"))
+		return status.UpdateScaleUpError(&status.ScaleUpStatus{}, errors.NewAutoscalerError(errors.InternalError, "ScaleUpOrchestrator is not initialized"))
 	}
 
 	loggingQuota := klogx.PodsLoggingQuota()
@@ -103,7 +103,7 @@ func (o *ScaleUpOrchestrator) ScaleUp(
 
 	upcomingNodes, aErr := o.UpcomingNodes(nodeInfos)
 	if aErr != nil {
-		return scaleUpError(&status.ScaleUpStatus{}, aErr.AddPrefix("could not get upcoming nodes: "))
+		return status.UpdateScaleUpError(&status.ScaleUpStatus{}, aErr.AddPrefix("could not get upcoming nodes: "))
 	}
 	klog.V(4).Infof("Upcoming %d nodes", len(upcomingNodes))
 
@@ -112,7 +112,7 @@ func (o *ScaleUpOrchestrator) ScaleUp(
 		var err error
 		nodeGroups, nodeInfos, err = o.processors.NodeGroupListProcessor.Process(o.autoscalingContext, nodeGroups, nodeInfos, unschedulablePods)
 		if err != nil {
-			return scaleUpError(&status.ScaleUpStatus{}, errors.ToAutoscalerError(errors.InternalError, err))
+			return status.UpdateScaleUpError(&status.ScaleUpStatus{}, errors.ToAutoscalerError(errors.InternalError, err))
 		}
 	}
 
@@ -121,7 +121,7 @@ func (o *ScaleUpOrchestrator) ScaleUp(
 
 	resourcesLeft, aErr := o.resourceManager.ResourcesLeft(o.autoscalingContext, nodeInfos, nodes)
 	if aErr != nil {
-		return scaleUpError(&status.ScaleUpStatus{}, aErr.AddPrefix("could not compute total resources: "))
+		return status.UpdateScaleUpError(&status.ScaleUpStatus{}, aErr.AddPrefix("could not compute total resources: "))
 	}
 
 	now := time.Now()
@@ -186,15 +186,15 @@ func (o *ScaleUpOrchestrator) ScaleUp(
 
 	newNodes, aErr := o.GetCappedNewNodeCount(bestOption.NodeCount, len(nodes)+len(upcomingNodes))
 	if aErr != nil {
-		return scaleUpError(&status.ScaleUpStatus{PodsTriggeredScaleUp: bestOption.Pods}, aErr)
+		return status.UpdateScaleUpError(&status.ScaleUpStatus{PodsTriggeredScaleUp: bestOption.Pods}, aErr)
 	}
 
 	createNodeGroupResults := make([]nodegroups.CreateNodeGroupResult, 0)
 	if !bestOption.NodeGroup.Exist() {
 		oldId := bestOption.NodeGroup.Id()
 		createNodeGroupResult, aErr := o.processors.NodeGroupManager.CreateNodeGroup(o.autoscalingContext, bestOption.NodeGroup)
 		if aErr != nil {
-			return scaleUpError(
+			return status.UpdateScaleUpError(
 				&status.ScaleUpStatus{FailedCreationNodeGroups: []cloudprovider.NodeGroup{bestOption.NodeGroup}, PodsTriggeredScaleUp: bestOption.Pods},
 				aErr)
 		}
@@ -253,7 +253,7 @@ func (o *ScaleUpOrchestrator) ScaleUp(
 	if !found {
 		// This should never happen, as we already should have retrieved nodeInfo for any considered nodegroup.
 		klog.Errorf("No node info for: %s", bestOption.NodeGroup.Id())
-		return scaleUpError(
+		return status.UpdateScaleUpError(
 			&status.ScaleUpStatus{CreateNodeGroupResults: createNodeGroupResults, PodsTriggeredScaleUp: bestOption.Pods},
 			errors.NewAutoscalerError(
 				errors.CloudProviderError,
@@ -263,7 +263,7 @@ func (o *ScaleUpOrchestrator) ScaleUp(
 	// Apply upper limits for CPU and memory.
 	newNodes, aErr = o.resourceManager.ApplyLimits(o.autoscalingContext, newNodes, resourcesLeft, nodeInfo, bestOption.NodeGroup)
 	if aErr != nil {
-		return scaleUpError(
+		return status.UpdateScaleUpError(
 			&status.ScaleUpStatus{CreateNodeGroupResults: createNodeGroupResults, PodsTriggeredScaleUp: bestOption.Pods},
 			aErr)
 	}
@@ -283,15 +283,15 @@ func (o *ScaleUpOrchestrator) ScaleUp(
 
 	scaleUpInfos, aErr := o.processors.NodeGroupSetProcessor.BalanceScaleUpBetweenGroups(o.autoscalingContext, targetNodeGroups, newNodes)
 	if aErr != nil {
-		return scaleUpError(
+		return status.UpdateScaleUpError(
 			&status.ScaleUpStatus{CreateNodeGroupResults: createNodeGroupResults, PodsTriggeredScaleUp: bestOption.Pods},
 			aErr)
 	}
 
 	klog.V(1).Infof("Final scale-up plan: %v", scaleUpInfos)
 	aErr, failedNodeGroups := o.scaleUpExecutor.ExecuteScaleUps(scaleUpInfos, nodeInfos, now)
 	if aErr != nil {
-		return scaleUpError(
+		return status.UpdateScaleUpError(
 			&status.ScaleUpStatus{
 				CreateNodeGroupResults: createNodeGroupResults,
 				FailedResizeNodeGroups: failedNodeGroups,
@@ -322,7 +322,7 @@ func (o *ScaleUpOrchestrator) ScaleUpToNodeGroupMinSize(
 	nodeInfos map[string]*schedulerframework.NodeInfo,
 ) (*status.ScaleUpStatus, errors.AutoscalerError) {
 	if !o.initialized {
-		return scaleUpError(&status.ScaleUpStatus{}, errors.NewAutoscalerError(errors.InternalError, "ScaleUpOrchestrator is not initialized"))
+		return status.UpdateScaleUpError(&status.ScaleUpStatus{}, errors.NewAutoscalerError(errors.InternalError, "ScaleUpOrchestrator is not initialized"))
 	}
 
 	now := time.Now()
@@ -331,7 +331,7 @@ func (o *ScaleUpOrchestrator) ScaleUpToNodeGroupMinSize(
 
 	resourcesLeft, aErr := o.resourceManager.ResourcesLeft(o.autoscalingContext, nodeInfos, nodes)
 	if aErr != nil {
-		return scaleUpError(&status.ScaleUpStatus{}, aErr.AddPrefix("could not compute total resources: "))
+		return status.UpdateScaleUpError(&status.ScaleUpStatus{}, aErr.AddPrefix("could not compute total resources: "))
 	}
 
 	for _, ng := range nodeGroups {
@@ -397,7 +397,7 @@ func (o *ScaleUpOrchestrator) ScaleUpToNodeGroupMinSize(
 	klog.V(1).Infof("ScaleUpToNodeGroupMinSize: final scale-up plan: %v", scaleUpInfos)
 	aErr, failedNodeGroups := o.scaleUpExecutor.ExecuteScaleUps(scaleUpInfos, nodeInfos, now)
 	if aErr != nil {
-		return scaleUpError(
+		return status.UpdateScaleUpError(
 			&status.ScaleUpStatus{
 				FailedResizeNodeGroups: failedNodeGroups,
 			},
@@ -717,9 +717,3 @@ func GetPodsAwaitingEvaluation(egs []*equivalence.PodGroup, bestOption string) [
 	}
 	return awaitsEvaluation
 }
-
-func scaleUpError(s *status.ScaleUpStatus, err errors.AutoscalerError) (*status.ScaleUpStatus, errors.AutoscalerError) {
-	s.ScaleUpError = &err
-	s.Result = status.ScaleUpError
-	return s, err
-}
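
Every call site above swaps the deleted file-local scaleUpError helper for an exported status.UpdateScaleUpError, so other packages can reuse the same "record the error on the status and return both" pattern. The diff does not show the new helper itself; below is a minimal sketch of what it presumably looks like, mirroring the body of the deleted helper and assuming it lives in cluster-autoscaler's processors/status package (the exact location and doc comment should be confirmed against the upstream change).

	// Sketch only: assumed to live in the processors/status package,
	// with the same body as the removed local scaleUpError helper.
	package status

	import (
		"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
	)

	// UpdateScaleUpError records err on the given scale-up status, marks the
	// result as ScaleUpError, and returns both, so callers can bail out with a
	// single `return status.UpdateScaleUpError(...)` statement.
	func UpdateScaleUpError(s *ScaleUpStatus, err errors.AutoscalerError) (*ScaleUpStatus, errors.AutoscalerError) {
		s.ScaleUpError = &err
		s.Result = ScaleUpError
		return s, err
	}

Moving the helper out of the orchestrator keeps error-reporting behavior identical (every edited return statement is a pure rename) while making the status-mutation logic available wherever a *status.ScaleUpStatus is built.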