Commit 8b7cc50

Merge pull request #878 from googs1025/add_allocatable_integration_test
add more integration test for allocatable plugin
2 parents b9e3420 + f770e9b

test/integration/allocatable_test.go

Lines changed: 146 additions & 117 deletions
@@ -23,8 +23,8 @@ import (
    "time"

    v1 "k8s.io/api/core/v1"
-   "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+   "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/apimachinery/pkg/util/uuid"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/client-go/kubernetes"
@@ -34,137 +34,166 @@ import (
    st "k8s.io/kubernetes/pkg/scheduler/testing"
    imageutils "k8s.io/kubernetes/test/utils/image"

+   schedconfig "sigs.k8s.io/scheduler-plugins/apis/config"
    "sigs.k8s.io/scheduler-plugins/pkg/noderesources"
    "sigs.k8s.io/scheduler-plugins/test/util"
)

func TestAllocatablePlugin(t *testing.T) {
-   testCtx := &testContext{}
-   testCtx.Ctx, testCtx.CancelFn = context.WithCancel(context.Background())

-   cs := kubernetes.NewForConfigOrDie(globalKubeConfig)
-   testCtx.ClientSet = cs
-   testCtx.KubeConfig = globalKubeConfig
-
-   cfg, err := util.NewDefaultSchedulerComponentConfig()
-   if err != nil {
-       t.Fatal(err)
+   smallNodeCapacity := map[v1.ResourceName]string{
+       v1.ResourcePods:   "32",
+       v1.ResourceCPU:    "500m",
+       v1.ResourceMemory: "500",
    }
-   // Work around https://github.com/kubernetes/kubernetes/issues/121630.
-   cfg.Profiles[0].Plugins.PreScore = schedapi.PluginSet{
-       Disabled: []schedapi.Plugin{{Name: "*"}},
+   bigNodeCapacity := map[v1.ResourceName]string{
+       v1.ResourcePods:   "32",
+       v1.ResourceCPU:    "500m",
+       v1.ResourceMemory: "5000",
    }
-   cfg.Profiles[0].Plugins.Score = schedapi.PluginSet{
-       Enabled:  []schedapi.Plugin{{Name: noderesources.AllocatableName, Weight: 50000}},
-       Disabled: []schedapi.Plugin{{Name: "*"}},
+   smallPodReq := map[v1.ResourceName]string{
+       v1.ResourceMemory: "100",
    }
-
-   ns := fmt.Sprintf("integration-test-%v", string(uuid.NewUUID()))
-   createNamespace(t, testCtx, ns)
-
-   testCtx = initTestSchedulerWithOptions(
-       t,
-       testCtx,
-       scheduler.WithProfiles(cfg.Profiles...),
-       scheduler.WithFrameworkOutOfTreeRegistry(fwkruntime.Registry{noderesources.AllocatableName: noderesources.NewAllocatable}),
-   )
-   syncInformerFactory(testCtx)
-   go testCtx.Scheduler.Run(testCtx.Ctx)
-   defer cleanupTest(t, testCtx)
-
-   // Create nodes. First two are small nodes.
-   bigNodeName := "fake-node-big"
-   nodeNames := []string{"fake-node-small-1", "fake-node-small-2", bigNodeName}
-   for _, nodeName := range nodeNames {
-       var memory int64 = 200
-       if nodeName == bigNodeName {
-           memory = 5000
-       }
-       node := st.MakeNode().Name(nodeName).Label("node", nodeName).Obj()
-       node.Status.Allocatable = v1.ResourceList{
-           v1.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
-           v1.ResourceCPU:    *resource.NewMilliQuantity(500, resource.DecimalSI),
-           v1.ResourceMemory: *resource.NewQuantity(memory, resource.DecimalSI),
-       }
-       node.Status.Capacity = v1.ResourceList{
-           v1.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
-           v1.ResourceCPU:    *resource.NewMilliQuantity(500, resource.DecimalSI),
-           v1.ResourceMemory: *resource.NewQuantity(memory, resource.DecimalSI),
-       }
-       node, err := cs.CoreV1().Nodes().Create(testCtx.Ctx, node, metav1.CreateOptions{})
-       if err != nil {
-           t.Fatalf("Failed to create Node %q: %v", nodeName, err)
-       }
+   bigPodReq := map[v1.ResourceName]string{
+       v1.ResourceMemory: "2000",
    }

-   // Create Pods.
-   var pods []*v1.Pod
-   podNames := []string{"small-1", "small-2", "small-3", "small-4"}
-   pause := imageutils.GetPauseImageName()
-   for i := 0; i < len(podNames); i++ {
-       pod := st.MakePod().Namespace(ns).Name(podNames[i]).Container(pause).Obj()
-       pod.Spec.Containers[0].Resources = v1.ResourceRequirements{
-           Requests: v1.ResourceList{
-               v1.ResourceMemory: *resource.NewQuantity(100, resource.DecimalSI),
+   testCases := []struct {
+       name          string
+       pods          []*v1.Pod
+       nodes         []*v1.Node
+       modeType      schedconfig.ModeType
+       expectedNodes map[string]sets.Set[string] // pod name to expected node name mapping
+   }{
+       {
+           name: "least modeType the small pods should land on the small nodes and the big pod should land on the big node",
+           pods: []*v1.Pod{
+               st.MakePod().Name("small-1").Container(imageutils.GetPauseImageName()).Req(smallPodReq).Obj(),
+               st.MakePod().Name("small-2").Container(imageutils.GetPauseImageName()).Req(smallPodReq).Obj(),
+               st.MakePod().Name("small-3").Container(imageutils.GetPauseImageName()).Req(smallPodReq).Obj(),
+               st.MakePod().Name("small-4").Container(imageutils.GetPauseImageName()).Req(smallPodReq).Obj(),
+               st.MakePod().Name("big-1").Container(imageutils.GetPauseImageName()).Req(bigPodReq).Obj(),
            },
-       }
-       pods = append(pods, pod)
-   }
-
-   // Make a big pod.
-   podNames = append(podNames, "big-1")
-   pod := st.MakePod().Namespace(ns).Name("big-1").Container(pause).Obj()
-   pod.Spec.Containers[0].Resources = v1.ResourceRequirements{
-       Requests: v1.ResourceList{
-           v1.ResourceMemory: *resource.NewQuantity(5000, resource.DecimalSI),
+           nodes: []*v1.Node{
+               st.MakeNode().Name("fake-node-small-1").Label("node", "fake-node-small-1").Capacity(smallNodeCapacity).Obj(),
+               st.MakeNode().Name("fake-node-small-2").Label("node", "fake-node-small-2").Capacity(smallNodeCapacity).Obj(),
+               st.MakeNode().Name("fake-node-big").Label("node", "fake-node-big").Capacity(bigNodeCapacity).Obj(),
+           },
+           expectedNodes: map[string]sets.Set[string]{
+               "small-1": sets.New("fake-node-small-1", "fake-node-small-2"),
+               "small-2": sets.New("fake-node-small-1", "fake-node-small-2"),
+               "small-3": sets.New("fake-node-small-1", "fake-node-small-2"),
+               "small-4": sets.New("fake-node-small-1", "fake-node-small-2"),
+               "big-1":   sets.New("fake-node-big"),
+           },
+           modeType: schedconfig.Least,
+       },
+       {
+           name: "most modeType the small pods should land on the big node and the big pod should also land on the big node",
+           pods: []*v1.Pod{
+               st.MakePod().Name("small-1").Container(imageutils.GetPauseImageName()).Req(smallPodReq).Obj(),
+               st.MakePod().Name("small-2").Container(imageutils.GetPauseImageName()).Req(smallPodReq).Obj(),
+               st.MakePod().Name("small-3").Container(imageutils.GetPauseImageName()).Req(smallPodReq).Obj(),
+               st.MakePod().Name("small-4").Container(imageutils.GetPauseImageName()).Req(smallPodReq).Obj(),
+               st.MakePod().Name("big-1").Container(imageutils.GetPauseImageName()).Req(bigPodReq).Obj(),
+           },
+           nodes: []*v1.Node{
+               st.MakeNode().Name("fake-node-small-1").Label("node", "fake-node-small-1").Capacity(smallNodeCapacity).Obj(),
+               st.MakeNode().Name("fake-node-small-2").Label("node", "fake-node-small-2").Capacity(smallNodeCapacity).Obj(),
+               st.MakeNode().Name("fake-node-big").Label("node", "fake-node-big").Capacity(bigNodeCapacity).Obj(),
+           },
+           expectedNodes: map[string]sets.Set[string]{
+               "small-1": sets.New("fake-node-big"),
+               "small-2": sets.New("fake-node-big"),
+               "small-3": sets.New("fake-node-big"),
+               "small-4": sets.New("fake-node-big"),
+               "big-1":   sets.New("fake-node-big"),
+           },
+           modeType: schedconfig.Most,
        },
    }
-   pods = append(pods, pod)
-
-   // Create the Pods. By default, the small pods should land on the small nodes.
-   t.Logf("Start to create 5 Pods.")
-   for i := range pods {
-       t.Logf("Creating Pod %q", pods[i].Name)
-       _, err := cs.CoreV1().Pods(ns).Create(testCtx.Ctx, pods[i], metav1.CreateOptions{})
-       if err != nil {
-           t.Fatalf("Failed to create Pod %q: %v", pods[i].Name, err)
-       }
-   }
-   defer cleanupPods(t, testCtx, pods)

-   for i := range pods {
-       // Wait for the pod to be scheduled.
-       err := wait.PollUntilContextTimeout(testCtx.Ctx, 1*time.Second, 60*time.Second, false, func(ctx context.Context) (bool, error) {
-           return podScheduled(t, cs, pods[i].Namespace, pods[i].Name), nil
-       })
-       if err != nil {
-           t.Fatalf("Waiting for pod %q to be scheduled, error: %v", pods[i].Name, err.Error())
-       }
-
-       pod, err := cs.CoreV1().Pods(ns).Get(testCtx.Ctx, pods[i].Name, metav1.GetOptions{})
-       if err != nil {
-           t.Fatal(err)
-       }
-
-       // The big pod should be scheduled on the big node.
-       if pod.Name == "big-1" {
-           if pod.Spec.NodeName == bigNodeName {
-               t.Logf("Pod %q is on the big node as expected.", pod.Name)
-               continue
-           } else {
-               t.Errorf("Pod %q is expected on node %q, but found on node %q",
-                   pod.Name, bigNodeName, pod.Spec.NodeName)
+   for _, tc := range testCases {
+       t.Run(tc.name, func(t *testing.T) {
+           testCtx := &testContext{}
+           testCtx.Ctx, testCtx.CancelFn = context.WithCancel(context.Background())
+
+           cs := kubernetes.NewForConfigOrDie(globalKubeConfig)
+           testCtx.ClientSet = cs
+           testCtx.KubeConfig = globalKubeConfig
+
+           cfg, err := util.NewDefaultSchedulerComponentConfig()
+           if err != nil {
+               t.Fatal(err)
+           }
+           cfg.Profiles[0].Plugins.PreScore = schedapi.PluginSet{Disabled: []schedapi.Plugin{{Name: "*"}}}
+           cfg.Profiles[0].Plugins.Score = schedapi.PluginSet{
+               Enabled:  []schedapi.Plugin{{Name: noderesources.AllocatableName, Weight: 50000}},
+               Disabled: []schedapi.Plugin{{Name: "*"}},
+           }
+
+           cfg.Profiles[0].PluginConfig = append(cfg.Profiles[0].PluginConfig, schedapi.PluginConfig{
+               Name: noderesources.AllocatableName,
+               Args: &schedconfig.NodeResourcesAllocatableArgs{
+                   Mode: tc.modeType,
+                   Resources: []schedapi.ResourceSpec{
+                       {Name: string(v1.ResourceMemory), Weight: 10},
+                   },
+               },
+           })
+
+           ns := fmt.Sprintf("integration-test-%v", string(uuid.NewUUID()))
+           createNamespace(t, testCtx, ns)
+
+           testCtx = initTestSchedulerWithOptions(
+               t,
+               testCtx,
+               scheduler.WithProfiles(cfg.Profiles...),
+               scheduler.WithFrameworkOutOfTreeRegistry(fwkruntime.Registry{noderesources.AllocatableName: noderesources.NewAllocatable}),
+           )
+           syncInformerFactory(testCtx)
+           go testCtx.Scheduler.Run(testCtx.Ctx)
+           defer cleanupTest(t, testCtx)
+
+           // Create nodes.
+           for _, node := range tc.nodes {
+               _, err := cs.CoreV1().Nodes().Create(testCtx.Ctx, node, metav1.CreateOptions{})
+               if err != nil {
+                   t.Fatalf("Failed to create Node %q: %v", node.Name, err)
+               }
+           }
+
+           // Create the Pods.
+           for _, pod := range tc.pods {
+               // set namespace to pods
+               pod.SetNamespace(ns)
+               _, err := cs.CoreV1().Pods(ns).Create(testCtx.Ctx, pod, metav1.CreateOptions{})
+               if err != nil {
+                   t.Fatalf("Failed to create Pod %q: %v", pod.Name, err)
+               }
            }
-       }
-
-       // The other pods should be scheduled on the small nodes.
-       if pod.Spec.NodeName == nodeNames[0] ||
-           pod.Spec.NodeName == nodeNames[1] {
-           t.Logf("Pod %q is on a small node as expected.", pod.Name)
-           continue
-       } else {
-           t.Errorf("Pod %q is on node %q when it was expected on a small node",
-               pod.Name, pod.Spec.NodeName)
-       }
+           defer cleanupPods(t, testCtx, tc.pods)
+
+           for _, pod := range tc.pods {
+               err := wait.PollUntilContextTimeout(testCtx.Ctx, 1*time.Second, 60*time.Second, false, func(ctx context.Context) (bool, error) {
+                   return podScheduled(t, cs, pod.Namespace, pod.Name), nil
+               })
+               if err != nil {
+                   t.Fatalf("Waiting for pod %q to be scheduled, error: %v", pod.Name, err.Error())
+               }
+
+               scheduledPod, err := cs.CoreV1().Pods(ns).Get(testCtx.Ctx, pod.Name, metav1.GetOptions{})
+               if err != nil {
+                   t.Fatal(err)
+               }
+
+               expectedNodes, exists := tc.expectedNodes[pod.Name]
+               if !exists || !expectedNodes.Has(scheduledPod.Spec.NodeName) {
+                   t.Errorf("Pod %q is expected on node %q, but found on node %q", pod.Name, expectedNodes.UnsortedList(), scheduledPod.Spec.NodeName)
+               } else {
+                   t.Logf("Pod %q is on node %q as expected.", pod.Name, scheduledPod.Spec.NodeName)
+               }
+           }
+       })
    }
}
