Skip to content

Ship 0039/261 - add --scheduler-name flag #311

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 1 commit into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions docs/shp_build_create.md
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@ shp build create <name> [flags]
--retention-succeeded-limit uint number of succeeded BuildRuns to be kept (default 65535)
--retention-ttl-after-failed duration duration to delete a failed BuildRun after completion
--retention-ttl-after-succeeded duration duration to delete a succeeded BuildRun after completion
--scheduler-name string specify the scheduler to be used to dispatch the Pod
--source-context-dir string use a inner directory as context directory
--source-git-clone-secret string name of the secret with credentials to access the git source, e.g. git credentials
--source-git-revision string git repository source revision
Expand Down
1 change: 1 addition & 0 deletions docs/shp_build_run.md
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@ shp build run <name> [flags]
--retention-ttl-after-failed duration duration to delete the BuildRun after it failed
--retention-ttl-after-succeeded duration duration to delete the BuildRun after it succeeded
--sa-name string Kubernetes service-account name
--scheduler-name string specify the scheduler to be used to dispatch the Pod
--timeout duration build process timeout
```

Expand Down
1 change: 1 addition & 0 deletions docs/shp_build_upload.md
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,7 @@ shp build upload <build-name> [path/to/source|.] [flags]
--retention-ttl-after-failed duration duration to delete the BuildRun after it failed
--retention-ttl-after-succeeded duration duration to delete the BuildRun after it succeeded
--sa-name string Kubernetes service-account name
--scheduler-name string specify the scheduler to be used to dispatch the Pod
--timeout duration build process timeout
```

Expand Down
1 change: 1 addition & 0 deletions docs/shp_buildrun_create.md
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@ shp buildrun create <name> [flags]
--retention-ttl-after-failed duration duration to delete the BuildRun after it failed
--retention-ttl-after-succeeded duration duration to delete the BuildRun after it succeeded
--sa-name string Kubernetes service-account name
--scheduler-name string specify the scheduler to be used to dispatch the Pod
--timeout duration build process timeout
```

Expand Down
4 changes: 3 additions & 1 deletion pkg/shp/flags/build.go
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,8 @@ func BuildSpecFromFlags(flags *pflag.FlagSet) (*buildv1beta1.BuildSpec, *string,
TTLAfterFailed: &metav1.Duration{},
TTLAfterSucceeded: &metav1.Duration{},
},
NodeSelector: map[string]string{},
NodeSelector: map[string]string{},
SchedulerName: ptr.To(""),
}

sourceFlags(flags, spec.Source)
Expand All @@ -57,6 +58,7 @@ func BuildSpecFromFlags(flags *pflag.FlagSet) (*buildv1beta1.BuildSpec, *string,
imageAnnotationsFlags(flags, spec.Output.Annotations)
buildRetentionFlags(flags, spec.Retention)
buildNodeSelectorFlags(flags, spec.NodeSelector)
buildSchedulerNameFlag(flags, spec.SchedulerName)
var dockerfile, builderImage string
dockerfileFlags(flags, &dockerfile)
builderImageFlag(flags, &builderImage)
Expand Down
10 changes: 9 additions & 1 deletion pkg/shp/flags/build_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,8 @@ func TestBuildSpecFromFlags(t *testing.T) {
Duration: 30 * time.Minute,
},
},
NodeSelector: map[string]string{"kubernetes.io/hostname": "worker-1"},
NodeSelector: map[string]string{"kubernetes.io/hostname": "worker-1"},
SchedulerName: ptr.To("dolphinscheduler"),
}

cmd := &cobra.Command{}
Expand Down Expand Up @@ -123,6 +124,13 @@ func TestBuildSpecFromFlags(t *testing.T) {
g.Expect(expected.NodeSelector).To(o.Equal(spec.NodeSelector), ".spec.nodeSelector")
})

t.Run(".spec.schedulerName", func(_ *testing.T) {
err := flags.Set(SchedulerNameFlag, *expected.SchedulerName)
g.Expect(err).To(o.BeNil())

g.Expect(expected.SchedulerName).To(o.Equal(spec.SchedulerName), "spec.schedulerName")
})

t.Run(".spec.timeout", func(_ *testing.T) {
err := flags.Set(TimeoutFlag, expected.Timeout.Duration.String())
g.Expect(err).To(o.BeNil())
Expand Down
4 changes: 3 additions & 1 deletion pkg/shp/flags/buildrun.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,8 @@ func BuildRunSpecFromFlags(flags *pflag.FlagSet) *buildv1beta1.BuildRunSpec {
TTLAfterFailed: &metav1.Duration{},
TTLAfterSucceeded: &metav1.Duration{},
},
NodeSelector: map[string]string{},
NodeSelector: map[string]string{},
SchedulerName: ptr.To(""),
}

buildRefFlags(flags, &spec.Build)
Expand All @@ -41,6 +42,7 @@ func BuildRunSpecFromFlags(flags *pflag.FlagSet) *buildv1beta1.BuildRunSpec {
imageAnnotationsFlags(flags, spec.Output.Annotations)
buildRunRetentionFlags(flags, spec.Retention)
buildNodeSelectorFlags(flags, spec.NodeSelector)
buildSchedulerNameFlag(flags, spec.SchedulerName)
return spec
}

Expand Down
10 changes: 9 additions & 1 deletion pkg/shp/flags/buildrun_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,8 @@ func TestBuildRunSpecFromFlags(t *testing.T) {
Duration: 30 * time.Minute,
},
},
NodeSelector: map[string]string{"kubernetes.io/hostname": "worker-1"},
NodeSelector: map[string]string{"kubernetes.io/hostname": "worker-1"},
SchedulerName: ptr.To("dolphinscheduler"),
}

cmd := &cobra.Command{}
Expand Down Expand Up @@ -83,6 +84,13 @@ func TestBuildRunSpecFromFlags(t *testing.T) {
g.Expect(expected.NodeSelector).To(o.Equal(spec.NodeSelector), ".spec.nodeSelector")
})

t.Run(".spec.schedulerName", func(_ *testing.T) {
err := flags.Set(SchedulerNameFlag, *expected.SchedulerName)
g.Expect(err).To(o.BeNil())

g.Expect(expected.SchedulerName).To(o.Equal(spec.SchedulerName), "spec.schedulerName")
})

t.Run(".spec.retention.ttlAfterFailed", func(_ *testing.T) {
err := flags.Set(RetentionTTLAfterFailedFlag, expected.Retention.TTLAfterFailed.Duration.String())
g.Expect(err).To(o.BeNil())
Expand Down
12 changes: 12 additions & 0 deletions pkg/shp/flags/flags.go
Original file line number Diff line number Diff line change
Expand Up @@ -66,6 +66,8 @@ const (
RetentionTTLAfterSucceededFlag = "retention-ttl-after-succeeded"
// NodeSelectorFlag command-line flag.
NodeSelectorFlag = "node-selector"
// SchedulerNameFlag command-line flag.
SchedulerNameFlag = "scheduler-name"
)

// sourceFlags flags for ".spec.source"
Expand Down Expand Up @@ -266,6 +268,16 @@ func buildNodeSelectorFlags(flags *pflag.FlagSet, nodeSelectorLabels map[string]
flags.Var(NewMapValue(nodeSelectorLabels), NodeSelectorFlag, "set of key-value pairs that correspond to labels of a node to match")
}

// buildSchedulerNameFlag registers the --scheduler-name flag on the given
// flag set, binding its value to the provided destination (typically
// BuildSpec.SchedulerName). The flag defaults to the empty string, meaning
// the cluster's default scheduler is used.
func buildSchedulerNameFlag(flags *pflag.FlagSet, schedulerName *string) {
	flags.StringVar(schedulerName, SchedulerNameFlag, "", "specify the scheduler to be used to dispatch the Pod")
}

// envFlags registers flags for adding corev1.EnvVars.
func envFlags(flags *pflag.FlagSet, envs *[]corev1.EnvVar) {
flags.VarP(
Expand Down
135 changes: 98 additions & 37 deletions test/e2e/node-selector.bats
Original file line number Diff line number Diff line change
Expand Up @@ -3,112 +3,173 @@
source test/e2e/helpers.sh

setup() {
load 'bats/support/load'
load 'bats/assert/load'
load 'bats/file/load'
load 'bats/support/load'
load 'bats/assert/load'
load 'bats/file/load'
}

teardown() {
run kubectl delete builds.shipwright.io --all
run kubectl delete buildruns.shipwright.io --all
run kubectl delete builds.shipwright.io --all
run kubectl delete buildruns.shipwright.io --all
}

scheduler_name="dolphinscheduler"

@test "shp build create --node-selector single label" {
# generate random names for our build
build_name=$(random_name)
build_name=$(random_name)

# create a Build with node selector
# create a Build with node selector
run shp build create ${build_name} --source-git-url=https://github.com/shipwright-io/sample-go --output-image=my-fake-image --node-selector="kubernetes.io/hostname=node-1"
assert_success

# ensure that the build was successfully created
assert_output --partial "Created build \"${build_name}\""
assert_output --partial "Created build \"${build_name}\""

# get the jsonpath of Build object .spec.nodeSelector
run kubectl get builds.shipwright.io/${build_name} -ojsonpath="{.spec.nodeSelector}"
assert_success
run kubectl get builds.shipwright.io/${build_name} -ojsonpath="{.spec.nodeSelector}"
assert_success

assert_output '{"kubernetes.io/hostname":"node-1"}'
}

@test "shp build create --node-selector multiple labels" {
# generate random names for our build
build_name=$(random_name)
build_name=$(random_name)

# create a Build with node selector
run shp build create ${build_name} --source-git-url=https://github.com/shipwright-io/sample-go --output-image=my-fake-image --node-selector="kubernetes.io/hostname=node-1" --node-selector="kubernetes.io/os=linux"
# create a Build with node selector
run shp build create ${build_name} --source-git-url=https://github.com/shipwright-io/sample-go --output-image=my-fake-image --node-selector="kubernetes.io/hostname=node-1" --node-selector="kubernetes.io/os=linux"
assert_success

# ensure that the build was successfully created
assert_output --partial "Created build \"${build_name}\""
assert_output --partial "Created build \"${build_name}\""

# get the jsonpath of Build object .spec.nodeSelector
run kubectl get builds.shipwright.io/${build_name} -ojsonpath="{.spec.nodeSelector}"
assert_success
run kubectl get builds.shipwright.io/${build_name} -ojsonpath="{.spec.nodeSelector}"
assert_success

assert_output --partial '"kubernetes.io/hostname":"node-1"'
assert_output --partial '"kubernetes.io/os":"linux"'
}

@test "shp build create --scheduler-name" {
  # generate random names for our build
  build_name=$(random_name)

  # create a Build with a custom scheduler name
  run shp build create ${build_name} --source-git-url=https://github.com/shipwright-io/sample-go --output-image=my-fake-image --scheduler-name=${scheduler_name}
  assert_success

  # ensure that the build was successfully created
  assert_output --partial "Created build \"${build_name}\""

  # get the jsonpath of Build object .spec.schedulerName
  run kubectl get builds.shipwright.io/${build_name} -ojsonpath="{.spec.schedulerName}"
  assert_success

  # the scheduler name must round-trip through the created Build resource
  assert_output "${scheduler_name}"
}

@test "shp buildrun create --node-selector single label" {
# generate random names for our buildrun
buildrun_name=$(random_name)
build_name=$(random_name)
buildrun_name=$(random_name)
build_name=$(random_name)

# create a Build with node selector
# create a Build with node selector
run shp buildrun create ${buildrun_name} --buildref-name=${build_name} --node-selector="kubernetes.io/hostname=node-1"
assert_success

# ensure that the build was successfully created
assert_output --partial "BuildRun created \"${buildrun_name}\" for Build \"${build_name}\""
assert_output --partial "BuildRun created \"${buildrun_name}\" for Build \"${build_name}\""

# get the jsonpath of Build object .spec.nodeSelector
run kubectl get buildruns.shipwright.io/${buildrun_name} -ojsonpath="{.spec.nodeSelector}"
assert_success
run kubectl get buildruns.shipwright.io/${buildrun_name} -ojsonpath="{.spec.nodeSelector}"
assert_success

assert_output '{"kubernetes.io/hostname":"node-1"}'
}

@test "shp buildrun create --node-selector multiple labels" {
# generate random names for our buildrun
buildrun_name=$(random_name)
build_name=$(random_name)
buildrun_name=$(random_name)
build_name=$(random_name)

# create a Build with node selector
run shp buildrun create ${buildrun_name} --buildref-name=${build_name} --node-selector="kubernetes.io/hostname=node-1" --node-selector="kubernetes.io/os=linux"
# create a Build with node selector
run shp buildrun create ${buildrun_name} --buildref-name=${build_name} --node-selector="kubernetes.io/hostname=node-1" --node-selector="kubernetes.io/os=linux"
assert_success

# ensure that the build was successfully created
assert_output --partial "BuildRun created \"${buildrun_name}\" for Build \"${build_name}\""
assert_output --partial "BuildRun created \"${buildrun_name}\" for Build \"${build_name}\""

# get the jsonpath of Build object .spec.nodeSelector
run kubectl get buildruns.shipwright.io/${buildrun_name} -ojsonpath="{.spec.nodeSelector}"
assert_success
run kubectl get buildruns.shipwright.io/${buildrun_name} -ojsonpath="{.spec.nodeSelector}"
assert_success

assert_output --partial '"kubernetes.io/hostname":"node-1"'
assert_output --partial '"kubernetes.io/os":"linux"'
}

@test "shp buildrun create --scheduler-name" {
  # generate random names for our buildrun and its referenced build
  buildrun_name=$(random_name)
  build_name=$(random_name)

  # create a BuildRun with a custom scheduler name
  run shp buildrun create ${buildrun_name} --buildref-name=${build_name} --scheduler-name=${scheduler_name}
  assert_success

  # ensure that the buildrun was successfully created
  assert_output --partial "BuildRun created \"${buildrun_name}\" for Build \"${build_name}\""

  # get the jsonpath of BuildRun object .spec.schedulerName
  run kubectl get buildruns.shipwright.io/${buildrun_name} -ojsonpath="{.spec.schedulerName}"
  assert_success

  # the scheduler name must round-trip through the created BuildRun resource
  assert_output "${scheduler_name}"
}

@test "shp build run --node-selector set" {
# generate random names for our build
build_name=$(random_name)
build_name=$(random_name)

# create a Build with node selector
# create a Build with node selector
run shp build create ${build_name} --source-git-url=https://github.com/shipwright-io/sample-go --output-image=my-fake-image
assert_success

# ensure that the build was successfully created
assert_output --partial "Created build \"${build_name}\""
assert_output --partial "Created build \"${build_name}\""

# get the build object
run kubectl get builds.shipwright.io/${build_name}
assert_success
run kubectl get builds.shipwright.io/${build_name}
assert_success

run shp build run ${build_name} --node-selector="kubernetes.io/hostname=node-1"

# get the jsonpath of Build object .spec.nodeSelector
run kubectl get buildruns.shipwright.io -ojsonpath='{.items[*].spec.nodeSelector}'
assert_success
run kubectl get buildruns.shipwright.io -ojsonpath='{.items[*].spec.nodeSelector}'
assert_success
assert_output --partial '"kubernetes.io/hostname":"node-1"'
}
}

@test "shp build run --scheduler-name" {
  # generate random names for our build
  build_name=$(random_name)

  # create a Build without a scheduler name; the flag is applied at run time
  run shp build create ${build_name} --source-git-url=https://github.com/shipwright-io/sample-go --output-image=my-fake-image
  assert_success

  # ensure that the build was successfully created
  assert_output --partial "Created build \"${build_name}\""

  # confirm the build object exists before triggering a run
  run kubectl get builds.shipwright.io/${build_name}
  assert_success

  # trigger a BuildRun with a custom scheduler name
  run shp build run ${build_name} --scheduler-name=${scheduler_name}
  assert_success

  # get the jsonpath of BuildRun object .spec.schedulerName; the get returns a
  # List, so the field must be reached through .items[*] or the query silently
  # matches nothing
  run kubectl get buildruns.shipwright.io -l build.shipwright.io/name=${build_name} -ojsonpath='{.items[*].spec.schedulerName}'
  assert_success
  assert_output --partial "${scheduler_name}"
}
Loading