1
1
pipeline {
2
- agent {
3
- dockerfile {
4
- label 'gpu'
5
- filename 'ci/devel.Dockerfile'
6
- args "-v /var/lib/jenkins/.ccache:/ccache -v /var/lib/jenkins/jobs/deepdetect-prebuilt-cache/branches/master/builds:/prebuilt -e CCACHE_DIR=/ccache --runtime nvidia"
7
- }
2
+ environment {
3
+ DOCKER_IMAGE = "ci-devel:${BRANCH_NAME}.${BUILD_ID}"
4
+ DOCKER_PARAMS = "-v ${WORKSPACE}:/app -v ${WORKSPACE}/build:/app/build -v /var/lib/jenkins/.ccache:/ccache --runtime nvidia"
8
5
}
6
+ agent { node { label 'gpu' } }
9
7
stages {
10
8
stage('Init') {
11
9
steps {
@@ -16,25 +14,47 @@ pipeline {
16
14
sh 'printenv | sort'
17
15
}
18
16
}
17
+ stage('Prepare docker image') {
18
+ steps {
19
+ script {
20
+ docker.build(env.DOCKER_IMAGE, "-f ci/devel.Dockerfile .")
21
+ }
22
+ // The post section can't access ${env.XX}, so as a workaround we persist the image name to a file; thx Jenkins...
23
+ sh '''echo ${DOCKER_IMAGE} > docker-image-name'''
24
+ }
25
+ }
19
26
stage('Installing prebuilt cache') {
20
27
when {
21
28
expression { !env.CHANGE_ID || pullRequest.labels.findAll { it == "ci:full-build" }.size() == 0 }
22
29
}
23
30
steps {
24
31
script {
25
32
sh '''
26
- prebuilt_version=$(awk '/^lastSuccessfulBuild/{print $2}' /prebuilt/permalinks)
27
- rsync -a --progress /prebuilt/${prebuilt_version}/archive/build/ build/
33
+ prebuilt_version=$(awk '/^lastSuccessfulBuild/{print $2}' /var/lib/jenkins/jobs/deepdetect-prebuilt-cache/branches/master/builds/permalinks)
34
+ # Create a copy-on-write filesystem instead of copying data
35
+ mkdir -p build-upper build-work build
36
+ sudo mount -t overlay overlay -o lowerdir=/var/lib/jenkins/jobs/deepdetect-prebuilt-cache/branches/master/builds/${prebuilt_version}/archive/build,upperdir=$(pwd)/build-upper,workdir=$(pwd)/build-work $(pwd)/build
37
+
38
+ # prebuilt_version=$(awk '/^lastSuccessfulBuild/{print $2}' /prebuilt/permalinks)
39
+ # rsync -a --progress /prebuilt/${prebuilt_version}/archive/build/ build/
28
40
'''
29
41
}
30
42
}
31
43
}
32
- stage('Configure') {
44
+ stage('Configure && Build ') {
33
45
steps {
34
- sh '''
46
+ script {
47
+ docker.image(env.DOCKER_IMAGE).inside(env.DOCKER_PARAMS) {
48
+ sh '''
49
+
50
+ cd clients/python/
51
+ tox -e pep8,py36,py27
52
+ cd ../..
53
+
54
+ export CCACHE_DIR=/ccache
35
55
export PATH="/usr/lib/ccache/:$PATH"
36
56
export TMPDIR=$(pwd)/build/tmp
37
- mkdir -p build/tmp
57
+ mkdir build/tmp
38
58
cd build
39
59
cmake .. \
40
60
-DBUILD_TESTS=ON \
@@ -49,31 +69,14 @@ cmake .. \
49
69
-DUSE_TENSORRT=ON \
50
70
-DUSE_TENSORRT_OSS=ON \
51
71
-DCUDA_ARCH="-gencode arch=compute_61,code=sm_61"
52
- '''
53
- }
54
- }
55
- stage('Check codestyle') {
56
- steps {
57
- sh 'cd build && make clang-format-check'
58
- }
59
- }
60
72
61
- stage('Check python client') {
62
- steps {
63
- sh 'cd clients/python/ && tox -e pep8,py36,py27'
64
- }
65
- }
66
- stage('Build GPU') {
67
- steps {
68
- sh '''
69
- export PATH="/usr/lib/ccache/:$PATH"
70
- export TMPDIR=$(pwd)/build/tmp
71
- cd build
73
+ make clang-format-check
72
74
schedtool -B -n 1 -e ionice -n 1 make -j 6 protobuf spdlog caffe_dd pytorch Multicore-TSNE faisslib ncnn xgboost cpp-netlib tensorrt-oss oatpp oatpp-swagger oatpp-zlib
73
- # TODO(sileht): we should create the prebuilt artefacts here after each successful master branch build
74
75
schedtool -B -n 1 -e ionice -n 1 make -j 6
75
76
ccache -s
76
77
'''
78
+ }
79
+ }
77
80
}
78
81
}
79
82
stage('Tests GPU') {
@@ -82,16 +85,20 @@ ccache -s
82
85
}
83
86
steps {
84
87
lock(resource: null, label: "${NODE_NAME}-gpu", variable: 'LOCKED_GPU', quantity: 1) {
85
- sh '''
86
- export CUDA_VISIBLE_DEVICES=$(echo ${LOCKED_GPU} | sed -n -e "s/[^,]* GPU \([^[0-9,]]\)*/\1/gp")
87
- echo "****************************"
88
- echo
89
- python3 -c 'import torch, sys; c=torch.cuda.device_count() ; print(f"CUDA VISIBLE GPU: {c}"); sys.exit(bool(c == 0 ))'
90
- echo
91
- echo "****************************"
92
- cd build && ctest -V -E "multigpu"
93
- '''
88
+ script {
89
+ docker.image(env.DOCKER_IMAGE).inside(env.DOCKER_PARAMS) {
90
+ sh '''
91
+ export CUDA_VISIBLE_DEVICES=$(echo ${LOCKED_GPU} | sed -n -e "s/[^,]* GPU \\([^[0-9,]]\\)*/\\1/gp")
92
+ echo "****************************"
93
+ echo
94
+ python3 -c 'import torch, sys; c=torch.cuda.device_count() ; print(f"CUDA VISIBLE GPU: {c}"); sys.exit(bool(c == 0 ))'
95
+ echo
96
+ echo "****************************"
97
+ cd build && ctest -V -E "multigpu"
98
+ '''
99
+ }
94
100
}
101
+ }
95
102
}
96
103
}
97
104
stage('Tests multi-GPU') {
@@ -100,21 +107,26 @@ ccache -s
100
107
}
101
108
steps {
102
109
lock(resource: null, label: "${NODE_NAME}-gpu", variable: 'LOCKED_GPU', quantity: 2) {
103
- sh '''
104
- export CUDA_VISIBLE_DEVICES=$(echo ${LOCKED_GPU} | sed -n -e "s/[^,]* GPU \([^[0-9,]]\)*/\1/gp")
105
- echo "****************************"
106
- echo
107
- python3 -c 'import torch, sys; c=torch.cuda.device_count() ; print(f"CUDA VISIBLE GPU: {c}"); sys.exit(bool(c < 2))'
108
- echo
109
- echo "****************************"
110
- cd build && ctest -V -R "multigpu"
111
- '''
110
+ script {
111
+ docker.image(env.DOCKER_IMAGE).inside(env.DOCKER_PARAMS) {
112
+ sh '''
113
+ export CUDA_VISIBLE_DEVICES=$(echo ${LOCKED_GPU} | sed -n -e "s/[^,]* GPU \\([^[0-9,]]\\)*/\\1/gp")
114
+ echo "****************************"
115
+ echo
116
+ python3 -c 'import torch, sys; c=torch.cuda.device_count() ; print(f"CUDA VISIBLE GPU: {c}"); sys.exit(bool(c < 2))'
117
+ echo
118
+ echo "****************************"
119
+ cd build && ctest -V -R "multigpu"
120
+ '''
121
+ }
112
122
}
123
+ }
113
124
}
114
125
}
115
126
}
116
127
post {
117
128
always {
129
+ sh '''[ -f docker-image-name] && docker image rm $(cat docker-image-name); sudo umount $(pwd)/build; '''
118
130
cleanWs(cleanWhenAborted: true, cleanWhenFailure: true, cleanWhenNotBuilt: true, cleanWhenSuccess: true, cleanWhenUnstable: true, cleanupMatrixParent: true, deleteDirs: true)
119
131
}
120
132
success {
0 commit comments