# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
blueprint_name: hpc-cluster-localssd
vars:
  project_id: ## Set GCP Project ID Here ##
  deployment_name: hpc-localssd
  region: us-central1
  zone: us-central1-a
# Documentation for each of the modules used below can be found at
# https://github.com/GoogleCloudPlatform/hpc-toolkit/blob/main/modules/README.md
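# One way to exercise this blueprint (a sketch: the blueprint path and project
# ID are placeholders, and it assumes the Toolkit's ghpc binary has been built):
#   ./ghpc create community/examples/hpc-cluster-localssd.yaml --vars project_id=<your-project>
#   ./ghpc deploy hpc-localssd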
deployment_groups:
- group: primary
  modules:
  - id: network1
    source: modules/network/pre-existing-vpc

  - id: homefs
    source: modules/file-system/filestore
    use: [network1]
    settings:
      local_mount: /home

  - id: compute_node_group
    source: community/modules/compute/schedmd-slurm-gcp-v5-node-group
    settings:
      additional_disks:
      - device_name: test-disk-1
        disk_name:
        disk_size_gb: 375
        disk_type: local-ssd
        disk_labels:
        auto_delete: true
        boot: false
      - device_name: test-disk-2
        disk_name:
        disk_size_gb: 375
        disk_type: local-ssd
        disk_labels:
        auto_delete: true
        boot: false
      bandwidth_tier: gvnic_enabled
      machine_type: n1-standard-16
      node_count_dynamic_max: 5
      node_count_static: 0
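  # Note: GCP Local SSD disks come only in fixed 375 GB partitions, which is
  # why disk_size_gb is 375 above. The two disks attached to each compute node
  # are striped into one RAID-0 volume by the controller's
  # compute_startup_script below.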
  - id: compute_partition
    source: community/modules/compute/schedmd-slurm-gcp-v5-partition
    use:
    - network1
    - homefs
    - compute_node_group
    settings:
      is_default: true
      partition_name: ssdcomp
      region: us-central1
  - id: slurm_controller
    source: community/modules/scheduler/schedmd-slurm-gcp-v5-controller
    use:
    - network1
    - homefs
    - compute_partition
    settings:
      cloud_parameters:
        resume_rate: 0
        resume_timeout: 300
        suspend_rate: 0
        suspend_timeout: 300
        no_comma_params: false
      compute_startup_script: |
        #!/bin/bash
        export LOG_FILE=/tmp/custom_startup.log
        export DST_MNT="/mount/localssd" # TODO: set this appropriately
        # Abort if the mount point already exists, so re-runs do not clobber it.
        if [ -d "$DST_MNT" ]; then
          echo "DST_MNT already exists. Canceling." >> $LOG_FILE
          exit 1
        fi
        sudo yum install mdadm -y
        lsblk >> $LOG_FILE
        # Collect the writable NVMe local SSD devices and count them.
        export DEVICES=$(lsblk -d -n -o NAME,RO | grep 'nvme.*0$' | awk '{print "/dev/" $1}')
        export NB_DEVICES=$(lsblk -d -n -o NAME,RO | grep -c 'nvme.*0$')
        # Stripe all local SSDs into a single RAID-0 array, format, and mount it.
        sudo mdadm --create /dev/md0 --level=0 --raid-devices=$NB_DEVICES $DEVICES
        sudo mkfs.ext4 -F /dev/md0
        sudo mkdir -p "$DST_MNT"
        sudo mount /dev/md0 "$DST_MNT"
        sudo chmod a+w "$DST_MNT"
        # Persist the mount across reboots.
        echo UUID=$(sudo blkid -s UUID -o value /dev/md0) $DST_MNT ext4 discard,defaults,nofail 0 2 | sudo tee -a /etc/fstab
        cat /etc/fstab >> $LOG_FILE
        echo "DONE" >> $LOG_FILE
        cat $LOG_FILE
      machine_type: n1-standard-4
  - id: slurm_login
    source: community/modules/scheduler/schedmd-slurm-gcp-v5-login
    use:
    - network1
    - slurm_controller
    settings:
      machine_type: n1-standard-4
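# After deployment, a minimal smoke test from the login node (hypothetical
# commands; the partition name and mount point come from the settings above):
#   srun -N 1 -p ssdcomp df -h /mount/localssd
#   srun -N 1 -p ssdcomp dd if=/dev/zero of=/mount/localssd/testfile bs=1M count=1024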