-
Notifications
You must be signed in to change notification settings - Fork 88
/
Copy pathterraform.tfvars.example
421 lines (334 loc) · 18.8 KB
/
terraform.tfvars.example
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
#################################
# ha-sap-terraform-deployments project configuration file
# Find all the available variables and definitions in the variables.tf file
#################################
# qemu uri, this example is to run locally
qemu_uri = "qemu:///system"
# Use already existing network
#network_name = "my-network"
# Use bridge device on hypervisor
#bridge_device = "br0"
# Due to some internal limitations, the iprange of the existing network must be defined
# The iprange must be defined for already existing network and to create a new one
iprange = "192.168.XXX.Y/24"
# libvirt storage pool, select the libvirt storage pool where the volumes will be stored
# storage_pool = "terraform"
# Base image configuration. These images will be used for all deployed machines unless a specific image is defined
# The source image has preference over the `volume_name` parameter
#source_image = "url-to-your-sles4sap-image"
# Use an already existing image. The image must be in the same storage pool defined in `storage_pool` parameter
# This option is way faster as the image does not need to be downloaded
#volume_name = "SLES4SAP-15_SP4"
#################################
# General configuration variables
#################################
# Deployment name. This variable is used to complement the name of multiple infrastructure resources adding the string as suffix
# If it is not used, the terraform workspace string is used
# The name must be unique among different deployments
# deployment_name = "mydeployment"
# Add the "deployment_name" as a prefix to the hostname.
#deployment_name_in_hostname = true
# SUSE Customer Center Registration parameters.
#reg_code = "<<REG_CODE>>"
#reg_email = "<<your email>>"
# For any sle12 version the additional module sle-module-adv-systems-management/12/x86_64 is mandatory if reg_code is provided
#reg_additional_modules = {
# "sle-module-adv-systems-management/12/x86_64" = ""
# "sle-module-containers/12/x86_64" = ""
# "sle-ha-geo/12.4/x86_64" = "<<REG_CODE>>"
#}
# Authorize additional keys optionally (in this case, the private key is not required)
# Path to local files or keys content
#authorized_keys = ["/home/myuser/.ssh/id_rsa_second_key.pub", "/home/myuser/.ssh/id_rsa_third_key.pub", "ssh-rsa AAAAB3NzaC1yc2EAAAA...."]
##########################
# Other deployment options
##########################
# Repository url used to install HA/SAP deployment packages
# It contains the salt formulas rpm packages and other dependencies.
#
## Specific Release - for latest release look at https://github.com/SUSE/ha-sap-terraform-deployments/releases
# To auto detect the SLE version
#ha_sap_deployment_repo = "https://download.opensuse.org/repositories/network:ha-clustering:sap-deployments:v9/"
# Otherwise use a specific SLE version:
#ha_sap_deployment_repo = "https://download.opensuse.org/repositories/network:ha-clustering:sap-deployments:v9/SLE_15_SP4/"
#
## Development Release (use if on `develop` branch)
# To auto detect the SLE version
#ha_sap_deployment_repo = "https://download.opensuse.org/repositories/network:ha-clustering:sap-deployments:devel/"
# Otherwise use a specific SLE version:
#ha_sap_deployment_repo = "https://download.opensuse.org/repositories/network:ha-clustering:sap-deployments:devel/SLE_15_SP4/"
# Provisioning log level (error by default)
#provisioning_log_level = "info"
# Print colored output of the provisioning execution (true by default)
#provisioning_output_colored = false
# Enable pre deployment steps (disabled by default)
#pre_deployment = true
# Enable post deployment steps (disabled by default)
# This e.g. deletes /etc/salt/grains after a successful deployment
#cleanup_secrets = true
# To disable the provisioning process
#provisioner = ""
# Run provisioner execution in background
#background = true
# Testing and QA purpose
# Define if the deployment is being used for testing purposes
# Disable all extra packages that do not come from the image
# Except salt-minion (for the moment) and salt formulas
# true or false
#offline_mode = false
# Execute HANA Hardware Configuration Check Tool to bench filesystems
# true or false (default)
#hwcct = false
#########################
# HANA machines variables
#########################
# Hostname, without the domain part
#hana_name = "vmhana"
# Number of nodes in the cluster
# 2 nodes will always be scale-up
# 4+ nodes are needed for scale-out (also set hana_scale_out_enabled=true)
hana_count = "2"
#hana_node_vcpu = 4
#hana_node_memory = 32768
# Set specific image for HANA (it's the same for iscsi, monitoring, netweaver and drbd)
# This option has preference over base image options
# hana_source_image = "url-to-your-sles4sap-image"
# hana_volume_name = "SLES4SAP-15_SP0"
# enable to use HANA scale-out
#hana_scale_out_enabled = true
# HANA scale-out role assignments (optional, this can be defined automatically based on "hana_scale_out_standby_count")
# see https://help.sap.com/viewer/6b94445c94ae495c83a19646e7c3fd56/2.0.03/en-US/0d9fe701e2214e98ad4f8721f6558c34.html for reference
#hana_scale_out_addhosts = {
# site1 = "vmhana03:role=standby:group=default:workergroup=default,vmhana05:role=worker:group=default:workergroup=default"
# site2 = "vmhana04:role=standby:group=default:workergroup=default,vmhana06:role=worker:group=default:workergroup=default"
#}
# HANA scale-out roles
# These role assignments are made per HANA site
# Number of standby nodes per site (currently not supported on GCP)
#hana_scale_out_standby_count = 0 # default:0 - Deploy X standby nodes per site. The rest of the nodes will be worker nodes.
# The next variables define how the HANA installation software is obtained.
# 'hana_inst_master' is a NFS share where HANA installation files (extracted or not) are stored
# 'hana_inst_master' must be used always! It is used as the reference path to the other variables
# Local folder where HANA installation master will be mounted
#hana_inst_folder = "/sapmedia/HANA"
# There are multiple options:
# 1. Use an already extracted HANA Platform folder structure.
# The last numbered folder is the HANA Platform folder with the extracted files with
# something like `HDB:HANA:2.0:LINUX_X86_64:SAP HANA PLATFORM EDITION 2.0::XXXXXX` in the LABEL.ASC file
hana_inst_master = "url-to-your-nfs-share:/sapdata/sap_inst_media/51053381"
# 2. Combine the `hana_inst_master` with `hana_platform_folder` variable.
#hana_inst_master = "url-to-your-nfs-share:/sapdata/sap_inst_media"
# Specify the path to already extracted HANA platform installation media, relative to hana_inst_master mounting point.
# This will have preference over hana archive installation media
#hana_platform_folder = "51053381"
# 3. Specify the path to the HANA installation archive file in either of SAR, RAR, ZIP, EXE formats, relative to the 'hana_inst_master' mounting point
# For multipart RAR archives, provide the first part EXE file name.
#hana_archive_file = "51053381_part1.exe"
# 4. If using HANA SAR archive, provide the compatible version of sapcar executable to extract the SAR archive
#hana_archive_file = "IMDB_SERVER.SAR"
#hana_sapcar_exe = "SAPCAR"
# For option 3 and 4, HANA installation archives are extracted to the path specified
# at hana_extract_dir (optional, by default /sapmedia_extract/HANA). This folder cannot be the same as `hana_inst_folder`!
#hana_extract_dir = "/sapmedia_extract/HANA"
# The following SAP HANA Client variables are needed only when you are using a HANA database SAR archive for HANA installation or if you are installing >= HANA 2.0 SPS 06 (platform media format changed).
# HANA Client is used by monitoring & cost-optimized scenario and it is already included in HANA platform media unless a HANA database SAR archive is used.
# You can provide HANA Client in one of the two options below:
# 1. Path to already extracted hana client folder, relative to hana_inst_master mounting point
#hana_client_folder = "DATA_UNITS/HDB_CLIENT_LINUX_X86_64/SAP_HANA_CLIENT" # e.g. inside the HANA platform media
# 2. Or specify the path to the hana client SAR archive file, relative to the 'hana_inst_master'. To extract the SAR archive, you need to also provide compatible version of sapcar executable in variable hana_sapcar_exe
# It will be extracted to hana_client_extract_dir path (optional, by default /sapmedia_extract/HANA_CLIENT)
#hana_client_archive_file = "IMDB_CLIENT20_003_144-80002090.SAR"
#hana_client_extract_dir = "/sapmedia_extract/HANA_CLIENT"
# Enable system replication and HA cluster
#hana_ha_enabled = true
# This map allows to add any extra parameters to the HANA installation (inside the installation configfile).
# For more details about the parameters, have a look at the Parameter Reference, e.g.
# https://help.sap.com/docs/SAP_HANA_PLATFORM/2c1988d620e04368aa4103bf26f17727/c16432a77b6144dcb75aace2b4fcacff.html
# Some examples:
#hana_extra_parameters = {
# # needed for HANA 2.0 >= SPS07 as
# # the local secure store component (LSS) can only be installed in optimized installation mode.
# install_execution_mode = "optimized",
# # Disable minimal memory checks for HANA. Useful to deploy development clusters.
# # Low memory usage can cause a failed deployment. Be aware that this option does
# # not work with any memory size and will most likely fail with less than 16 GiB
# ignore = "check_min_mem"
#}
# Select SBD as fencing mechanism for the HANA cluster
#hana_cluster_fencing_mechanism = "sbd"
# More configuration about the HANA machines
# Set the IP addresses for the HANA machines. Leave this commented to get autogenerated addresses
#hana_ips = ["192.168.XXX.Y", "192.168.XXX.Y+1"]
# IP address used to configure the hana cluster floating IP. It must belong to the same subnet as the hana machines
#hana_cluster_vip = "192.168.XXX.Y+3"
# Enable Active/Active HANA setup (read-only access in the secondary instance)
#hana_active_active = true
# HANA cluster secondary vip. This IP address is attached to the read-only secondary instance. Only needed if hana_active_active is set to true
#hana_cluster_vip_secondary = "192.168.XXX.Y+4"
# HANA instance configuration
# Find some references about the variables in:
# https://help.sap.com
# HANA instance system identifier. The system identifier must be composed by 3 uppercase chars/digits string starting always with a character (there are some restricted options).
#hana_sid = "PRD"
# HANA instance number. It's composed of 2 integers string
#hana_instance_number = "00"
# HANA instance master password (length 10-14, 1 digit, 1 lowercase, 1 uppercase). For detailed password rules see: doc/sap_passwords.md
#hana_master_password = "YourPassword1234"
# HANA primary site name. Only used if HANA's system replication feature is enabled (hana_ha_enabled to true)
#hana_primary_site = "Site1"
# HANA secondary site name. Only used if HANA's system replication feature is enabled (hana_ha_enabled to true)
#hana_secondary_site = "Site2"
# Cost optimized scenario
#scenario_type = "performance-optimized" # default
# or
#scenario_type = "cost-optimized"
#########################
# shared storage variables
# Needed if HANA is deployed in scale-out scenario
#########################
#hana_scale_out_shared_storage_type = "nfs" # only nfs supported at the moment (default: "")
# This defines the base mountpoint on the NFS server for /hana/* and its sub directories in scale-out scenarios.
# It can be e.g. on the DRBD cluster (like for NetWeaver) or any other NFS share.
#hana_scale_out_nfs = "192.168.XXX.Z:/srv/nfs/sapdata/PRD"
# local disk configuration - scale-up example
#hana_data_disks_configuration = {
# disks_size = "256,256,256,256,64,64,512"
# # The next variables are used during the provisioning
# luns = "0,1#2,3#4#5#6"
# names = "data#log#shared#usrsap#backup"
# lv_sizes = "100#100#100#100#100"
# paths = "/hana/data#/hana/log#/hana/shared#/usr/sap#/hana/backup"
#}
# local disk configuration - scale-out without standby nodes example
# on scale-out without standby nodes, we need shared storage for shared only and everything else on local disks
#hana_data_disks_configuration = {
# disks_size = "256,256,256,256,64,512"
# # The next variables are used during the provisioning
# luns = "0,1#2,3#4#5"
# names = "data#log#usrsap#backup"
# lv_sizes = "100#100#100#100"
# paths = "/hana/data#/hana/log#/usr/sap#/hana/backup"
#}
# local Disk configuration - scale-out with standby nodes example
# on scale-out with standby nodes, we need shared storage for data/log/backup/shared and fewer local disks
#hana_data_disks_configuration = {
# disks_type = "pd-ssd"
# disks_size = "64"
# # The next variables are used during the provisioning
# luns = "0"
# names = "usrsap"
# lv_sizes = "100"
# paths = "/usr/sap"
#}
# HANA HA/DR provider configuration
# See https://documentation.suse.com/sbp/all/single-html/SLES4SAP-hana-sr-guide-PerfOpt-15/#cha.s4s.hana-hook for details.
# The SAPHanaSR hook is always enabled.
# enable susTkOver hook (disabled by default)
# hana_ha_dr_sustkover_enabled = true
# enable susChkSrv hook (disabled by default)
# hana_ha_dr_suschksrv_enabled = true
# susChkSrv action on lost, see `man 7 susChkSrv.py` (Options: stop [default], fence)
# hana_ha_dr_suschksrv_action_on_lost = "fence"
#######################
# SBD related variables
#######################
# In order to enable the iscsi machine creation _fencing_mechanism must be set to 'sbd' for any of the clusters
# Hostname, without the domain part
#iscsi_name = "vmiscsi"
# Choose the sbd storage option. Options: iscsi, shared-disk
#sbd_storage_type = "iscsi"
#iscsi_srv_ip = "192.168.XXX.Y+6"
# Number of LUN (logical units) to serve with the iscsi server. Each LUN can be used as a unique sbd disk
#iscsi_lun_count = 3
# Disk size in Bytes used to create the LUNs and partitions to be served by the ISCSI service
#sbd_disk_size = 10737418240
##############################
# Monitoring related variables
##############################
# Enable the host to be monitored by exporters
#monitoring_enabled = true
# Hostname, without the domain part
#monitoring_name = "vmmonitoring"
# IP address of the machine where prometheus and grafana are running
#monitoring_srv_ip = "192.168.XXX.Y+7"
########################
# DRBD related variables
########################
# Enable the DRBD cluster for nfs
#drbd_enabled = true
# Hostname, without the domain part
#drbd_name = "vmdrbd"
# IP of DRBD cluster
#drbd_ips = ["192.168.XXX.Y+8", "192.168.XXX.Y+9"]
# NFS share mounting point and export. Warning: Since cloud images are using cloud-init, /mnt folder cannot be used as standard mounting point folder
# DRBD cluster will create the NFS export in /mnt_permanent/sapdata/{netweaver_sid} to be connected as {drbd_cluster_vip}:/{netweaver_sid} (e.g.: 192.168.1.20:/HA1)
#drbd_nfs_mounting_point = "/mnt_permanent/sapdata"
# Select SBD as fencing mechanism for the DRBD cluster
#drbd_cluster_fencing_mechanism = "sbd"
#############################
# Netweaver or S/4HANA related variables
#############################
# Enable/disable Netweaver deployment
#netweaver_enabled = true
# Hostname, without the domain part
#netweaver_name = "vmnetweaver"
# Netweaver APP server count (PAS and AAS)
# Set to 0 to install the PAS instance in the same instance as the ASCS. This means only 1 machine is installed in the deployment (2 if HA capabilities are enabled)
# Set to 1 to only enable 1 PAS instance in an additional machine
# Set to 2 or higher to deploy additional AAS instances in new machines
#netweaver_app_server_count = 2
# Enabling this option will create a ASCS/ERS HA available cluster together with a PAS and AAS application servers
# Set to false to only create a ASCS and PAS instances
#netweaver_ha_enabled = true
# Select SBD as fencing mechanism for the Netweaver cluster
#netweaver_cluster_fencing_mechanism = "sbd"
# Set the Netweaver product id. The 'HA' suffix means that the installation uses an ASCS/ERS cluster
# Below are the supported SAP Netweaver product ids if using SWPM version 1.0:
# - NW750.HDB.ABAP
# - NW750.HDB.ABAPHA
# - S4HANA1709.CORE.HDB.ABAP
# - S4HANA1709.CORE.HDB.ABAPHA
# Below are the supported SAP Netweaver product ids if using SWPM version 2.0:
# - S4HANA1809.CORE.HDB.ABAP
# - S4HANA1809.CORE.HDB.ABAPHA
# - S4HANA1909.CORE.HDB.ABAP
# - S4HANA1909.CORE.HDB.ABAPHA
# - S4HANA2020.CORE.HDB.ABAP
# - S4HANA2020.CORE.HDB.ABAPHA
# - S4HANA2021.CORE.HDB.ABAP
# - S4HANA2021.CORE.HDB.ABAPHA
# Example:
#netweaver_product_id = "NW750.HDB.ABAPHA"
# Preparing the Netweaver download basket. Check `doc/sap_software.md` for more information
# NFS share with Netweaver installation folders
#netweaver_inst_media = "url-to-your-nfs-share"
# This share must contain the next software (select the version you want to install of course)
# NFS share to store the Netweaver shared files. Only used if drbd_enabled is not set. For single machine deployments (ASCS and PAS in the same machine) set an empty string
#netweaver_nfs_share = "url-to-your-netweaver-sapmnt-nfs-share"
# Path where netweaver sapmnt data is stored.
#netweaver_sapmnt_path = "/sapmnt"
# Netweaver installation required folders
# SAP SWPM installation folder, relative to the netweaver_inst_media mounting point
#netweaver_swpm_folder = "your_swpm"
# Or specify the path to the sapcar executable & SWPM installer sar archive, relative to the netweaver_inst_media mounting point
# The sar archive will be extracted to path specified at netweaver_extract_dir under SWPM directory (optional, by default /sapmedia_extract/NW/SWPM)
#netweaver_sapcar_exe = "your_sapcar_exe_file_path"
#netweaver_swpm_sar = "your_swpm_sar_file_path"
# Folder where needed SAR executables (sapexe, sapdbexe) are stored, relative to the netweaver_inst_media mounting point
#netweaver_sapexe_folder = "download_basket"
# Additional media archives or folders (added in start_dir.cd), relative to the netweaver_inst_media mounting point
#netweaver_additional_dvds = ["dvd1", "dvd2"]
# IP addresses of the machines hosting Netweaver instances
#netweaver_ips = ["192.168.XXX.Y+2", "192.168.XXX.Y+3", "192.168.XXX.Y+4", "192.168.XXX.Y+5"]
#netweaver_virtual_ips = ["192.168.XXX.Y+6", "192.168.XXX.Y+7", "192.168.XXX.Y+8", "192.168.XXX.Y+9"]
# Netweaver installation configuration
# Netweaver system identifier. The system identifier must be composed by 3 uppercase chars/digits string starting always with a character (there are some restricted options)
#netweaver_sid = "HA1"
# Netweaver ASCS instance number. It's composed of 2 integers string
#netweaver_ascs_instance_number = "00"
# Netweaver ERS instance number. It's composed of 2 integers string
#netweaver_ers_instance_number = "10"
# Netweaver PAS instance number. If additional AAS machines are deployed, they get the next number starting from the PAS instance number. It's composed of 2 integers string
#netweaver_pas_instance_number = "01"
# NetWeaver or S/4HANA master password (length 10-14, ASCII preferred). For detailed password rules see: doc/sap_passwords.md
#netweaver_master_password = "SuSE1234"