diff --git a/.github/hana-netweaver-tf-only.tfvars b/.github/hana-netweaver-tf-only.tfvars new file mode 100644 index 000000000..59153aa65 --- /dev/null +++ b/.github/hana-netweaver-tf-only.tfvars @@ -0,0 +1,44 @@ +# the following 2 vars are aquired via ENV +# qemu_uri = +# source_image = + +hana_inst_media = "10.162.32.134:/sapdata/sap_inst_media/51053787" +iprange = "192.168.25.0/24" + +storage_pool = "terraform" + +# Enable pre deployment to automatically copy the pillar files and create cluster ssh keys +pre_deployment = true + +# For iscsi, it will deploy a new machine hosting an iscsi service +shared_storage_type = "iscsi" +ha_sap_deployment_repo = "https://download.opensuse.org/repositories/network:/ha-clustering:/sap-deployments:/devel" + +monitoring_enabled = true + +# don't use salt for this test +provisioner = "" + +# Netweaver variables + +# Enable/disable Netweaver deployment +netweaver_enabled = true + +# NFS share with netweaver installation folders +netweaver_inst_media = "10.162.32.134:/sapdata/sap_inst_media" +netweaver_swpm_folder = "SWPM_10_SP26_6" + +# Install NetWeaver +netweaver_sapexe_folder = "kernel_nw75_sar" +netweaver_additional_dvds = ["51050829_3", "51053787"] + + +# DRBD variables + +# Enable the DRBD cluster for nfs +drbd_enabled = true + +# IP of DRBD cluster +drbd_shared_storage_type = "iscsi" + +devel_mode = false diff --git a/.github/workflows/tf-validation.yml b/.github/workflows/tf-validation.yml new file mode 100644 index 000000000..26d2a0b4b --- /dev/null +++ b/.github/workflows/tf-validation.yml @@ -0,0 +1,19 @@ +# github-actions workflow +# this test will just run terraform without salt +name: e2e tests + +on: [pull_request] + +jobs: + terraform-sap-deployment: + runs-on: self-hosted + + steps: + - uses: actions/checkout@v2 + + - name: terraform apply + run: /tmp/terraform-apply.sh + + - name: terraform destroy + if: ${{ always() }} + run: /tmp/terraform-destroy.sh diff --git a/.gitignore b/.gitignore index 
41dbb44ad..1b9f65479 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,6 @@ **/.terraform **/terraform.tfstate* -**/terraform.tfvars +**/terraform*.tfvars azure/terraform/provision/node0_id_rsa azure/terraform/provision/node0_id_rsa.pub azure/terraform/provision/node1_id_rsa @@ -9,8 +9,10 @@ azure/terraform/provision/node1_id_rsa.pub salt/hana_node/files/sshkeys salt/hana_node/files/pillar/* salt/drbd_node/files/pillar/* +salt/netweaver_node/files/pillar/* !salt/hana_node/files/pillar/top.sls !salt/drbd_node/files/pillar/top.sls +!salt/netweaver_node/files/pillar/top.sls # Dev specific **/*.swp diff --git a/README.md b/README.md index f5b619040..e067881bf 100644 --- a/README.md +++ b/README.md @@ -29,7 +29,26 @@ For fine tuning refer to variable specification. - [templates](doc/deployment-templates.md) -## Rationale +## Design + +This project is based in [terraform](https://www.terraform.io/) and [salt](https://www.saltstack.com/) usage. + +Components: + +- **terraform**: Terraform is used to create the required infrastructure in the specified provider. The code is divided in different terraform modules to make the code modular and more maintanable. +- **salt**: Salt configures all the created machines by terraform based in the provided pillar files that give the option to customize the deployment. + +## Components + +The project can deploy and configure the next components (they can be enabled/disabled through configuration options): + +- SAP HANA environment: The HANA deployment is configurable. It might be deployed as a single HANA database, a dual configuration with system replication, and a HA cluster can be set in top of that. +- ISCSI server: The ISCSI server provides a network based storage mostly used by sbd fencing mechanism. +- Monitoring services server: The monitoring solution is based in [prometheus](https://prometheus.io) and [grafana](https://grafana.com/) and it provides informative and customizable dashboards to the users and administrators. 
+- DRBD cluster: The DRBD cluster is used to mount a HA NFS server in top of it to mount NETWEAVER shared files. +- SAP NETWEAVER environment: A SAP NETWEAVER environment with ASCS, ERS, PAS and AAS instances can be deployed using HANA database as storage. + +## Project structure This project is organized in folders containing the Terraform configuration files per Public or Private Cloud providers, each also containing documentation relevant to the use of the configuration files and to the cloud provider itself. @@ -44,3 +63,8 @@ These are links to find certified systems for each provider: - [SAP Certified IaaS Platforms for GCP](https://www.sap.com/dmc/exp/2014-09-02-hana-hardware/enEN/iaas.html#categories=Google%20Cloud%20Platform) - [SAP Certified IaaS Platforms for Azure](https://www.sap.com/dmc/exp/2014-09-02-hana-hardware/enEN/iaas.html#categories=Microsoft%20Azure) (Be carreful with Azure, **clustering** means scale-out scenario) + + +## Troubleshooting + +In the case you have some issue, take a look at the [troubleshooting guide](doc/troubleshooting.md) diff --git a/aws/README.md b/aws/README.md index b6179630c..45af54017 100644 --- a/aws/README.md +++ b/aws/README.md @@ -106,8 +106,8 @@ Here how it should look like your user or group: ``` terraform init -terraform workspace new my-execution # optional -terraform workspace select my-execution # optional +terraform workspace new myexecution # optional +terraform workspace select myexecution # optional terraform plan terraform apply ``` @@ -134,79 +134,23 @@ The infrastructure deployed includes: By default it creates 3 instances in AWS: one for support services (mainly iSCSI as most other services - DHCP, NTP, etc - are provided by Amazon) and 2 cluster nodes, but this can be changed to deploy more cluster nodes as needed. -## Provisioning by Salt -By default, the cluster and HANA installation is done using Salt Formulas in foreground. 
-To customize this provisioning, you have to create the pillar files (cluster.sls and hana.sls) according to the examples in the [pillar_examples](../pillar_examples) folder (more information in the dedicated [README](../pillar_examples/README.md)) +# Specifications -# Specification: +In order to deploy the environment, different configurations are available through the terraform variables. These variables can be configured using a `terraform.tfvars` file. An example is available in [terraform.tfvars.example](./terraform.tvars.example). To find all the available variables check the [variables.tf](./variables.tf) file. -These are the relevant files and what each provides: +## QA deployment -- [provider.tf](provider.tf): definition of the providers being used in the terraform configuration. Mainly `aws` and `template`. +The project has been created in order to provide the option to run the deployment in a `Test` or `QA` mode. This mode only enables the packages coming properly from SLE channels, so no other packages will be used. Find more information [here](../doc/qa.md). -- [variables.tf](variables.tf): definition of variables used in the configuration. These include definition of the AMIs in use, number and type of instances, AWS region, etc. +## Pillar files configuration -- [keys.tf](keys.tf): definition of key to include in the instances to allow connection via SSH. +Besides the `terraform.tfvars` file usage to configure the deployment, a more advanced configuration is available through pillar files customization. Find more information [here](../pillar_examples/README.md). -- [network.tf](network.tf): definition of network resources (VPC, route table, Internet Gateway and security group) used by the infrastructure. +## Use already existing network resources -- [instances.tf](instances.tf): definition of the EC2 instances to create on deployment. 
+The usage of already existing network resources (vpc and security groups) can be done configuring the `terraform.tfvars` file and adjusting some variables. The example of how to use them is available at [terraform.tfvars.example](terraform.tfvars.example). -- [salt_provisioner.tf](salt_provisioner.tf): salt provisioning resources. - -- [salt_provisioner_script.tpl](../salt/salt_provisioner_script.tpl): template code for the initialization script for the servers. This will add the salt-minion if needed and execute the SALT deployment. - -- [outputs.tf](outputs.tf): definition of outputs of the terraform configuration. - -- [remote-state.sample](remote-state.sample): sample file for the definition of the backend to [store the Terraform state file remotely](create_remote_state). - -- [terraform.tfvars.example](terraform.tfvars.example): file containing initialization values for variables used throughout the configuration. **Rename/Duplicate this file to terraform.tfvars and edit the content with your values before use**. - -#### Variables - -In [terraform.tfvars](terraform.tfvars.example) there are a number of variables that control what is deployed. Some of these variables are: - -* **instancetype**: instance type to use for the cluster nodes; basically the "size" (number of vCPUS and memory) of the instance. Defaults to `t2.micro`. -* **hana_data_disk_type**: disk type to use for HANA (gp2 by default). -* **ninstances**: number of cluster nodes to deploy. Defaults to 2. -* **aws_region**: AWS region where to deploy the configuration. -* **public_key_location**: local path to the public SSH key associated with the private key file. This public key is configured in the file $HOME/.ssh/authorized_keys of the administration user in the remote virtual machines. -* **private_key_location**: local path to the private SSH key associated to the public key from the previous line. -* **aws_access_key_id**: AWS access key id. 
-* **aws_secret_access_key**: AWS secret access key. -* **aws_credentials**: path to the `aws-cli` credentials file. This is required to configure `aws-cli` in the instances so that they can access the S3 bucket containing the HANA installation master. -* **name**: hostname for the hana node without the domain part. -* **init_type**: initialization script parameter that controls what is deployed in the cluster nodes. Valid values are `all` (installs HANA and configures cluster), `skip-hana` (does not install HANA, but configures cluster) and `skip-cluster` (installs HANA, but does not configure cluster). Defaults to `all`. -* **hana_inst_master**: path to the `S3 Bucket` containing the HANA installation master. -* **hana_inst_folder**: path where HANA installation master will be downloaded from `S3 Bucket`. -* **hana_disk_device**: device used by node where HANA will be installed. -* **hana_fstype**: filesystem type used for HANA installation (xfs by default). -* **iscsidev**: device used by the iscsi server. -* **iscsi_disks**: attached partitions number for iscsi server. -* **cluster_ssh_pub**: SSH public key name (must match with the key copied in sshkeys folder) -* **cluster_ssh_key**: SSH private key name (must match with the key copied in sshkeys folder) -* **ha_sap_deployment_repo**: Repository with HA and Salt formula packages. The latest RPM packages can be found at [https://download.opensuse.org/repositories/network:/ha-clustering:/Factory/{YOUR OS VERSION}](https://download.opensuse.org/repositories/network:/ha-clustering:/Factory/) -* **scenario_type**: SAP HANA scenario type. Available options: `performance-optimized` and `cost-optimized`. -* **provisioner**: select the desired provisioner to configure the nodes. Salt is used by default: [salt](../salt). Let it empty to disable the provisioning part. -* **background**: run the provisioning process in background finishing terraform execution. 
-* **reg_code**: registration code for the installed base product (Ex.: SLES for SAP). This parameter is optional. If informed, the system will be registered against the SUSE Customer Center. -* **reg_email**: email to be associated with the system registration. This parameter is optional. -* **reg_additional_modules**: additional optional modules and extensions to be registered (Ex.: Containers Module, HA module, Live Patching, etc). The variable is a key-value map, where the key is the _module name_ and the value is the _registration code_. If the _registration code_ is not needed, set an empty string as value. The module format must follow SUSEConnect convention: - - `//` - - *Example:* Suggested modules for SLES for SAP 15 - - sle-module-basesystem/15/x86_64 - sle-module-desktop-applications/15/x86_64 - sle-module-server-applications/15/x86_64 - sle-ha/15/x86_64 (use the same regcode as SLES for SAP) - sle-module-sap-applications/15/x86_64 - - For more information about registration, check the ["Registering SUSE Linux Enterprise and Managing Modules/Extensions"](https://www.suse.com/documentation/sles-15/book_sle_deployment/data/cha_register_sle.html) guide. - -* **additional_packages**: Additional packages to add to the guest machines. -* **hosts_ips**: Each cluster nodes IP address (sequential order). Mandatory to have a generic `/etc/hosts` file. - -[Specific QA variables](../doc/qa.md#specific-qa-variables) +**Important: In order to use the deployment with an already existing vpc, it must have an internet gateway attached.** ### Relevant Details @@ -220,8 +164,6 @@ There are some fixed values used throughout the terraform configuration: - The cluster nodes have a second disk volume that is being used for Hana installation. # Advanced Usage - - # notes: **Important**: If you want to use remote terraform states, first follow the [procedure to create a remote terraform state](create_remote_state). 
@@ -235,11 +177,21 @@ If the use of a private/custom image is required (for example, to perform the Bu To define the custom AMI in terraform, you should use the [terraform.tfvars](terraform.tfvars.example) file: ``` - # Custom AMI for nodes -sles4sap = { - "eu-central-1" = "ami-xxxxxxxxxxxxxxxxx" -} +hana_os_image = "ami-xxxxxxxxxxxxxxxxx" +``` + +You could also use an image available in the AWS store, in human readable form: + ``` +hana_os_image = "suse-sles-sap-15-sp1-byos" +``` + +An image owner can also be specified: +``` +hana_os_owner = "amazon" +``` + +OS for each module can be configured independently. After an `apply` command, terraform will deploy the insfrastructure to the cloud and ouput the public IP addresses and names of the iSCSI server and the cluster nodes. Connect using `ssh` as the user `ec2-user`, for example: @@ -267,15 +219,15 @@ terraform apply -var aws_region=eu-central-1 -var instancetype=m4.large Will deploy 2 `m4.large` instances in Frankfurt, instead of the `m4.2xlarge` default ones. The iSCSI server is always deployed with the `t2.micro` type instance. -Finally, the number of cluster nodes can be modified with the option `-var ninstances`. For example: +Finally, the number of cluster nodes can be modified with the option `-var hana_count`. For example: ``` -terraform apply -var aws_region=eu-central-1 -var ninstances=4 +terraform apply -var aws_region=eu-central-1 -var hana_count=4 ``` Will deploy in Frankfurt 1 `t2.micro` instance as an iSCSI server, and 4 `m4.2xlarge` instances as cluster nodes. -All this means that basically the default command `terraform apply` and be also written as `terraform apply -var instancetype=m4.2xlarge -var ninstances=2`. +All this means that basically the default command `terraform apply` and be also written as `terraform apply -var instancetype=m4.2xlarge -var hana_count=2`. @@ -552,8 +504,8 @@ Examples of the JSON files used in this document have been added to this repo. 
## Logs -This configuration is leaving logs in /tmp folder in each of the instances. Connect as `ssh ec2-user@`, then do a `sudo su -` and check the following files: +This configuration is leaving logs in `/var/log` folder in each of the instances. Connect as `ssh ec2-user@`, then do a `sudo su -` and check the following files: -* **/tmp/provisioning.log**: This is the global log file, inside it you will find the logs for user_data, salt-deployment and salt-formula. -* **/tmp/salt-deployment.log**: Check here the debug log for the salt-deployment if you need to troubleshoot something. -* **/tmp/salt-formula.log**: Same as above but for salt-formula. +* **/var/log/provisioning.log**: This is the global log file, inside it you will find the logs for user_data, salt-predeployment and salt-deployment +* **/var/log/salt-predeployment.log**: Check here the debug log for the salt pre-deployment execution if you need to troubleshoot something. +* **/var/log/salt-deployment.log**: Same as above but for the final SAP/HA/DRBD deployments salt execution logs. diff --git a/aws/infrastructure.tf b/aws/infrastructure.tf new file mode 100644 index 000000000..75d988a7f --- /dev/null +++ b/aws/infrastructure.tf @@ -0,0 +1,255 @@ +# Configure the AWS Provider +provider "aws" { + version = "~> 2.7" + region = var.aws_region +} + +data "aws_vpc" "current-vpc" { + count = var.vpc_id != "" ? 1 : 0 + id = var.vpc_id +} + +data "aws_internet_gateway" "current-gateway" { + count = var.vpc_id != "" ? 1 : 0 + filter { + name = "attachment.vpc-id" + values = [var.vpc_id] + } +} + +locals { + vpc_id = var.vpc_id == "" ? aws_vpc.vpc.0.id : var.vpc_id + internet_gateway = var.vpc_id == "" ? aws_internet_gateway.igw.0.id : data.aws_internet_gateway.current-gateway.0.internet_gateway_id + security_group_id = var.security_group_id != "" ? var.security_group_id : aws_security_group.secgroup.0.id + vpc_address_range = var.vpc_id == "" ? var.vpc_address_range : (var.vpc_address_range == "" ? 
data.aws_vpc.current-vpc.0.cidr_block : var.vpc_address_range) + + infra_subnet_address_range = var.infra_subnet_address_range != "" ? var.infra_subnet_address_range : cidrsubnet(local.vpc_address_range, 8, 0) + + hana_subnet_address_range = length(var.hana_subnet_address_range) != 0 ? var.hana_subnet_address_range : [ + for index in range(var.hana_count) : cidrsubnet(local.vpc_address_range, 8, index + 1)] + + # The 2 is hardcoded because we create 2 subnets for NW always + netweaver_subnet_address_range = length(var.netweaver_subnet_address_range) != 0 ? var.netweaver_subnet_address_range : [ + for index in range(2) : cidrsubnet(local.vpc_address_range, 8, index + var.hana_count + 1)] + + # The 2 is hardcoded considering we create 2 subnets for NW always + drbd_subnet_address_range = length(var.drbd_subnet_address_range) != 0 ? var.drbd_subnet_address_range : [ + for index in range(2) : cidrsubnet(local.vpc_address_range, 8, index + var.hana_count + 2 + 1)] +} + +# EFS storage for nfs share used by Netweaver for /usr/sap/{sid} and /sapmnt +# It will be created for netweaver only when drbd is disabled +resource "aws_efs_file_system" "netweaver-efs" { + count = var.netweaver_enabled == true && var.drbd_enabled == false ? 1 : 0 + creation_token = "${terraform.workspace}-netweaver-efs" + performance_mode = var.netweaver_efs_performance_mode + + tags = { + Name = "${terraform.workspace}-efs" + } +} + +# AWS key pair +resource "aws_key_pair" "key-pair" { + key_name = "${terraform.workspace} - terraform" + public_key = file(var.public_key_location) +} + +# AWS availability zones +data "aws_availability_zones" "available" { + state = "available" +} + +# Network resources: VPC, Internet Gateways, Security Groups for the EC2 instances and for the EFS file system +resource "aws_vpc" "vpc" { + count = var.vpc_id == "" ? 
1 : 0 + cidr_block = local.vpc_address_range + enable_dns_hostnames = true + enable_dns_support = true + + tags = { + Name = "${terraform.workspace}-vpc" + Workspace = terraform.workspace + } +} + +resource "aws_internet_gateway" "igw" { + count = var.vpc_id == "" ? 1 : 0 + vpc_id = local.vpc_id + + tags = { + Name = "${terraform.workspace}-igw" + Workspace = terraform.workspace + } +} + +resource "aws_subnet" "infra-subnet" { + vpc_id = local.vpc_id + cidr_block = local.infra_subnet_address_range + availability_zone = element(data.aws_availability_zones.available.names, 0) + + tags = { + Name = "${terraform.workspace}-infra-subnet" + Workspace = terraform.workspace + } +} + +resource "aws_route_table" "route-table" { + vpc_id = local.vpc_id + + tags = { + Name = "${terraform.workspace}-hana-route-table" + Workspace = terraform.workspace + } +} + +resource "aws_route_table_association" "infra-subnet-route-association" { + subnet_id = aws_subnet.infra-subnet.id + route_table_id = aws_route_table.route-table.id +} + +resource "aws_route" "public" { + route_table_id = aws_route_table.route-table.id + destination_cidr_block = "0.0.0.0/0" + gateway_id = local.internet_gateway +} + +locals { + create_security_group = var.security_group_id == "" ? 1 : 0 + create_security_group_monitoring = var.security_group_id == "" && var.monitoring_enabled == true ? 
1 : 0 +} + +resource "aws_security_group" "secgroup" { + count = local.create_security_group + name = "${terraform.workspace}-sg" + vpc_id = local.vpc_id + + tags = { + Name = "${terraform.workspace}-sg" + Workspace = terraform.workspace + } +} + +resource "aws_security_group_rule" "outall" { + count = local.create_security_group + type = "egress" + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + + security_group_id = local.security_group_id +} + +resource "aws_security_group_rule" "local" { + count = local.create_security_group + type = "ingress" + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = [local.vpc_address_range] + + security_group_id = local.security_group_id +} + +resource "aws_security_group_rule" "http" { + count = local.create_security_group + type = "ingress" + from_port = 80 + to_port = 80 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + + security_group_id = local.security_group_id +} + +resource "aws_security_group_rule" "https" { + count = local.create_security_group + type = "ingress" + from_port = 443 + to_port = 443 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + + security_group_id = local.security_group_id +} + +resource "aws_security_group_rule" "hawk" { + count = local.create_security_group + type = "ingress" + from_port = 7630 + to_port = 7630 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + + security_group_id = local.security_group_id +} + +resource "aws_security_group_rule" "ssh" { + count = local.create_security_group + type = "ingress" + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + + security_group_id = local.security_group_id +} + + +# Monitoring rules +resource "aws_security_group_rule" "hanadb_exporter" { + count = local.create_security_group_monitoring + type = "ingress" + from_port = 9668 + to_port = 9668 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + + security_group_id = local.security_group_id +} + + +resource "aws_security_group_rule" 
"node_exporter" { + count = local.create_security_group_monitoring + type = "ingress" + from_port = 9100 + to_port = 9100 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + + security_group_id = local.security_group_id +} + +resource "aws_security_group_rule" "ha_exporter" { + count = local.create_security_group_monitoring + type = "ingress" + from_port = 9664 + to_port = 9664 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + + security_group_id = local.security_group_id +} + +resource "aws_security_group_rule" "prometheus_server" { + count = local.create_security_group_monitoring + type = "ingress" + from_port = 9090 + to_port = 9090 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + + security_group_id = local.security_group_id +} + +resource "aws_security_group_rule" "grafana_server" { + count = local.create_security_group_monitoring + type = "ingress" + from_port = 3000 + to_port = 3000 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + + security_group_id = local.security_group_id +} diff --git a/aws/instances.tf b/aws/instances.tf deleted file mode 100644 index ebc349de6..000000000 --- a/aws/instances.tf +++ /dev/null @@ -1,115 +0,0 @@ -# Launch SLES-HAE of SLES4SAP cluster nodes - -data "aws_availability_zones" "available" { - state = "available" -} - -# EC2 Instances - -resource "aws_instance" "iscsisrv" { - ami = var.iscsi_srv[var.aws_region] - instance_type = "t2.micro" - key_name = aws_key_pair.hana-key-pair.key_name - associate_public_ip_address = true - subnet_id = element(aws_subnet.hana-subnet.*.id, 0) - private_ip = "10.0.0.254" - security_groups = [aws_security_group.secgroup.id] - availability_zone = element(data.aws_availability_zones.available.names, 0) - - root_block_device { - volume_type = "gp2" - volume_size = "20" - } - - ebs_block_device { - volume_type = "gp2" - volume_size = "10" - device_name = "/dev/xvdd" - } - - volume_tags = { - Name = "${terraform.workspace}-iscsi" - } - - tags = { - Name = "${terraform.workspace} - iSCSI Server" - 
Workspace = terraform.workspace - } -} - -module "sap_cluster_policies" { - enabled = var.ninstances > 0 ? true : false - source = "./modules/sap_cluster_policies" - name = var.name - aws_region = var.aws_region - cluster_instances = aws_instance.clusternodes.*.id - route_table_id = aws_route_table.route-table.id -} - -resource "aws_instance" "clusternodes" { - count = var.ninstances - ami = var.sles4sap[var.aws_region] - instance_type = var.instancetype - key_name = aws_key_pair.hana-key-pair.key_name - associate_public_ip_address = true - subnet_id = element(aws_subnet.hana-subnet.*.id, count.index) - private_ip = element(var.host_ips, count.index) - security_groups = [aws_security_group.secgroup.id] - availability_zone = element(data.aws_availability_zones.available.names, count.index) - source_dest_check = false - iam_instance_profile = module.sap_cluster_policies.cluster_profile_name[0] - - root_block_device { - volume_type = "gp2" - volume_size = "60" - } - - ebs_block_device { - volume_type = var.hana_data_disk_type - volume_size = "60" - device_name = "/dev/xvdd" - } - - volume_tags = { - Name = "${terraform.workspace}-${var.name}${var.ninstances > 1 ? "0${count.index + 1}" : ""}" - } - - tags = { - Name = "${terraform.workspace} - ${var.name}${var.ninstances > 1 ? "0${count.index + 1}" : ""}" - Workspace = terraform.workspace - Cluster = "${terraform.workspace}-${var.name}${var.ninstances > 1 ? "0${count.index + 1}" : ""}" - } -} - - -resource "aws_instance" "monitoring" { - count = var.monitoring_enabled == true ? 
1 : 0 - ami = var.sles4sap[var.aws_region] - instance_type = "t2.micro" - key_name = aws_key_pair.hana-key-pair.key_name - associate_public_ip_address = true - subnet_id = element(aws_subnet.hana-subnet.*.id, 0) - private_ip = var.monitoring_srv_ip - security_groups = [aws_security_group.secgroup.id] - availability_zone = element(data.aws_availability_zones.available.names, 0) - - root_block_device { - volume_type = "gp2" - volume_size = "20" - } - - ebs_block_device { - volume_type = "gp2" - volume_size = "10" - device_name = "/dev/xvdd" - } - - volume_tags = { - Name = "${terraform.workspace}-monitoring" - } - - tags = { - Name = "${terraform.workspace} - Monitoring" - Workspace = terraform.workspace - } -} diff --git a/aws/keys.tf b/aws/keys.tf deleted file mode 100644 index 9c00f35de..000000000 --- a/aws/keys.tf +++ /dev/null @@ -1,4 +0,0 @@ -resource "aws_key_pair" "hana-key-pair" { - key_name = "${terraform.workspace} - terraform" - public_key = file(var.public_key_location) -} diff --git a/aws/main.tf b/aws/main.tf index b321c9fd9..d32f1bd4f 100644 --- a/aws/main.tf +++ b/aws/main.tf @@ -1,35 +1,244 @@ -module "netweaver_node" { - source = "./modules/netweaver_node" - netweaver_count = var.netweaver_enabled == true ? 4 : 0 - instancetype = var.netweaver_instancetype - name = "netweaver" +module "local_execution" { + source = "../generic_modules/local_exec" + enabled = var.pre_deployment +} + +# This locals entry is used to store the IP addresses of all the machines. 
+# Autogenerated addresses example based in 10.0.0.0/16 +# Iscsi server: 10.0.0.4 +# Monitoring: 10.0.0.5 +# Hana ips: 10.0.1.10, 10.0.2.11 (hana machines must be in different subnets) +# Hana cluster vip: 192.168.1.10 (virtual ip address must be in a different range than the vpc) +# Netweaver ips: 10.0.3.30, 10.0.4.31, 10.0.3.32, 10.0.4.33 (netweaver ASCS and ERS must be in different subnets) +# Netweaver virtual ips: 192.168.1.30, 192.168.1.31, 192.168.1.32, 192.168.1.33 (virtual ip addresses must be in a different range than the vpc) +# DRBD ips: 10.0.5.20, 10.0.6.21 +# DRBD cluster vip: 192.168.1.20 (virtual ip address must be in a different range than the vpc) +# If the addresses are provided by the user will always have preference +locals { + iscsi_ip = var.iscsi_srv_ip != "" ? var.iscsi_srv_ip : cidrhost(local.infra_subnet_address_range, 4) + monitoring_ip = var.monitoring_srv_ip != "" ? var.monitoring_srv_ip : cidrhost(local.infra_subnet_address_range, 5) + + # The next locals are used to map the ip index with the subnet range (something like python enumerate method) + hana_ip_start = 10 + hana_ips = length(var.hana_ips) != 0 ? var.hana_ips : [for index in range(var.hana_count) : cidrhost(element(local.hana_subnet_address_range, index % 2), index + local.hana_ip_start)] + hana_cluster_vip = var.hana_cluster_vip != "" ? var.hana_cluster_vip : cidrhost(var.virtual_address_range, local.hana_ip_start) + + drbd_ip_start = 20 + drbd_ips = length(var.drbd_ips) != 0 ? var.drbd_ips : [for index in range(2) : cidrhost(element(local.drbd_subnet_address_range, index % 2), index + local.drbd_ip_start)] + drbd_cluster_vip = var.drbd_cluster_vip != "" ? var.drbd_cluster_vip : cidrhost(var.virtual_address_range, local.drbd_ip_start) + + # range(4) hardcoded as we always deploy 4 nw machines + netweaver_ip_start = 30 + netweaver_ips = length(var.netweaver_ips) != 0 ? 
var.netweaver_ips : [for index in range(4) : cidrhost(element(local.netweaver_subnet_address_range, index % 2), index + local.netweaver_ip_start)] + netweaver_virtual_ips = length(var.netweaver_virtual_ips) != 0 ? var.netweaver_virtual_ips : [for ip_index in range(local.netweaver_ip_start, local.netweaver_ip_start + 4) : cidrhost(var.virtual_address_range, ip_index)] +} + +module "drbd_node" { + source = "./modules/drbd_node" + drbd_count = var.drbd_enabled == true ? 2 : 0 + drbd_instancetype = var.drbd_instancetype + min_instancetype = var.min_instancetype aws_region = var.aws_region availability_zones = data.aws_availability_zones.available.names - sles4sap_images = var.sles4sap - vpc_id = aws_vpc.vpc.id - vpc_cidr_block = aws_vpc.vpc.cidr_block - key_name = aws_key_pair.hana-key-pair.key_name - security_group_id = aws_security_group.secgroup.id + os_image = var.drbd_os_image + os_owner = var.drbd_os_owner + vpc_id = local.vpc_id + subnet_address_range = local.drbd_subnet_address_range + key_name = aws_key_pair.key-pair.key_name + security_group_id = local.security_group_id route_table_id = aws_route_table.route-table.id - efs_performance_mode = var.netweaver_efs_performance_mode aws_credentials = var.aws_credentials aws_access_key_id = var.aws_access_key_id aws_secret_access_key = var.aws_secret_access_key - s3_bucket = var.netweaver_s3_bucket - hana_ip = var.hana_cluster_vip - host_ips = var.netweaver_ips - virtual_host_ips = var.netweaver_virtual_ips + host_ips = local.drbd_ips + drbd_cluster_vip = local.drbd_cluster_vip + drbd_data_disk_size = var.drbd_data_disk_size + drbd_data_disk_type = var.drbd_data_disk_type public_key_location = var.public_key_location private_key_location = var.private_key_location - iscsi_srv_ip = aws_instance.iscsisrv.private_ip + cluster_ssh_pub = var.cluster_ssh_pub + cluster_ssh_key = var.cluster_ssh_key + iscsi_srv_ip = module.iscsi_server.iscsisrv_ip + reg_code = var.reg_code + reg_email = var.reg_email + reg_additional_modules 
= var.reg_additional_modules + additional_packages = var.additional_packages + ha_sap_deployment_repo = var.ha_sap_deployment_repo + devel_mode = var.devel_mode + monitoring_enabled = var.monitoring_enabled + provisioner = var.provisioner + background = var.background + qa_mode = var.qa_mode + on_destroy_dependencies = [ + aws_route.public, + aws_security_group_rule.ssh, + aws_security_group_rule.outall + ] +} + +module "iscsi_server" { + source = "./modules/iscsi_server" + aws_region = var.aws_region + availability_zones = data.aws_availability_zones.available.names + subnet_ids = aws_subnet.infra-subnet.*.id + os_image = var.iscsi_os_image + os_owner = var.iscsi_os_owner + iscsi_instancetype = var.iscsi_instancetype + min_instancetype = var.min_instancetype + key_name = aws_key_pair.key-pair.key_name + security_group_id = local.security_group_id + private_key_location = var.private_key_location + iscsi_srv_ip = local.iscsi_ip + iscsidev = var.iscsidev + iscsi_disks = var.iscsi_disks + reg_code = var.reg_code + reg_email = var.reg_email + reg_additional_modules = var.reg_additional_modules + additional_packages = var.additional_packages + ha_sap_deployment_repo = var.ha_sap_deployment_repo + provisioner = var.provisioner + background = var.background + qa_mode = var.qa_mode + on_destroy_dependencies = [ + aws_route_table_association.infra-subnet-route-association, + aws_route.public, + aws_security_group_rule.ssh, + aws_security_group_rule.outall + ] +} + +module "netweaver_node" { + source = "./modules/netweaver_node" + netweaver_count = var.netweaver_enabled == true ? 
4 : 0 + instancetype = var.netweaver_instancetype + name = "netweaver" + aws_region = var.aws_region + availability_zones = data.aws_availability_zones.available.names + os_image = var.netweaver_os_image + os_owner = var.netweaver_os_owner + vpc_id = local.vpc_id + subnet_address_range = local.netweaver_subnet_address_range + key_name = aws_key_pair.key-pair.key_name + security_group_id = local.security_group_id + route_table_id = aws_route_table.route-table.id + efs_enable_mount = var.netweaver_enabled == true && var.drbd_enabled == false ? true : false + efs_file_system_id = join("", aws_efs_file_system.netweaver-efs.*.id) + aws_credentials = var.aws_credentials + aws_access_key_id = var.aws_access_key_id + aws_secret_access_key = var.aws_secret_access_key + s3_bucket = var.netweaver_s3_bucket + netweaver_product_id = var.netweaver_product_id + netweaver_swpm_folder = var.netweaver_swpm_folder + netweaver_sapcar_exe = var.netweaver_sapcar_exe + netweaver_swpm_sar = var.netweaver_swpm_sar + netweaver_swpm_extract_dir = var.netweaver_swpm_extract_dir + netweaver_sapexe_folder = var.netweaver_sapexe_folder + netweaver_additional_dvds = var.netweaver_additional_dvds + netweaver_nfs_share = var.drbd_enabled ? 
"${local.drbd_cluster_vip}:/HA1" : "${join("", aws_efs_file_system.netweaver-efs.*.dns_name)}:" + hana_ip = local.hana_cluster_vip + host_ips = local.netweaver_ips + virtual_host_ips = local.netweaver_virtual_ips + public_key_location = var.public_key_location + private_key_location = var.private_key_location + iscsi_srv_ip = module.iscsi_server.iscsisrv_ip + cluster_ssh_pub = var.cluster_ssh_pub + cluster_ssh_key = var.cluster_ssh_key + reg_code = var.reg_code + reg_email = var.reg_email + reg_additional_modules = var.reg_additional_modules + ha_sap_deployment_repo = var.ha_sap_deployment_repo + devel_mode = var.devel_mode + provisioner = var.provisioner + background = var.background + monitoring_enabled = var.monitoring_enabled + on_destroy_dependencies = [ + aws_route.public, + aws_security_group_rule.ssh, + aws_security_group_rule.outall + ] +} + +module "hana_node" { + source = "./modules/hana_node" + hana_count = var.hana_count + instancetype = var.instancetype + name = var.name + init_type = var.init_type + scenario_type = var.scenario_type + aws_region = var.aws_region + availability_zones = data.aws_availability_zones.available.names + os_image = var.hana_os_image + os_owner = var.hana_os_owner + vpc_id = local.vpc_id + subnet_address_range = local.hana_subnet_address_range + key_name = aws_key_pair.key-pair.key_name + security_group_id = local.security_group_id + route_table_id = aws_route_table.route-table.id + aws_credentials = var.aws_credentials + aws_access_key_id = var.aws_access_key_id + aws_secret_access_key = var.aws_secret_access_key + host_ips = local.hana_ips + hana_data_disk_type = var.hana_data_disk_type + hana_inst_master = var.hana_inst_master + hana_inst_folder = var.hana_inst_folder + hana_platform_folder = var.hana_platform_folder + hana_sapcar_exe = var.hana_sapcar_exe + hdbserver_sar = var.hdbserver_sar + hana_extract_dir = var.hana_extract_dir + hana_disk_device = var.hana_disk_device + hana_fstype = var.hana_fstype + 
hana_cluster_vip = local.hana_cluster_vip + private_key_location = var.private_key_location + iscsi_srv_ip = module.iscsi_server.iscsisrv_ip cluster_ssh_pub = var.cluster_ssh_pub cluster_ssh_key = var.cluster_ssh_key reg_code = var.reg_code reg_email = var.reg_email reg_additional_modules = var.reg_additional_modules + additional_packages = var.additional_packages ha_sap_deployment_repo = var.ha_sap_deployment_repo devel_mode = var.devel_mode + hwcct = var.hwcct + qa_mode = var.qa_mode + provisioner = var.provisioner + background = var.background + monitoring_enabled = var.monitoring_enabled + on_destroy_dependencies = [ + aws_route.public, + aws_security_group_rule.ssh, + aws_security_group_rule.outall + ] +} + +module "monitoring" { + source = "./modules/monitoring" + monitor_instancetype = var.monitor_instancetype + min_instancetype = var.min_instancetype + key_name = aws_key_pair.key-pair.key_name + security_group_id = local.security_group_id + monitoring_srv_ip = local.monitoring_ip + private_key_location = var.private_key_location + aws_region = var.aws_region + availability_zones = data.aws_availability_zones.available.names + os_image = var.monitoring_os_image + os_owner = var.monitoring_os_owner + subnet_ids = aws_subnet.infra-subnet.*.id + timezone = var.timezone + reg_code = var.reg_code + reg_email = var.reg_email + reg_additional_modules = var.reg_additional_modules + additional_packages = var.additional_packages + ha_sap_deployment_repo = var.ha_sap_deployment_repo provisioner = var.provisioner background = var.background monitoring_enabled = var.monitoring_enabled + hana_targets = concat(local.hana_ips, [local.hana_cluster_vip]) # we use the vip to target the active hana instance + drbd_targets = var.drbd_enabled ? local.drbd_ips : [] + netweaver_targets = var.netweaver_enabled ? 
local.netweaver_virtual_ips : [] + on_destroy_dependencies = [ + aws_route_table_association.infra-subnet-route-association, + aws_route.public, + aws_security_group_rule.ssh, + aws_security_group_rule.outall + ] } diff --git a/aws/modules/drbd_node/main.tf b/aws/modules/drbd_node/main.tf new file mode 100644 index 000000000..0cc6a355e --- /dev/null +++ b/aws/modules/drbd_node/main.tf @@ -0,0 +1,87 @@ +# drbd resources + +resource "aws_subnet" "drbd-subnet" { + count = var.drbd_count + vpc_id = var.vpc_id + cidr_block = element(var.subnet_address_range, count.index) + availability_zone = element(var.availability_zones, count.index) + + tags = { + Name = "${terraform.workspace}-drbd-subnet-${count.index + 1}" + Workspace = terraform.workspace + } +} + +resource "aws_route_table_association" "drbd-subnet-route-association" { + count = var.drbd_count + subnet_id = element(aws_subnet.drbd-subnet.*.id, count.index) + route_table_id = var.route_table_id +} + +resource "aws_route" "drbd-cluster-vip" { + count = var.drbd_count > 0 ? 1 : 0 + route_table_id = var.route_table_id + destination_cidr_block = "${var.drbd_cluster_vip}/32" + instance_id = aws_instance.drbd.0.id +} + +module "sap_cluster_policies" { + enabled = var.drbd_count > 0 ? true : false + source = "../../modules/sap_cluster_policies" + name = var.name + aws_region = var.aws_region + cluster_instances = aws_instance.drbd.*.id + route_table_id = var.route_table_id +} + +module "get_os_image" { + source = "../../modules/get_os_image" + os_image = var.os_image + os_owner = var.os_owner +} + +## drbd ec2 instance +resource "aws_instance" "drbd" { + count = var.drbd_count + ami = module.get_os_image.image_id + instance_type = var.drbd_instancetype == "" ? 
var.min_instancetype : var.drbd_instancetype + key_name = var.key_name + associate_public_ip_address = true + subnet_id = element(aws_subnet.drbd-subnet.*.id, count.index) + private_ip = element(var.host_ips, count.index) + vpc_security_group_ids = [var.security_group_id] + availability_zone = element(var.availability_zones, count.index) + source_dest_check = false + iam_instance_profile = module.sap_cluster_policies.cluster_profile_name[0] + + root_block_device { + volume_type = "gp2" + volume_size = "10" + } + + ebs_block_device { + volume_type = var.drbd_data_disk_type + volume_size = var.drbd_data_disk_size + device_name = "/dev/xvdd" + } + + volume_tags = { + Name = "${terraform.workspace}-${var.name}0${count.index + 1}" + } + + tags = { + Name = "${terraform.workspace} - ${var.name}0${count.index + 1}" + Workspace = terraform.workspace + "${terraform.workspace}-cluster" = "${var.name}0${count.index + 1}" + } +} + +module "drbd_on_destroy" { + source = "../../../generic_modules/on_destroy" + node_count = var.drbd_count + instance_ids = aws_instance.drbd.*.id + user = "ec2-user" + private_key_location = var.private_key_location + public_ips = aws_instance.drbd.*.public_ip + dependencies = var.on_destroy_dependencies +} diff --git a/aws/modules/drbd_node/outputs.tf b/aws/modules/drbd_node/outputs.tf new file mode 100644 index 000000000..3572edfcd --- /dev/null +++ b/aws/modules/drbd_node/outputs.tf @@ -0,0 +1,20 @@ +data "aws_instance" "drbd" { + count = var.drbd_count + instance_id = element(aws_instance.drbd.*.id, count.index) +} + +output "drbd_ip" { + value = data.aws_instance.drbd.*.private_ip +} + +output "drbd_public_ip" { + value = data.aws_instance.drbd.*.public_ip +} + +output "drbd_name" { + value = data.aws_instance.drbd.*.id +} + +output "drbd_public_name" { + value = data.aws_instance.drbd.*.public_dns +} diff --git a/aws/modules/drbd_node/salt_provisioner.tf b/aws/modules/drbd_node/salt_provisioner.tf new file mode 100644 index 
000000000..b9652bd1d --- /dev/null +++ b/aws/modules/drbd_node/salt_provisioner.tf @@ -0,0 +1,69 @@ +resource "null_resource" "drbd_provisioner" { + count = var.provisioner == "salt" ? var.drbd_count : 0 + + triggers = { + drbd_id = join(",", aws_instance.drbd.*.id) + } + + connection { + host = element(aws_instance.drbd.*.public_ip, count.index) + type = "ssh" + user = "ec2-user" + private_key = file(var.private_key_location) + } + + provisioner "file" { + source = var.aws_access_key_id == "" || var.aws_secret_access_key == "" ? var.aws_credentials : "/dev/null" + destination = "/tmp/credentials" + } + + provisioner "file" { + content = < 0 ? 1 : 0 + route_table_id = var.route_table_id + destination_cidr_block = "${var.hana_cluster_vip}/32" + instance_id = aws_instance.clusternodes.0.id +} + +module "sap_cluster_policies" { + enabled = var.hana_count > 0 ? true : false + source = "../../modules/sap_cluster_policies" + name = var.name + aws_region = var.aws_region + cluster_instances = aws_instance.clusternodes.*.id + route_table_id = var.route_table_id +} + +module "get_os_image" { + source = "../../modules/get_os_image" + os_image = var.os_image + os_owner = var.os_owner +} + +## EC2 HANA Instance +resource "aws_instance" "clusternodes" { + count = var.hana_count + ami = module.get_os_image.image_id + instance_type = var.instancetype + key_name = var.key_name + associate_public_ip_address = true + subnet_id = element(aws_subnet.hana-subnet.*.id, count.index) + private_ip = element(var.host_ips, count.index) + vpc_security_group_ids = [var.security_group_id] + availability_zone = element(var.availability_zones, count.index) + source_dest_check = false + iam_instance_profile = module.sap_cluster_policies.cluster_profile_name[0] + + root_block_device { + volume_type = "gp2" + volume_size = "60" + } + + ebs_block_device { + volume_type = var.hana_data_disk_type + volume_size = "60" + device_name = "/dev/xvdd" + } + + volume_tags = { + Name = 
"${terraform.workspace}-${var.name}0${count.index + 1}" + } + + tags = { + Name = "${terraform.workspace} - ${var.name}0${count.index + 1}" + Workspace = terraform.workspace + "${terraform.workspace}-cluster" = "${var.name}0${count.index + 1}" + } +} + +module "hana_on_destroy" { + source = "../../../generic_modules/on_destroy" + node_count = var.hana_count + instance_ids = aws_instance.clusternodes.*.id + user = "ec2-user" + private_key_location = var.private_key_location + public_ips = aws_instance.clusternodes.*.public_ip + dependencies = concat( + [aws_route_table_association.hana-subnet-route-association], + var.on_destroy_dependencies + ) +} diff --git a/aws/modules/hana_node/outputs.tf b/aws/modules/hana_node/outputs.tf new file mode 100644 index 000000000..5525116ad --- /dev/null +++ b/aws/modules/hana_node/outputs.tf @@ -0,0 +1,20 @@ +data "aws_instance" "clusternodes" { + count = var.hana_count + instance_id = element(aws_instance.clusternodes.*.id, count.index) +} + +output "cluster_nodes_ip" { + value = data.aws_instance.clusternodes.*.private_ip +} + +output "cluster_nodes_public_ip" { + value = data.aws_instance.clusternodes.*.public_ip +} + +output "cluster_nodes_name" { + value = data.aws_instance.clusternodes.*.id +} + +output "cluster_nodes_public_name" { + value = data.aws_instance.clusternodes.*.public_dns +} diff --git a/aws/modules/hana_node/salt_provisioner.tf b/aws/modules/hana_node/salt_provisioner.tf new file mode 100644 index 000000000..c5af8fe20 --- /dev/null +++ b/aws/modules/hana_node/salt_provisioner.tf @@ -0,0 +1,73 @@ +resource "null_resource" "hana_node_provisioner" { + count = var.provisioner == "salt" ? 
var.hana_count : 0 + + triggers = { + cluster_instance_ids = join(",", aws_instance.clusternodes.*.id) + } + + connection { + host = element(aws_instance.clusternodes.*.public_ip, count.index) + type = "ssh" + user = "ec2-user" + private_key = file(var.private_key_location) + } + + provisioner "file" { + source = var.aws_access_key_id == "" || var.aws_secret_access_key == "" ? var.aws_credentials : "/dev/null" + destination = "/tmp/credentials" + } + + provisioner "file" { + content = < 0 ? 1 : 0 - creation_token = "${terraform.workspace}-netweaver-efs" - performance_mode = var.efs_performance_mode - - tags = { - Name = "${terraform.workspace}-efs" - } -} - resource "aws_efs_mount_target" "netweaver-efs-mount-target" { - count = min(var.netweaver_count, 2) - file_system_id = element(aws_efs_file_system.netweaver-efs.*.id, 0) + count = var.netweaver_count > 0 && var.efs_enable_mount ? 2 : 0 + file_system_id = var.efs_file_system_id subnet_id = element(aws_subnet.netweaver-subnet.*.id, count.index) security_groups = [var.security_group_id] } module "sap_cluster_policies" { - enabled = var.netweaver_count > 0 ? true : false - source = "../../modules/sap_cluster_policies" - name = var.name - aws_region = var.aws_region - cluster_instances = aws_instance.netweaver.*.id - route_table_id = var.route_table_id + enabled = var.netweaver_count > 0 ? 
true : false + source = "../../modules/sap_cluster_policies" + name = var.name + aws_region = var.aws_region + cluster_instances = slice(aws_instance.netweaver.*.id, 0, min(var.netweaver_count, 2)) + route_table_id = var.route_table_id +} + +module "get_os_image" { + source = "../../modules/get_os_image" + os_image = var.os_image + os_owner = var.os_owner } resource "aws_instance" "netweaver" { count = var.netweaver_count - ami = var.sles4sap_images[var.aws_region] + ami = module.get_os_image.image_id instance_type = var.instancetype key_name = var.key_name associate_public_ip_address = true - subnet_id = element(aws_subnet.netweaver-subnet.*.id, count.index%2) # %2 is used because there are not more than 2 subnets + subnet_id = element(aws_subnet.netweaver-subnet.*.id, count.index % 2) # %2 is used because there are not more than 2 subnets private_ip = element(var.host_ips, count.index) - security_groups = [var.security_group_id] - availability_zone = element(var.availability_zones, count.index%2) + vpc_security_group_ids = [var.security_group_id] + availability_zone = element(var.availability_zones, count.index % 2) source_dest_check = false iam_instance_profile = module.sap_cluster_policies.cluster_profile_name[0] # We apply to all nodes to have the SAP data provider, even though some policies are only for the clustered nodes @@ -75,13 +70,33 @@ resource "aws_instance" "netweaver" { volume_size = "60" } + # Disk to store Netweaver software installation files + ebs_block_device { + volume_type = "gp2" + volume_size = "60" + device_name = "/dev/xvdd" + } + volume_tags = { - Name = "${terraform.workspace}-${var.name}${var.netweaver_count > 1 ? "0${count.index + 1}" : ""}" + Name = "${terraform.workspace}-${var.name}0${count.index + 1}" } tags = { - Name = "${terraform.workspace} - ${var.name}${var.netweaver_count > 1 ? "0${count.index + 1}" : ""}" - Workspace = terraform.workspace - Cluster = "${var.name}${var.netweaver_count > 1 ? 
"0${count.index + 1}" : ""}" + Name = "${terraform.workspace} - ${var.name}0${count.index + 1}" + Workspace = terraform.workspace + "${terraform.workspace}-cluster" = "${var.name}0${count.index + 1}" } } + +module "netweaver_on_destroy" { + source = "../../../generic_modules/on_destroy" + node_count = var.netweaver_count + instance_ids = aws_instance.netweaver.*.id + user = "ec2-user" + private_key_location = var.private_key_location + public_ips = aws_instance.netweaver.*.public_ip + dependencies = concat( + [aws_route_table_association.netweaver-subnet-route-association], + var.on_destroy_dependencies + ) +} diff --git a/aws/modules/netweaver_node/salt_provisioner.tf b/aws/modules/netweaver_node/salt_provisioner.tf index 0dbf71b9d..0e2417ed6 100644 --- a/aws/modules/netweaver_node/salt_provisioner.tf +++ b/aws/modules/netweaver_node/salt_provisioner.tf @@ -1,12 +1,3 @@ -# Template file for user_data used in resource instances -data "template_file" "salt_provisioner" { - template = file("../salt/salt_provisioner_script.tpl") - - vars = { - regcode = var.reg_code - } -} - resource "null_resource" "netweaver_provisioner" { count = var.provisioner == "salt" ? var.netweaver_count : 0 @@ -26,29 +17,19 @@ resource "null_resource" "netweaver_provisioner" { destination = "/tmp/credentials" } - provisioner "file" { - source = "../salt" - destination = "/tmp" - } - - provisioner "file" { - content = data.template_file.salt_provisioner.rendered - destination = "/tmp/salt_provisioner.sh" - } - provisioner "file" { content = < /tmp/provisioning.log ${var.background ? "&" : ""}", - "return_code=$? && sleep 1 && exit $return_code", - ] # Workaround to let the process start in background properly - } +module "netweaver_provision" { + source = "../../../generic_modules/salt_provisioner" + node_count = var.provisioner == "salt" ? 
var.netweaver_count : 0 + instance_ids = null_resource.netweaver_provisioner.*.id + user = "ec2-user" + private_key_location = var.private_key_location + public_ips = aws_instance.netweaver.*.public_ip + background = var.background } diff --git a/aws/modules/netweaver_node/variables.tf b/aws/modules/netweaver_node/variables.tf index b5c52346c..4aa2f351a 100644 --- a/aws/modules/netweaver_node/variables.tf +++ b/aws/modules/netweaver_node/variables.tf @@ -22,30 +22,14 @@ variable "availability_zones" { description = "Used availability zones" } -variable "sles4sap_images" { - type = map(string) - - default = { - "us-east-1" = "ami-027447d2b7312df2d" - "us-east-2" = "ami-099a51d3b131f3ce2" - "us-west-1" = "ami-0f213357578720889" - "us-west-2" = "ami-0fc86417df3e0f6d4" - "ca-central-1" = "ami-0811b93a30ab570f7" - "eu-central-1" = "ami-024f50fdc1f2f5603" - "eu-west-1" = "ami-0ca96dfbaf35b0c31" - "eu-west-2" = "ami-00189dbab3fd43af2" - "eu-west-3" = "ami-00e70e3421f053648" - } -} - variable "vpc_id" { type = string description = "Id of the vpc used for this deployment" } -variable "vpc_cidr_block" { - type = string - description = "cidr block of the used vpc" +variable "subnet_address_range" { + type = list(string) + description = "List with subnet address ranges in cidr notation to create the netweaver subnets" } variable "key_name" { @@ -63,10 +47,14 @@ variable "route_table_id" { description = "Route table id" } -variable "efs_performance_mode" { +variable "efs_enable_mount" { + type = bool + description = "Enable the mount operation on the EFS storage" +} + +variable "efs_file_system_id" { type = string - description = "Performance mode of the EFS storage" - default = "generalPurpose" + description = "AWS efs file system ID to be used by EFS mount target" } variable "aws_credentials" { @@ -76,16 +64,63 @@ variable "aws_credentials" { } variable "aws_access_key_id" { - type = string + type = string } variable "aws_secret_access_key" { - type = string + type = string } 
variable "s3_bucket" { description = "S3 bucket where Netwaever installation files are stored" - type = string + type = string +} + +variable "netweaver_product_id" { + description = "Netweaver installation product. Even though the module is about Netweaver, it can be used to install other SAP instances like S4/HANA" + type = string + default = "NW750.HDB.ABAPHA" +} + +variable "netweaver_swpm_folder" { + description = "Netweaver software SWPM folder, path relative from the `netweaver_inst_media` mounted point" + type = string + default = "" +} + +variable "netweaver_sapcar_exe" { + description = "Path to sapcar executable, relative from the `netweaver_inst_media` mounted point" + type = string + default = "" +} + +variable "netweaver_swpm_sar" { + description = "SWPM installer sar archive containing the installer, path relative from the `netweaver_inst_media` mounted point" + type = string + default = "" +} + +variable "netweaver_swpm_extract_dir" { + description = "Extraction path for Netweaver software SWPM folder, if SWPM sar file is provided" + type = string + default = "/sapmedia/NW/SWPM" +} + +variable "netweaver_sapexe_folder" { + description = "Software folder where needed sapexe `SAR` executables are stored (sapexe, sapexedb, saphostagent), path relative from the `netweaver_inst_media` mounted point" + type = string + default = "" +} + +variable "netweaver_additional_dvds" { + description = "Software folder with additional SAP software needed to install netweaver (NW export folder and HANA HDB client for example), path relative from the `netweaver_inst_media` mounted point" + type = list + default = [] +} + +variable "netweaver_nfs_share" { + description = "URL of the NFS share where /sapmnt and /usr/sap/{sid}/SYS will be mounted. This folder must have the sapmnt and usrsapsys folders" + type = string } variable "hana_ip" { @@ -95,7 +130,7 @@ variable "hana_ip" { } variable "host_ips" { - description = "ip addresses of the machines. 
Remember that each of the machines is in a different subnet" + description = "ip addresses of the machines. The addresses must belong to the subnet provided in subnet_address_range" type = list(string) default = ["10.0.2.7", "10.0.3.8", "10.0.2.9", "10.0.3.10"] } @@ -181,11 +216,13 @@ variable "ha_sap_deployment_repo" { variable "devel_mode" { description = "Whether or not to install the HA/SAP packages from the `ha_sap_deployment_repo`" + type = bool default = false } variable "qa_mode" { description = "Whether or not to install the HA/SAP packages from the `ha_sap_deployment_repo`" + type = bool default = false } @@ -196,10 +233,28 @@ variable "provisioner" { variable "background" { description = "Run the provisioner execution in background if set to true finishing terraform execution" + type = bool default = false } variable "monitoring_enabled" { description = "enable the host to be monitored by exporters, e.g node_exporter" + type = bool default = false } + +variable "on_destroy_dependencies" { + description = "Resource objects needed in on_destroy script (everything that allows ssh connection)" + type = any + default = [] +} + +variable "os_image" { + description = "sles4sap AMI image identifier or a pattern used to find the image name (e.g. suse-sles-sap-15-sp1-byos)" + type = string +} + +variable "os_owner" { + description = "OS image owner" + type = string +} diff --git a/aws/modules/sap_cluster_policies/main.tf b/aws/modules/sap_cluster_policies/main.tf index 9e3ff5a07..73291a1b7 100644 --- a/aws/modules/sap_cluster_policies/main.tf +++ b/aws/modules/sap_cluster_policies/main.tf @@ -13,51 +13,39 @@ resource "aws_iam_role" "cluster-role" { } } -data "template_file" "data-provider-policy-template" { - count = var.enabled ? 1 : 0 - template = file("${path.module}/templates/aws_data_provider_policy.tpl") -} - resource "aws_iam_role_policy" "data-provider-policy" { count = var.enabled ?
1 : 0 name = "${terraform.workspace}-${var.name}-data-provider-policy" role = aws_iam_role.cluster-role[0].id - policy = data.template_file.data-provider-policy-template[0].rendered -} - -data "template_file" "stonith-policy-template" { - count = var.enabled ? 1 : 0 - template = file("${path.module}/templates/aws_stonith_policy.tpl") - vars = { - region = var.aws_region - aws_account_id = data.aws_caller_identity.current.account_id - ec2_instance1 = var.cluster_instances.0 - ec2_instance2 = var.cluster_instances.1 - } + policy = templatefile("${path.module}/templates/aws_data_provider_policy.tpl", {}) } resource "aws_iam_role_policy" "stonith-policy" { count = var.enabled ? 1 : 0 name = "${terraform.workspace}-${var.name}-stonith-policy" role = aws_iam_role.cluster-role[0].id - policy = data.template_file.stonith-policy-template[0].rendered -} - -data "template_file" "ip-agent-policy-template" { - count = var.enabled ? 1 : 0 - template = file("${path.module}/templates/aws_ip_agent_policy.tpl") - vars = { - region = var.aws_region - aws_account_id = data.aws_caller_identity.current.account_id - route_table = var.route_table_id - } + policy = templatefile( + "${path.module}/templates/aws_stonith_policy.tpl", + { + region = var.aws_region + aws_account_id = data.aws_caller_identity.current.account_id + ec2_instances = var.cluster_instances + } + ) } resource "aws_iam_role_policy" "ip-agent-policy" { count = var.enabled ? 
1 : 0 name = "${terraform.workspace}-${var.name}-ip-agent-policy" role = aws_iam_role.cluster-role[0].id - policy = data.template_file.ip-agent-policy-template[0].rendered + policy = templatefile( + "${path.module}/templates/aws_ip_agent_policy.tpl", + { + region = var.aws_region + aws_account_id = data.aws_caller_identity.current.account_id + route_table = var.route_table_id + } + ) } resource "aws_iam_instance_profile" "cluster-role-profile" { diff --git a/aws/modules/sap_cluster_policies/templates/aws_stonith_policy.tpl b/aws/modules/sap_cluster_policies/templates/aws_stonith_policy.tpl index 3d117a16b..2e15a396e 100644 --- a/aws/modules/sap_cluster_policies/templates/aws_stonith_policy.tpl +++ b/aws/modules/sap_cluster_policies/templates/aws_stonith_policy.tpl @@ -20,10 +20,9 @@ "ec2:StartInstances", "ec2:StopInstances" ], - "Resource": [ - "arn:aws:ec2:${region}:${aws_account_id}:instance/${ec2_instance1}", - "arn:aws:ec2:${region}:${aws_account_id}:instance/${ec2_instance2}" - ] + "Resource": ${jsonencode([ + for ec2_instance in ec2_instances : "arn:aws:ec2:${region}:${aws_account_id}:instance/${ec2_instance}" + ])} } ] } diff --git a/aws/modules/sap_cluster_policies/variables.tf b/aws/modules/sap_cluster_policies/variables.tf index 5537787ed..26815eaef 100644 --- a/aws/modules/sap_cluster_policies/variables.tf +++ b/aws/modules/sap_cluster_policies/variables.tf @@ -1,6 +1,6 @@ variable "enabled" { - type = bool description = "Enable the sap cluster policies creation" + type = bool } variable "name" { diff --git a/aws/monitoring.tf b/aws/monitoring.tf deleted file mode 100644 index f2d0e1831..000000000 --- a/aws/monitoring.tf +++ /dev/null @@ -1,20 +0,0 @@ -variable "timezone" { - description = "Timezone setting for all VMs" - default = "Europe/Berlin" -} - -variable "monitoring_srv_ip" { - description = "monitoring server address. 
Must be in 10.0.0.0/24 subnet" - type = string - default = "" -} - -variable "devel_mode" { - description = "whether or not to install HA/SAP packages from ha_sap_deployment_repo" - default = false -} - -variable "monitoring_enabled" { - description = "enable the host to be monitored by exporters, e.g node_exporter" - default = false -} diff --git a/aws/network.tf b/aws/network.tf deleted file mode 100644 index 50ff9303e..000000000 --- a/aws/network.tf +++ /dev/null @@ -1,179 +0,0 @@ -# Launch SLES-HAE of SLES4SAP cluster nodes - -# Network resources: VPC, Internet Gateways, Security Groups for the EC2 instances and for the EFS file system -resource "aws_vpc" "vpc" { - cidr_block = "10.0.0.0/16" - enable_dns_hostnames = true - enable_dns_support = true - - tags = { - Name = "${terraform.workspace}-vpc" - Workspace = terraform.workspace - } -} - -resource "aws_internet_gateway" "igw" { - vpc_id = aws_vpc.vpc.id - - tags = { - Name = "${terraform.workspace}-igw" - Workspace = terraform.workspace - } -} - -resource "aws_subnet" "hana-subnet" { - count = var.ninstances - vpc_id = aws_vpc.vpc.id - cidr_block = cidrsubnet(aws_vpc.vpc.cidr_block, 8, count.index) - availability_zone = element(data.aws_availability_zones.available.names, count.index) - - tags = { - Name = "${terraform.workspace}-hana-subnet-${count.index + 1}" - Workspace = terraform.workspace - } -} - -resource "aws_route_table" "route-table" { - vpc_id = aws_vpc.vpc.id - - tags = { - Name = "${terraform.workspace}-hana-route-table" - Workspace = terraform.workspace - } -} - -resource "aws_route_table_association" "hana-subnet-route-association" { - count = var.ninstances - subnet_id = element(aws_subnet.hana-subnet.*.id, count.index) - route_table_id = aws_route_table.route-table.id -} - -resource "aws_route" "public" { - route_table_id = aws_route_table.route-table.id - destination_cidr_block = "0.0.0.0/0" - gateway_id = aws_internet_gateway.igw.id -} - -resource "aws_route" "hana-cluster-vip" { - 
route_table_id = aws_route_table.route-table.id - destination_cidr_block = "${var.hana_cluster_vip}/32" - instance_id = aws_instance.clusternodes.0.id -} - - -resource "aws_security_group" "secgroup" { - name = "${terraform.workspace}-sg" - vpc_id = aws_vpc.vpc.id - - tags = { - Name = "${terraform.workspace}-sg" - Workspace = terraform.workspace - } -} - -resource "aws_security_group_rule" "outall" { - type = "egress" - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - - security_group_id = aws_security_group.secgroup.id -} - -resource "aws_security_group_rule" "local" { - type = "ingress" - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["10.0.0.0/16"] - - security_group_id = aws_security_group.secgroup.id -} - -resource "aws_security_group_rule" "http" { - type = "ingress" - from_port = 80 - to_port = 80 - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - - security_group_id = aws_security_group.secgroup.id -} - -resource "aws_security_group_rule" "https" { - type = "ingress" - from_port = 443 - to_port = 443 - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - - security_group_id = aws_security_group.secgroup.id -} - -resource "aws_security_group_rule" "hawk" { - type = "ingress" - from_port = 7630 - to_port = 7630 - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - - security_group_id = aws_security_group.secgroup.id -} - -resource "aws_security_group_rule" "ssh" { - type = "ingress" - from_port = 22 - to_port = 22 - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - - security_group_id = aws_security_group.secgroup.id -} - - -# Monitoring rules -resource "aws_security_group_rule" "hanadb_exporter" { - count = var.monitoring_enabled == true ? 1 : 0 - type = "ingress" - from_port = 9668 - to_port = 9668 - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - - security_group_id = aws_security_group.secgroup.id -} - - -resource "aws_security_group_rule" "node_exporter" { - count = var.monitoring_enabled == true ? 
1 : 0 - type = "ingress" - from_port = 9100 - to_port = 9100 - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - - security_group_id = aws_security_group.secgroup.id -} - -resource "aws_security_group_rule" "ha_exporter" { - count = var.monitoring_enabled == true ? 1 : 0 - type = "ingress" - from_port = 9664 - to_port = 9664 - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - - security_group_id = aws_security_group.secgroup.id -} - -resource "aws_security_group_rule" "prometheus_server" { - count = var.monitoring_enabled == true ? 1 : 0 - type = "ingress" - from_port = 9090 - to_port = 9090 - protocol = "tcp" - cidr_blocks = ["0.0.0.0/0"] - - security_group_id = aws_security_group.secgroup.id -} diff --git a/aws/outputs.tf b/aws/outputs.tf index 9bcd45eb2..fb8fe1ed0 100644 --- a/aws/outputs.tf +++ b/aws/outputs.tf @@ -4,72 +4,40 @@ # - Private node name # - Public node name -# iSCSI server - -data "aws_instance" "iscsisrv" { - instance_id = aws_instance.iscsisrv.id -} - -output "iscsisrv_ip" { - value = [data.aws_instance.iscsisrv.private_ip] -} - -output "iscsisrv_public_ip" { - value = [data.aws_instance.iscsisrv.public_ip] -} - -output "iscsisrv_name" { - value = [data.aws_instance.iscsisrv.id] -} - -output "iscsisrv_public_name" { - value = [data.aws_instance.iscsisrv.public_dns] -} - # Cluster nodes -data "aws_instance" "clusternodes" { - count = var.ninstances - instance_id = element(aws_instance.clusternodes.*.id, count.index) -} - output "cluster_nodes_ip" { - value = data.aws_instance.clusternodes.*.private_ip + value = module.hana_node.cluster_nodes_ip } output "cluster_nodes_public_ip" { - value = data.aws_instance.clusternodes.*.public_ip + value = module.hana_node.cluster_nodes_public_ip } output "cluster_nodes_name" { - value = data.aws_instance.clusternodes.*.id + value = module.hana_node.cluster_nodes_name } output "cluster_nodes_public_name" { - value = data.aws_instance.clusternodes.*.public_dns + value = module.hana_node.cluster_nodes_public_name 
} # Monitoring -data "aws_instance" "monitoring" { - count = var.monitoring_enabled == true ? 1 : 0 - instance_id = aws_instance.monitoring.0.id -} - output "monitoring_ip" { - value = join("", data.aws_instance.monitoring.*.private_ip) + value = module.monitoring.monitoring_ip } output "monitoring_public_ip" { - value = join("", data.aws_instance.monitoring.*.public_ip) + value = module.monitoring.monitoring_public_ip } output "monitoring_name" { - value = join("", data.aws_instance.monitoring.*.id) + value = module.monitoring.monitoring_name } output "monitoring_public_name" { - value = join("", data.aws_instance.monitoring.*.public_dns) + value = module.monitoring.monitoring_public_name } # Netweaver @@ -89,3 +57,39 @@ output "netweaver_name" { output "netweaver_public_name" { value = module.netweaver_node.netweaver_public_name } + +# iSCSI server + +output "iscsisrv_ip" { + value = module.iscsi_server.iscsisrv_ip +} + +output "iscsisrv_public_ip" { + value = module.iscsi_server.iscsisrv_public_ip +} + +output "iscsisrv_name" { + value = module.iscsi_server.iscsisrv_name +} + +output "iscsisrv_public_name" { + value = module.iscsi_server.iscsisrv_public_name +} + +# DRBD + +output "drbd_ip" { + value = module.drbd_node.drbd_ip +} + +output "drbd_public_ip" { + value = module.drbd_node.drbd_public_ip +} + +output "drbd_name" { + value = module.drbd_node.drbd_name +} + +output "drbd_public_name" { + value = module.drbd_node.drbd_public_name +} diff --git a/aws/provider.tf b/aws/provider.tf deleted file mode 100644 index 26a7cef0e..000000000 --- a/aws/provider.tf +++ /dev/null @@ -1,10 +0,0 @@ -# Configure the AWS Provider -provider "aws" { - version = "~> 2.7" - region = var.aws_region -} - -provider "template" { - version = "~> 2.1" -} - diff --git a/aws/salt_provisioner.tf b/aws/salt_provisioner.tf deleted file mode 100644 index ec63b31cf..000000000 --- a/aws/salt_provisioner.tf +++ /dev/null @@ -1,208 +0,0 @@ -# This file contains the salt provisioning logic. 
-# It will be executed if 'provisioner' is set to salt (default option) and the -# iscsi and hana node resources are created (check triggers option). - -# Template file for user_data used in resource instances -data "template_file" "salt_provisioner" { - template = file("../salt/salt_provisioner_script.tpl") - - vars = { - regcode = var.reg_code - } -} - -resource "null_resource" "iscsi_provisioner" { - count = var.provisioner == "salt" ? 1 : 0 - - triggers = { - iscsi_id = join(",", aws_instance.iscsisrv.*.id) - } - - connection { - host = element(aws_instance.iscsisrv.*.public_ip, count.index) - type = "ssh" - user = "ec2-user" - private_key = file(var.private_key_location) - } - - provisioner "file" { - source = "../salt" - destination = "/tmp/salt" - } - - provisioner "file" { - content = data.template_file.salt_provisioner.rendered - destination = "/tmp/salt_provisioner.sh" - } - - provisioner "file" { - content = < /tmp/provisioning.log ${var.background ? "&" : ""}", - "return_code=$? && sleep 1 && exit $return_code", - ] # Workaround to let the process start in background properly - } -} - -resource "null_resource" "hana_node_provisioner" { - count = var.provisioner == "salt" ? var.ninstances : 0 - - triggers = { - cluster_instance_ids = join(",", aws_instance.clusternodes.*.id) - } - - connection { - host = element(aws_instance.clusternodes.*.public_ip, count.index) - type = "ssh" - user = "ec2-user" - private_key = file(var.private_key_location) - } - - provisioner "file" { - source = var.aws_access_key_id == "" || var.aws_secret_access_key == "" ? var.aws_credentials : "/dev/null" - destination = "/tmp/credentials" - } - - provisioner "file" { - source = "../salt" - destination = "/tmp/salt" - } - - provisioner "file" { - content = data.template_file.salt_provisioner.rendered - destination = "/tmp/salt_provisioner.sh" - } - - provisioner "file" { - content = < 1 ? 
"0${count.index + 1}" : ""} -network_domain: "tf.local" -shared_storage_type: iscsi -sbd_disk_device: /dev/sda -hana_inst_master: ${var.hana_inst_master} -hana_inst_folder: ${var.hana_inst_folder} -hana_disk_device: ${var.hana_disk_device} -hana_fstype: ${var.hana_fstype} -iscsi_srv_ip: ${aws_instance.iscsisrv.private_ip} -init_type: ${var.init_type} -cluster_ssh_pub: ${var.cluster_ssh_pub} -cluster_ssh_key: ${var.cluster_ssh_key} -reg_code: ${var.reg_code} -reg_email: ${var.reg_email} -reg_additional_modules: {${join(", ", formatlist("'%s': '%s'", keys(var.reg_additional_modules), values(var.reg_additional_modules)))}} -additional_packages: [${join(", ", formatlist("'%s'", var.additional_packages))}] -ha_sap_deployment_repo: ${var.ha_sap_deployment_repo} -monitoring_enabled: ${var.monitoring_enabled} -devel_mode: ${var.devel_mode} -qa_mode: ${var.qa_mode} -hwcct: ${var.hwcct} -EOF - - destination = "/tmp/grains" - } - - provisioner "remote-exec" { - inline = [ - "${var.background ? "nohup" : ""} sudo sh /tmp/salt_provisioner.sh > /tmp/provisioning.log ${var.background ? "&" : ""}", - "return_code=$? && sleep 1 && exit $return_code", - ] # Workaround to let the process start in background properly - } -} - -resource "null_resource" "monitoring_provisioner" { - count = var.provisioner == "salt" && var.monitoring_enabled ? 1 : 0 - - triggers = { - monitoring_id = aws_instance.monitoring.0.id - } - - connection { - host = aws_instance.monitoring.0.public_ip - type = "ssh" - user = "ec2-user" - private_key = file(var.private_key_location) - } - - provisioner "file" { - source = "../salt" - destination = "/tmp" - } - - provisioner "file" { - content = data.template_file.salt_provisioner.rendered - destination = "/tmp/salt_provisioner.sh" - } - - provisioner "file" { - content = < /tmp/provisioning.log ${var.background ? "&" : ""}", - "return_code=$? 
&& sleep 1 && exit $return_code", - ] # Workaround to let the process start in background properly - } -} diff --git a/aws/terraform.tfvars.example b/aws/terraform.tfvars.example index dbba08d1d..d7e68866c 100644 --- a/aws/terraform.tfvars.example +++ b/aws/terraform.tfvars.example @@ -1,13 +1,30 @@ # Launch SLES-HAE of SLES4SAP cluster nodes -# Instance type to use for the cluster nodes +# Use an already existing vpc. Make sure the vpc has the internet gateway already attached +#vpc_id = "vpc-xxxxxxxxxxxxxxxxx" + +# Use an already existing security group +#security_group_id = "sg-xxxxxxxxxxxxxxxxx" + +# vpc address range in CIDR notation +# Only used if the vpc is created by terraform or the user doesn't have read permissions in this +# resource. To use the current vpc address range set the value to an empty string +# To define custom ranges +#vpc_address_range = "10.0.0.0/16" +# Or to use already existing vpc address ranges +#vpc_address_range = "" + +# Instance type to use for the hana cluster nodes instancetype = "r3.8xlarge" +# The minimum instance type of a region, not suitable to hana nodes +min_instancetype = "t2.micro" + # Disk type for HANA hana_data_disk_type = "gp2" # Number of nodes in the cluster -ninstances = "2" +hana_count = "2" # Region where to deploy the configuration aws_region = "eu-central-1" @@ -19,9 +36,12 @@ public_key_location = "/path/to/your/public/ssh/key" private_key_location = "/path/to/your/private/ssh/key" # Custom AMI for nodes -#sles4sap = { -# "eu-central-1" = "ami-xxxxxxxxxxxxxxxxx" -#} +#hana_os_image = "ami-xxxxxxxxxxxxxxxxx" +# Or use a pattern to find the image +#hana_os_image = "suse-sles-sap-15-sp1-byos" + +# Custom owner for private AMI +#hana_os_owner = "self" # aws-cli credentials data # access keys parameters have preference over the credentials file (they are self exclusive) @@ -34,20 +54,36 @@ aws_credentials = "~/.aws/credentials" name = "hana" # S3 bucket where HANA installation master is located -hana_inst_master 
= "s3://path/to/your/hana/installation/master" - -# Local folder where HANA installation master will be downloaded from S3 -hana_inst_folder = "/root/hana_inst_media/" +hana_inst_master = "s3://path/to/your/hana/installation/master/51053381" +# Or you can combine the `hana_inst_master` with `hana_platform_folder` variable. +#hana_inst_master = "s3://path/to/your/hana/installation/master" +# Specify the path to already extracted HANA platform installation media, relative to hana_inst_master mounting point. +# This will have preference over hdbserver sar archive installation media +#hana_platform_folder = "51053381" + +# Or specify the path to the sapcar executable & HANA database server installation sar archive, relative to the 'hana_inst_master' mounting point +# The sar archive will be extracted to path specified at hana_extract_dir (optional, by default /sapmedia/HANA) +# Make sure to use the latest/compatible version of sapcar executable, otherwise file may be extracted incorrectly +hana_sapcar_exe = "SAPCAR" +hdbserver_sar = "IMDB_SERVER.SAR" +hana_extract_dir = "/sapmedia/HDBSERVER" # Device used by node where HANA will be installed hana_disk_device = "/dev/xvdd" # IP address used to configure the hana cluster floating IP. It must be in other subnet than the machines! -hana_cluster_vip = "192.168.1.10" +#hana_cluster_vip = "192.168.1.10" # Variable to control what is deployed in the nodes. Can be all, skip-hana or skip-cluster init_type = "all" +# iSCSI server address. It should be in same iprange as hana_ips +#iscsi_srv_ip = "10.0.0.254" + +# iSCSI OS image +#iscsi_os_image = "ami-xxxxxxxxxxxxxxxxx" +#iscsi_os_owner = "self" + # Device used by the iSCSI server to provide LUNs iscsidev = "/dev/xvdd" @@ -60,13 +96,16 @@ cluster_ssh_pub = "salt://hana_node/files/sshkeys/cluster.id_rsa.pub" cluster_ssh_key = "salt://hana_node/files/sshkeys/cluster.id_rsa" # Each host IP address (sequential order). 
The first ip must be in 10.0.0.0/24 subnet and the second in 10.0.1.0/24 subnet -# example : host_ips = ["10.0.0.5", "10.0.1.6"] -host_ips = ["10.0.0.5", "10.0.1.6"] +#hana_ips = ["10.0.0.5", "10.0.1.6"] # Repository url used to install HA/SAP deployment packages" # The latest RPM packages can be found at: # https://download.opensuse.org/repositories/network:/ha-clustering:/Factory/{YOUR OS VERSION} # Contains the salt formulas rpm packages. +# To auto detect the SLE version +#ha_sap_deployment_repo = "http://download.opensuse.org/repositories/network:/ha-clustering:/Factory/" +# Specific SLE version used in all the created machines +#ha_sap_deployment_repo = "http://download.opensuse.org/repositories/network:/ha-clustering:/Factory/SLE_15/" ha_sap_deployment_repo = "" # Optional SUSE Customer Center Registration parameters @@ -94,8 +133,11 @@ ha_sap_deployment_repo = "" # Enable the host to be monitored by exporters #monitoring_enabled = true +#monitoring_os_image = "ami-xxxxxxxxxxxxxxxxx" +#monitoring_os_owner = "self" + # IP address of the machine where Prometheus and Grafana are running. Must be in 10.0.0.0/24 subnet -monitoring_srv_ip = "10.0.0.253" +#monitoring_srv_ip = "10.0.0.253" # QA variables @@ -110,11 +152,50 @@ monitoring_srv_ip = "10.0.0.253" # true or false (default) #hwcct = false +# drbd related variables + +# netweaver will use AWS efs for nfs share by default, unless drbd is enabled +# Enable drbd cluster +#drbd_enabled = false + +#drbd_instancetype = "t2.micro" + +#drbd_os_image = "ami-xxxxxxxxxxxxxxxxx" +#drbd_os_owner = "self" + +#drbd_data_disk_size = 15 + +#drbd_data_disk_type = gp2 + +# Each drbd cluster host IP address (sequential order). 
+#drbd_ips = ["10.0.5.20", "10.0.6.21"] +#drbd_cluster_vip = "192.168.1.20" + # Netweaver variables #netweaver_enabled = true #netweaver_instancetype = "r3.8xlarge" +#netweaver_os_image = "ami-xxxxxxxxxxxxxxxxx" +#netweaver_os_owner = "self" +#AWS efs performance mode used by netweaver nfs share, if efs storage is used #netweaver_efs_performance_mode = "generalPurpose" -#netweaver_s3_bucket = "s3://path/to/your/netweaver/installation/s3bucket" #netweaver_ips = ["10.0.2.7", "10.0.3.8", "10.0.2.9", "10.0.3.10"] #netweaver_virtual_ips = ["192.168.1.20", "192.168.1.21", "192.168.1.22", "192.168.1.23"] +# Netweaver installation required folders +#netweaver_s3_bucket = "s3://path/to/your/netweaver/installation/s3bucket" +# SAP SWPM installation folder, relative to the netweaver_s3_bucket folder +#netweaver_swpm_folder = "your_swpm" +# Or specify the path to the sapcar executable & SWPM installer sar archive, relative to the netweaver_s3_bucket folder +# The sar archive will be extracted to path specified at netweaver_swpm_extract_dir (optional, by default /sapmedia/NW/SWPM) +#netweaver_sapcar_exe = "your_sapcar_exe_file_path" +#netweaver_swpm_sar = "your_swpm_sar_file_path" +#netweaver_swpm_extract_dir = "location_to_extract_swpm_sar_absolute_path" +# Folder where needed SAR executables (sapexe, sapdbexe) are stored, relative to the netweaver_s3_bucket folder +#netweaver_sapexe_folder = "kernel_nw75_sar" +# Additional folders (added in start_dir.cd), relative to the netweaver_s3_bucket folder +#netweaver_additional_dvds = ["dvd1", "dvd2"] + +# Pre deployment + +# Enable all some pre deployment steps (disabled by default) +#pre_deployment = true diff --git a/aws/variables.tf b/aws/variables.tf index 5e2b8dc34..fa9ab37b8 100644 --- a/aws/variables.tf +++ b/aws/variables.tf @@ -1,144 +1,87 @@ -# Launch SLES-HAE of SLES4SAP cluster nodes - -# Map used for suse-sles-sap-15-byos-v20180816-hvm-ssd-x86_64 -# SLES4SAP 15 in eu-central-1: ami-024f50fdc1f2f5603 -# Used for cluster 
nodes - -variable "sles4sap" { - type = map(string) - - default = { - "us-east-1" = "ami-027447d2b7312df2d" - "us-east-2" = "ami-099a51d3b131f3ce2" - "us-west-1" = "ami-0f213357578720889" - "us-west-2" = "ami-0fc86417df3e0f6d4" - "ca-central-1" = "ami-0811b93a30ab570f7" - "eu-central-1" = "ami-024f50fdc1f2f5603" - "eu-west-1" = "ami-0ca96dfbaf35b0c31" - "eu-west-2" = "ami-00189dbab3fd43af2" - "eu-west-3" = "ami-00e70e3421f053648" - } -} - -# Map used for suse-sles-sap-15-byos-v20180816-hvm-ssd-x86_64 -# SLES4SAP 15 in eu-central-1: ami-024f50fdc1f2f5603 -# Used for iscsi server - -variable "iscsi_srv" { - type = map(string) - - default = { - "us-east-1" = "ami-027447d2b7312df2d" - "us-east-2" = "ami-099a51d3b131f3ce2" - "us-west-1" = "ami-0f213357578720889" - "us-west-2" = "ami-0fc86417df3e0f6d4" - "ca-central-1" = "ami-0811b93a30ab570f7" - "eu-central-1" = "ami-024f50fdc1f2f5603" - "eu-west-1" = "ami-0ca96dfbaf35b0c31" - "eu-west-2" = "ami-00189dbab3fd43af2" - "eu-west-3" = "ami-00e70e3421f053648" - } -} - -# Variables for type of instances to use and number of cluster nodes -# Use with: terraform apply -var instancetype=t2.micro -var ninstances=2 - -variable "instancetype" { - type = string - default = "r3.8xlarge" -} - -variable "hana_data_disk_type" { - type = string - default = "gp2" -} - -variable "hana_cluster_vip" { - description = "IP address used to configure the hana cluster floating IP. It must be in other subnet than the machines!" - type = string - default = "192.168.1.10" -} - -variable "ninstances" { - type = string - default = "2" -} +# AWS related variables variable "aws_region" { - type = string -} - -variable "name" { - description = "hostname, without the domain part" + description = "AWS region where the deployment machines will be created. 
If not provided the current configured region will be used" type = string } -variable "public_key_location" { - type = string +variable "aws_access_key_id" { + description = "AWS access key id" + type = string + default = "" } -variable "private_key_location" { - type = string +variable "aws_secret_access_key" { + description = "AWS secret access key" + type = string + default = "" } variable "aws_credentials" { - description = "AWS credentials file path in local machine" + description = "AWS credentials file path in local machine. This file will be used it `aws_access_key_id` and `aws_secret_access_key` are not provided" type = string default = "~/.aws/credentials" } -variable "aws_access_key_id" { - type = string - default = "" +variable "vpc_id" { + description = "Id of a currently existing vpc to use in the deployment. It must have an internet gateway attached. If not provided a new one will be created." + type = string + default = "" } -variable "aws_secret_access_key" { - type = string - default = "" +variable "security_group_id" { + description = "Id of a currently existing security group to use in the deployment. If not provided a new one will be created" + type = string + default = "" } -variable "init_type" { - type = string - default = "all" +variable "vpc_address_range" { + description = "vpc address range in CIDR notation" + type = string + default = "10.0.0.0/16" } -variable "hana_inst_master" { - type = string +variable "virtual_address_range" { + description = "address range for virtual addresses for the clusters. It must be in a different range than `vpc_address_range`" + type = string + default = "192.168.1.0/24" } -variable "hana_inst_folder" { - type = string - default = "/root/hana_inst_media" +variable "infra_subnet_address_range" { + description = "Address range to create the subnet for the infrastructure (iscsi, monitoring, etc) machines. 
If not given the addresses will be generated based on vpc_address_range" + type = string + default = "" } -variable "hana_disk_device" { - description = "device where to install HANA" +variable "public_key_location" { + description = "Path to a SSH public key used to connect to the created machines" type = string } -variable "hana_fstype" { - description = "Filesystem type to use for HANA" +variable "private_key_location" { + description = "Path to a SSH private key used to connect to the created machines" type = string - default = "xfs" } -variable "iscsidev" { - description = "device iscsi for iscsi server" +# Deployment variables + +variable "name" { + description = "hostname, without the domain part" type = string } -variable "iscsi_disks" { - description = "number of partitions attach to iscsi server. 0 means `all`." - default = 0 +variable "timezone" { + description = "Timezone setting for all VMs" + default = "Europe/Berlin" } variable "cluster_ssh_pub" { - description = "path for the public key needed by the cluster" + description = "Path to a SSH public key used during the cluster creation. The key must be passwordless" type = string } variable "cluster_ssh_key" { - description = "path for the private key needed by the cluster" + description = "Path to a SSH private key used during the cluster creation. The key must be passwordless" type = string } @@ -169,27 +112,23 @@ variable "reg_additional_modules" { } variable "additional_packages" { - description = "extra packages which should be installed" + description = "Extra packages to be installed" default = [] } -variable "host_ips" { - description = "ip addresses to set to the nodes. 
The first ip must be in 10.0.0.0/24 subnet and the second in 10.0.1.0/24 subnet" - type = list(string) -} - # Repository url used to install HA/SAP deployment packages" # The latest RPM packages can be found at: # https://download.opensuse.org/repositories/network:/ha-clustering:/Factory/{YOUR OS VERSION} # Contains the salt formulas rpm packages. variable "ha_sap_deployment_repo" { - description = "Repository url used to install HA/SAP deployment packages" + description = "Repository url used to install HA/SAP deployment packages. If SLE version is not set, the deployment will automatically detect the current OS version" type = string } -variable "scenario_type" { - description = "Deployed scenario type. Available options: performance-optimized, cost-optimized" - default = "performance-optimized" +variable "devel_mode" { + description = "Increase ha_sap_deployment_repo repository priority to get the packages from this repository instead of SLE official channels" + type = bool + default = false } variable "provisioner" { @@ -199,18 +138,269 @@ variable "provisioner" { variable "background" { description = "Run the provisioner execution in background if set to true finishing terraform execution" + type = bool + default = false +} + +# Hana related variables + +variable "hana_count" { + description = "Number of hana nodes" + type = number + default = 2 +} + +variable "hana_os_image" { + description = "sles4sap AMI image identifier or a pattern used to find the image name (e.g. 
suse-sles-sap-15-sp1-byos)" + type = string + default = "suse-sles-sap-15-sp1-byos" +} + +variable "hana_os_owner" { + description = "OS image owner" + type = string + default = "amazon" +} + +variable "instancetype" { + description = "The instance type of the hana nodes" + type = string + default = "r3.8xlarge" +} + +variable "min_instancetype" { + description = "The minimum cost/capacity instance type, different per region" + type = string + default = "t2.micro" +} + +variable "init_type" { + description = "Type of deployment. Options: all-> Install HANA and HA; skip-hana-> Skip HANA installation; skip-cluster-> Skip HA cluster installation" + type = string + default = "all" +} + +variable "hana_subnet_address_range" { + description = "List of address ranges to create the subnets for the hana machines. If not given the addresses will be generated based on vpc_address_range" + type = list(string) + default = [] +} + +variable "hana_ips" { + description = "ip addresses to set to the nodes. 
The first ip must be in 10.0.0.0/24 subnet and the second in 10.0.1.0/24 subnet" + type = list(string) + default = [] +} + +variable "hana_inst_master" { + description = "S3 bucket folder path where hana installation software is available" + type = string +} + +variable "hana_inst_folder" { + description = "Folder where the hana installation software will be downloaded" + type = string + default = "/sapmedia/HANA" +} + +variable "hana_platform_folder" { + description = "Path to the hana platform media, relative to the 'hana_inst_master' mounting point" + type = string + default = "" +} + +variable "hana_sapcar_exe" { + description = "Path to the sapcar executable, relative to the 'hana_inst_master' mounting point" + type = string + default = "" +} + +variable "hdbserver_sar" { + description = "Path to the HANA database server installation sar archive, relative to the 'hana_inst_master' mounting point" + type = string + default = "" +} + +variable "hana_extract_dir" { + description = "Absolute path to folder where SAP HANA sar archive will be extracted" + type = string + default = "/sapmedia/HANA" +} + +variable "hana_data_disk_type" { + description = "Disk type of the disks used to store hana database content" + type = string + default = "gp2" +} + +variable "hana_disk_device" { + description = "Device where hana is installed" + type = string +} + +variable "hana_fstype" { + description = "Filesystem type used by the disk where hana is installed" + type = string + default = "xfs" +} + +variable "hana_cluster_vip" { + description = "IP address used to configure the hana cluster floating IP. It must be in other subnet than the machines!" + type = string + default = "" +} + +variable "scenario_type" { + description = "Deployed scenario type. 
Available options: performance-optimized, cost-optimized" + default = "performance-optimized" +} + +# DRBD related variables + +variable "drbd_enabled" { + description = "Enable the DRBD cluster for nfs" + type = bool + default = false +} + +variable "drbd_os_image" { + description = "sles4sap AMI image identifier or a pattern used to find the image name (e.g. suse-sles-sap-15-sp1-byos)" + type = string + default = "suse-sles-sap-15-sp1-byos" +} + +variable "drbd_os_owner" { + description = "OS image owner" + type = string + default = "amazon" +} + +variable "drbd_instancetype" { + description = "The instance type of the drbd node" + type = string + default = "" +} + +variable "drbd_cluster_vip" { + description = "IP address used to configure the drbd cluster floating IP" + type = string + default = "" +} + +variable "drbd_ips" { + description = "ip addresses to set to the drbd cluster nodes. If it's not set the addresses will be auto generated from the provided vnet address range" + type = list(string) + default = [] +} + +variable "drbd_subnet_address_range" { + description = "List of address ranges to create the subnets for the drbd machines. If not given the addresses will be generated based on vpc_address_range" + type = list(string) + default = [] +} + +variable "drbd_data_disk_size" { + description = "Disk size of the disks used to store drbd content" + type = string + default = "15" +} + +variable "drbd_data_disk_type" { + description = "Disk type of the disks used to store drbd content" + type = string + default = "gp2" +} + +# Iscsi server related variables + +variable "iscsi_os_image" { + description = "sles4sap AMI image identifier or a pattern used to find the image name (e.g. 
suse-sles-sap-15-sp1-byos)" + type = string + default = "suse-sles-sap-15-sp1-byos" +} + +variable "iscsi_os_owner" { + description = "OS image owner" + type = string + default = "amazon" +} + +variable "iscsi_instancetype" { + description = "The instance type of the iscsi server node." + type = string + default = "" +} + +variable "iscsidev" { + description = "Disk device where iscsi partitions are created" + type = string +} + +variable "iscsi_srv_ip" { + description = "iscsi server address. It should be in same iprange as host_ips" + type = string + default = "" +} + +variable "iscsi_disks" { + description = "Number of partitions attach to iscsi server. 0 means `all`." + default = 0 +} + +# Monitoring related variables + +variable "monitoring_os_image" { + description = "sles4sap AMI image identifier or a pattern used to find the image name (e.g. suse-sles-sap-15-sp1-byos)" + type = string + default = "suse-sles-sap-15-sp1-byos" +} + +variable "monitoring_os_owner" { + description = "OS image owner" + type = string + default = "amazon" +} + +variable "monitor_instancetype" { + description = "The instance type of the monitoring node." + type = string + default = "" +} + +variable "monitoring_srv_ip" { + description = "monitoring server address. Must be in 10.0.0.0/24 subnet" + type = string + default = "" +} + +variable "monitoring_enabled" { + description = "enable the host to be monitored by exporters, e.g node_exporter" + type = bool default = false } -# Netweaver variables +# Netweaver related variables variable "netweaver_enabled" { - description = "enable SAP Netweaver cluster deployment" + description = "Enable SAP Netweaver cluster deployment" + type = bool default = false } +variable "netweaver_os_image" { + description = "sles4sap AMI image identifier or a pattern used to find the image name (e.g. 
suse-sles-sap-15-sp1-byos)" + type = string + default = "suse-sles-sap-15-sp1-byos" +} + +variable "netweaver_os_owner" { + description = "OS image owner" + type = string + default = "amazon" +} + variable "netweaver_instancetype" { - description = "VM size for the Netweaver machines. Default to r3.8xlarge" + description = "Instance type for the Netweaver machines. Default to r3.8xlarge" type = string default = "r3.8xlarge" } @@ -227,6 +417,12 @@ variable "netweaver_efs_performance_mode" { default = "generalPurpose" } +variable "netweaver_subnet_address_range" { + description = "List of address ranges to create the subnets for the netweaver machines. If not given the addresses will be generated based on vpc_address_range" + type = list(string) + default = [] +} + variable "netweaver_ips" { description = "ip addresses to set to the netweaver cluster nodes" type = list(string) @@ -234,15 +430,57 @@ variable "netweaver_ips" { } variable "netweaver_virtual_ips" { - description = "virtual ip addresses to set to the netweaver cluster nodes" + description = "Virtual ip addresses to set to the netweaver cluster nodes" type = list(string) default = [] } +variable "netweaver_product_id" { + description = "Netweaver installation product. 
Even though the module is about Netweaver, it can be used to install other SAP instances like S4/HANA" + type = string + default = "NW750.HDB.ABAPHA" +} + +variable "netweaver_swpm_folder" { + description = "Netweaver software SWPM folder, path relative from the `netweaver_inst_media` mounted point" + type = string + default = "" +} + +variable "netweaver_sapcar_exe" { + description = "Path to sapcar executable, relative from the `netweaver_inst_media` mounted point" + type = string + default = "" +} + +variable "netweaver_swpm_sar" { + description = "SWPM installer sar archive containing the installer, path relative from the `netweaver_inst_media` mounted point" + type = string + default = "" +} + +variable "netweaver_swpm_extract_dir" { + description = "Extraction path for Netweaver software SWPM folder, if SWPM sar file is provided" + type = string + default = "/sapmedia/NW/SWPM" +} + +variable "netweaver_sapexe_folder" { + description = "Software folder where needed sapexe `SAR` executables are stored (sapexe, sapexedb, saphostagent), path relative from the `netweaver_inst_media` mounted point" + type = string + default = "" +} + +variable "netweaver_additional_dvds" { + description = "Software folder with additional SAP software needed to install netweaver (NW export folder and HANA HDB client for example), path relative from the `netweaver_inst_media` mounted point" + type = list + default = [] +} + # Specific QA variables variable "qa_mode" { - description = "define qa mode (Disable extra packages outside images)" + description = "Enable test/qa mode (disable extra packages usage not coming in the image)" type = bool default = false } @@ -252,3 +490,11 @@ variable "hwcct" { type = bool default = false } + +# Pre deployment + +variable "pre_deployment" { + description = "Enable pre deployment local execution. 
Only available for clients running Linux" + type = bool + default = false +} diff --git a/azure/README.md b/azure/README.md index d210e1015..a5ef20938 100644 --- a/azure/README.md +++ b/azure/README.md @@ -85,97 +85,39 @@ By default, this configuration will create 3 virtual machines in Azure: one for Once the infrastructure is created by Terraform, the servers are provisioned with Salt. -## Provisioning by Salt -By default, the cluster and HANA installation is done using Salt Formulas in foreground. -To customize this provisioning, you have to create the pillar files (cluster.sls and hana.sls) according to the examples in the [pillar_examples](../pillar_examples) folder (more information in the dedicated [README](../pillar_examples/README.md)) +# Specifications -## Specification +In order to deploy the environment, different configurations are available through the terraform variables. These variables can be configured using a `terraform.tfvars` file. An example is available in [terraform.tfvars.example](./terraform.tvars.example). To find all the available variables check the [variables.tf](./variables.tf) file. -These are the relevant files and what each provides: +## QA deployment -- [provider.tf](provider.tf): definition of the providers being used in the terraform configuration. Mainly azurerm and template. +The project has been created in order to provide the option to run the deployment in a `Test` or `QA` mode. This mode only enables the packages coming properly from SLE channels, so no other packages will be used. Find more information [here](../doc/qa.md). -- [variables.tf](variables.tf): definition of variables used in the configuration. These include definition of the number and type of instances, Azure region, etc. +## Pillar files configuration -- [keys.tf](keys.tf): definition of variables with information of key to include in the instances to allow connection via SSH. Edit this to add your own SSH key. 
+Besides the `terraform.tfvars` file usage to configure the deployment, a more advanced configuration is available through pillar files customization. Find more information [here](../pillar_examples/README.md). -- [resources.tf](resources.tf): definition of the resource group and storage account to use. +## Use already existing network resources -- [image.tf](image.tf): definition of the custom images to use for the virtual machines. The image resources will be only created if the **sles4sap_uri** or **iscsi_srv_uri** are set in the -**terraform.tfvars** file. Otherwise, a public image will be used. +The usage of already existing network resources (virtual network and subnets) can be done configuring +the `terraform.tfvars` file and adjusting some variables. The example of how to use them is available +at [terraform.tfvars.example](terraform.tfvars.example). -- [network.tf](network.tf): definition of network resources (virtual network, subnet, NICs, public IPs and network security group) used by the infrastructure. +## Autogenerated network addresses -- [instances.tf](instances.tf): definition of the virtual machines to create on deployment. +The assignment of the addresses of the nodes in the network can be automatically done in order to avoid +this configuration. For that, basically, remove or comment all the variables related to the ip addresses (more information in [variables.tf](variables.tf)). With this approach all the addresses will be retrieved based in the provided virtual network addresses range (`vnet_address_range`). -- [salt_provisioner.tf](salt_provisioner.tf): salt provisioning resources. +Autogenerated addresses example based in 10.74.0.0/24 -- [salt_provisioner_script.tpl](../salt/salt_provisioner_script.tpl): template code for the initialization script for the servers. This will add the salt-minion if needed and execute the SALT deployment. - -- [outputs.tf](outputs.tf): definition of outputs of the terraform configuration. 
- -- [terraform.tfvars.example](terraform.tfvars.example): file containing initialization values for variables used throughout the configuration. **Rename/Duplicate this file to terraform.tfvars and edit the content with your values before use**. - -### Variables - -**Important:** The image used for the iSCSI server **must be at least SLES 15 version** since the iSCSI salt formula is not compatible with lower versions. - -In the file [terraform.tfvars.example](terraform.tfvars.example) there are a number of variables that control what is deployed. Some of these variables are: - -* **sles4sap_uri**: path to a custom sles4sap image to install in the cluster nodes. -* **iscsi_srv_uri**: path to a custom image to install the iscsi server. -* **sles4sap_public**: map with the required information to install a public sles4sap image in the cluster nodes. This data is only used if `sles4sap_uri` is not set. -* **iscsi_srv_public**: map with the required information to install a public sles4sap image in the support server. This data is only used if `iscsi_srv_uri` is not set. -* **admin_user**: name of the administration user to deploy in all virtual machines. -* **private_key_location**: path to the local file containing the private SSH key to configure in the virtual machines to allow access. -* **public_key_location**: path to the local file containing the public SSH key to configure in the virtual machines to allow access. This public key is configured in the file `$HOME/.ssh/authorized_keys` of the administration user in the remote virtual machines. -* **storage_account_name**: Azure storage account name. -* **storage_account_key**: Azure storage account secret key (key1 or key2). -* **hana_inst_master**: path to the storage account where SAP HANA installation files are stored. -* **hana_fstype**: filesystem type used for HANA installation (xfs by default). 
-* **hana_vm_size**: SKU to use for the cluster nodes; basically the "size" (number of vCPUS and memory) of the VM. -* **hana_data_disk_type**: disk type to use for HANA (Standard_LRS by default). -* **hana_data_disk_caching**: caching mode for HANA disk, could be None, ReadOnly or ReadWrite (ReadWrite by default). -* **hana_count**: number of cluster nodes to deploy. 2 by default. -* **hana_instance_number**: Instance number for SAP HANA. 00 by default. -* **az_region**: Azure region where to deploy the configuration. -* **init_type**: initialization script parameter that controls what is deployed in the cluster nodes. Valid values are `all` (installs Hana and configures cluster), `skip-hana` (does not install Hana, but configures cluster) and `skip-cluster` (installs hana, but does not configure cluster). Defaults to `all`. -* **iscsidev**: device used by the iSCSI server to provide LUNs. -* **iscsi_disks**: attached partitions number for iscsi server. -* **cluster_ssh_pub**: SSH public key name (must match with the key copied in sshkeys folder) -* **cluster_ssh_key**: SSH private key name (must match with the key copied in sshkeys folder) -* **ha_sap_deployment_repo**: Repository with HA and Salt formula packages. The latest RPM packages can be found at [https://download.opensuse.org/repositories/network:/ha-clustering:/Factory/{YOUR OS VERSION}](https://download.opensuse.org/repositories/network:/ha-clustering:/Factory/) -* **scenario_type**: SAP HANA scenario type. Available options: `performance-optimized` and `cost-optimized`. -* **provisioner**: select the desired provisioner to configure the nodes. Salt is used by default: [salt](../salt). Let it empty to disable the provisioning part. -* **background**: run the provisioning process in background finishing terraform execution. -* **reg_code**: registration code for the installed base product (Ex.: SLES for SAP). This parameter is optional. 
If informed, the system will be registered against the SUSE Customer Center. -* **reg_email**: email to be associated with the system registration. This parameter is optional. -* **reg_additional_modules**: additional optional modules and extensions to be registered (Ex.: Containers Module, HA module, Live Patching, etc). The variable is a key-value map, where the key is the _module name_ and the value is the _registration code_. If the _registration code_ is not needed, set an empty string as value. The module format must follow SUSEConnect convention: - - `//` - - *Example:* Suggested modules for SLES for SAP 15 - - sle-module-basesystem/15/x86_64 - sle-module-desktop-applications/15/x86_64 - sle-module-server-applications/15/x86_64 - sle-ha/15/x86_64 (use the same regcode as SLES for SAP) - sle-module-sap-applications/15/x86_64 - - For more information about registration, check the ["Registering SUSE Linux Enterprise and Managing Modules/Extensions"](https://www.suse.com/documentation/sles-15/book_sle_deployment/data/cha_register_sle.html) guide. - - * **additional_packages**: Additional packages to add to the guest machines. - * **hosts_ips**: Each cluster nodes IP address (sequential order). Mandatory to have a generic `/etc/hosts` file. - -[Specific QA variables](../doc/qa.md#specific-qa-variables) - -### The pillar files hana.sls and cluster.sls - -Find more information about the hana and cluster formulas in (check the pillar.example files): - -- [https://github.com/SUSE/saphanabootstrap-formula](https://github.com/SUSE/saphanabootstrap-formula) -- [https://github.com/SUSE/habootstrap-formula](https://github.com/SUSE/habootstrap-formula) - -As a good example, you could find some pillar examples into the folder [pillar_examples](../pillar_examples) -These files **aren't ready for deployment**, be careful to customize them or create your own files. 
+Iscsi server: 10.74.0.4 +Monitoring: 10.74.0.5 +Hana ips: 10.74.0.10, 10.74.0.11 +Hana cluster vip: 10.74.0.12 +DRBD ips: 10.74.0.20, 10.74.0.21 +DRBD cluster vip: 10.74.0.22 +Netweaver ips: 10.74.0.30, 10.74.0.31, 10.74.0.32, 10.74.0.33 +Netweaver virtual ips: 10.74.0.34, 10.74.0.35, 10.74.0.36, 10.74.0.37 # Advanced usage @@ -220,7 +162,7 @@ Once all four required parameters are known, there are several ways to configure * In provider definition -Add the values for subscription id, tenant id, client id and client secret in the file [provider.tf](provider.tf). +Add the values for subscription id, tenant id, client id and client secret in the file [main.tf](main.tf). * Via Environment Variables @@ -237,7 +179,7 @@ export ARM_ACCESS_KEY=access_key ## How to upload a custom image -In the terraform configuration we are using a custom images (defined in the file [image.tf](image.tf)) referenced as `azurerm_image.iscsi_srv.*.id` and `azurerm_image.sles4sap.*.id` in the file [instances.tf](instances.tf) (in the `storage_image_reference` block). +In the terraform configuration we are using a custom images which are defined in the main.tf files of terraform modules (under the `storage_image_reference` block) and referenced as `azurerm_image.iscsi_srv.*.id` and `azurerm_image.sles4sap.*.id`. This custom images need to be already uploaded to Azure before attempting to use it with terraform, as terraform does not have a mechanism to upload images as of yet. @@ -361,7 +303,7 @@ Once the image is successfully uploaded, get its URL/URI with the command: az storage blob url --name SLES12-SP4-SAP-Azure-BYOS.x86_64-0.9.0-Build2.1.vhd --container-name MyStorageContainer --account-name MyStorageAccount ``` -This URI will be used in the terraform configuration, specifically in the [image.tf](image.tf) file or via the command line, so keep it on hand. 
+This URI will be used in the terraform configuration, specifically in the main.tf file of corresponding terraform module or via the command line, so keep it on hand. ### Remove resources diff --git a/azure/image.tf b/azure/image.tf deleted file mode 100644 index 4dedce9c3..000000000 --- a/azure/image.tf +++ /dev/null @@ -1,149 +0,0 @@ -# This configuration defines the custom image to use - -# Variable for the image URI. Run as terraform apply -var sles4sap_uri https://blob.azure.microsoft.com/this/is/my/image.vhd -# If custom uris are enabled public information will be omitted -# One of the two options must be used - -variable "sles4sap_uri" { - type = string - default = "" -} - -variable "hana_public_publisher" { - type = string - default = "SUSE" -} - -variable "hana_public_offer" { - type = string - default = "SLES-SAP-BYOS" -} - -variable "hana_public_sku" { - type = string - default = "12-sp4" -} - -variable "hana_public_version" { - type = string - default = "latest" -} - -variable "iscsi_public_publisher" { - type = string - default = "SUSE" -} - -variable "iscsi_public_offer" { - type = string - default = "SLES-SAP-BYOS" -} - -variable "iscsi_public_sku" { - type = string - default = "15" -} - -variable "iscsi_public_version" { - type = string - default = "latest" -} - -variable "iscsi_srv_uri" { - type = string - default = "" -} - -variable "monitoring_public_publisher" { - type = string - default = "SUSE" -} - -variable "monitoring_public_offer" { - type = string - default = "SLES-SAP-BYOS" -} - -variable "monitoring_public_sku" { - type = string - default = "15" -} - -variable "monitoring_public_version" { - type = string - default = "latest" -} - -variable "monitoring_uri" { - type = string - default = "" -} - -variable "drbd_public_publisher" { - type = string - default = "SUSE" -} - -variable "drbd_public_offer" { - type = string - default = "SLES-SAP-BYOS" -} - -variable "drbd_public_sku" { - type = string - default = "15" -} - -variable 
"drbd_public_version" { - type = string - default = "latest" -} - -variable "drbd_image_uri" { - type = string - default = "" -} - -variable "netweaver_public_publisher" { - type = string - default = "SUSE" -} - -variable "netweaver_public_offer" { - type = string - default = "SLES-SAP-BYOS" -} - -variable "netweaver_public_sku" { - type = string - default = "15" -} - -variable "netweaver_public_version" { - type = string - default = "latest" -} - -variable "netweaver_image_uri" { - type = string - default = "" -} - -resource "azurerm_image" "iscsi_srv" { - count = var.iscsi_srv_uri != "" ? 1 : 0 - name = "IscsiSrvImg" - location = var.az_region - resource_group_name = azurerm_resource_group.myrg.name - - os_disk { - os_type = "Linux" - os_state = "Generalized" - blob_uri = var.iscsi_srv_uri - size_gb = "32" - } - - tags = { - workspace = terraform.workspace - } -} - diff --git a/azure/network.tf b/azure/infrastructure.tf similarity index 57% rename from azure/network.tf rename to azure/infrastructure.tf index e5f38dd68..01ad33dde 100644 --- a/azure/network.tf +++ b/azure/infrastructure.tf @@ -1,13 +1,65 @@ -# Launch SLES-HAE of SLES4SAP cluster nodes +# Configure the Azure Provider +provider "azurerm" { + version = "~> 1.44" +} + +terraform { + required_version = ">= 0.12" +} + +data "azurerm_subscription" "current" { +} + +data "azurerm_virtual_network" "mynet" { + count = var.vnet_name != "" && var.vnet_address_range == "" ? 1 : 0 + name = var.vnet_name + resource_group_name = local.resource_group_name +} + +data "azurerm_subnet" "mysubnet" { + count = var.subnet_name != "" && var.subnet_address_range == "" ? 1 : 0 + name = var.subnet_name + virtual_network_name = local.vnet_name + resource_group_name = local.resource_group_name +} + +locals { + resource_group_name = var.resource_group_name == "" ? azurerm_resource_group.myrg.0.name : var.resource_group_name + vnet_name = var.vnet_name == "" ? 
azurerm_virtual_network.mynet.0.name : var.vnet_name + subnet_id = var.subnet_name == "" ? azurerm_subnet.mysubnet.0.id : format( + "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Network/virtualNetworks/%s/subnets/%s", data.azurerm_subscription.current.subscription_id, var.resource_group_name, var.vnet_name, var.subnet_name) + # If vnet_name is not defined, a new vnet is created + # If vnet_name is defined, and the vnet_address_range is empty, it will try to get the ip range from the real vnet using the data source. If vnet_address_range is defined it will use it + vnet_address_range = var.vnet_name == "" ? var.vnet_address_range : (var.vnet_address_range == "" ? data.azurerm_virtual_network.mynet.0.address_space.0 : var.vnet_address_range) + subnet_address_range = var.subnet_name == "" ? (var.subnet_address_range == "" ? cidrsubnet(local.vnet_address_range, 8, 1) : var.subnet_address_range) : (var.subnet_address_range == "" ? data.azurerm_subnet.mysubnet.0.address_prefix : var.subnet_address_range) +} + +# Azure resource group and storage account resources +resource "azurerm_resource_group" "myrg" { + count = var.resource_group_name == "" ? 1 : 0 + name = "rg-ha-sap-${terraform.workspace}" + location = var.az_region +} -# Private IP addresses for the cluster nodes +resource "azurerm_storage_account" "mytfstorageacc" { + name = "stdiag${lower(terraform.workspace)}" + resource_group_name = local.resource_group_name + location = var.az_region + account_replication_type = "LRS" + account_tier = "Standard" + + tags = { + workspace = terraform.workspace + } +} # Network resources: Virtual Network, Subnet resource "azurerm_virtual_network" "mynet" { + count = var.vnet_name == "" ? 
1 : 0 name = "vnet-${lower(terraform.workspace)}" - address_space = ["10.74.0.0/16"] + address_space = [local.vnet_address_range] location = var.az_region - resource_group_name = azurerm_resource_group.myrg.name + resource_group_name = local.resource_group_name tags = { workspace = terraform.workspace @@ -15,19 +67,20 @@ resource "azurerm_virtual_network" "mynet" { } resource "azurerm_subnet" "mysubnet" { - name = "snet-default" - resource_group_name = azurerm_resource_group.myrg.name - virtual_network_name = azurerm_virtual_network.mynet.name - address_prefix = "10.74.1.0/24" + count = var.subnet_name == "" ? 1 : 0 + name = "snet-${lower(terraform.workspace)}" + resource_group_name = local.resource_group_name + virtual_network_name = local.vnet_name + address_prefix = local.subnet_address_range } resource "azurerm_subnet_network_security_group_association" "mysubnet" { - subnet_id = azurerm_subnet.mysubnet.id + subnet_id = local.subnet_id network_security_group_id = azurerm_network_security_group.mysecgroup.id } resource "azurerm_subnet_route_table_association" "mysubnet" { - subnet_id = azurerm_subnet.mysubnet.id + subnet_id = local.subnet_id route_table_id = azurerm_route_table.myroutes.id } @@ -36,11 +89,11 @@ resource "azurerm_subnet_route_table_association" "mysubnet" { resource "azurerm_route_table" "myroutes" { name = "route-${lower(terraform.workspace)}" location = var.az_region - resource_group_name = azurerm_resource_group.myrg.name + resource_group_name = local.resource_group_name route { name = "default" - address_prefix = "10.74.0.0/16" + address_prefix = local.vnet_address_range next_hop_type = "vnetlocal" } @@ -54,7 +107,7 @@ resource "azurerm_route_table" "myroutes" { resource "azurerm_network_security_group" "mysecgroup" { name = "nsg-${lower(terraform.workspace)}" location = var.az_region - resource_group_name = azurerm_resource_group.myrg.name + resource_group_name = local.resource_group_name security_rule { name = "OUTALL" priority = 100 @@ 
-75,7 +128,7 @@ resource "azurerm_network_security_group" "mysecgroup" { protocol = "*" source_port_range = "*" destination_port_range = "*" - source_address_prefix = "10.74.0.0/16" + source_address_prefix = local.vnet_address_range destination_address_prefix = "*" } @@ -150,7 +203,7 @@ resource "azurerm_network_security_group" "mysecgroup" { destination_address_prefix = "*" } security_rule { - name = "ha-exporter" + name = "haExporter" priority = 1007 direction = "Inbound" access = "Allow" @@ -173,6 +226,17 @@ resource "azurerm_network_security_group" "mysecgroup" { destination_address_prefix = "*" } + security_rule { + name = "grafana" + priority = 1009 + direction = "Inbound" + access = "Allow" + protocol = "*" + source_port_range = "*" + destination_port_range = "3000" + source_address_prefix = "*" + destination_address_prefix = "*" + } tags = { workspace = terraform.workspace diff --git a/azure/keys.tf b/azure/keys.tf deleted file mode 100644 index 323add917..000000000 --- a/azure/keys.tf +++ /dev/null @@ -1,20 +0,0 @@ -variable "admin_user" { - type = string -} - -variable "public_key_location" { - type = string -} - -variable "private_key_location" { - type = string -} - -variable "storage_account_name" { - type = string -} - -variable "storage_account_key" { - type = string -} - diff --git a/azure/main.tf b/azure/main.tf index 84a5c4b11..559a75206 100644 --- a/azure/main.tf +++ b/azure/main.tf @@ -1,3 +1,30 @@ +module "local_execution" { + source = "../generic_modules/local_exec" + enabled = var.pre_deployment +} + +# This locals entry is used to store the IP addresses of all the machines. 
+# Autogenerated addresses example based in 10.74.0.0/24 +# Iscsi server: 10.74.0.4 +# Monitoring: 10.74.0.5 +# Hana ips: 10.74.0.10, 10.74.0.11 +# Hana cluster vip: 10.74.0.12 +# DRBD ips: 10.74.0.20, 10.74.0.21 +# DRBD cluster vip: 10.74.0.22 +# Netweaver ips: 10.74.0.30, 10.74.0.31, 10.74.0.32, 10.74.0.33 +# Netweaver virtual ips: 10.74.0.34, 10.74.0.35, 10.74.0.36, 10.74.0.37 +# If the addresses are provided by the user will always have preference +locals { + iscsi_ip = var.iscsi_srv_ip != "" ? var.iscsi_srv_ip : cidrhost(local.subnet_address_range, 4) + monitoring_ip = var.monitoring_srv_ip != "" ? var.monitoring_srv_ip : cidrhost(local.subnet_address_range, 5) + hana_ips = length(var.hana_ips) != 0 ? var.hana_ips : [for ip_index in range(10, 12) : cidrhost(local.subnet_address_range, ip_index)] + hana_cluster_vip = var.hana_cluster_vip != "" ? var.hana_cluster_vip : cidrhost(local.subnet_address_range, 12) + drbd_ips = length(var.drbd_ips) != 0 ? var.drbd_ips : [for ip_index in range(20, 22) : cidrhost(local.subnet_address_range, ip_index)] + drbd_cluster_vip = var.drbd_cluster_vip != "" ? var.drbd_cluster_vip : cidrhost(local.subnet_address_range, 22) + netweaver_ips = length(var.netweaver_ips) != 0 ? var.netweaver_ips : [for ip_index in range(30, 34) : cidrhost(local.subnet_address_range, ip_index)] + netweaver_virtual_ips = length(var.netweaver_virtual_ips) != 0 ? 
var.netweaver_virtual_ips : [for ip_index in range(34, 38) : cidrhost(local.subnet_address_range, ip_index)] +} + module "drbd_node" { source = "./modules/drbd_node" az_region = var.az_region @@ -8,8 +35,8 @@ module "drbd_node" { drbd_public_offer = var.drbd_public_offer drbd_public_sku = var.drbd_public_sku drbd_public_version = var.drbd_public_version - resource_group_name = azurerm_resource_group.myrg.name - network_subnet_id = azurerm_subnet.mysubnet.id + resource_group_name = local.resource_group_name + network_subnet_id = local.subnet_id sec_group_id = azurerm_network_security_group.mysecgroup.id storage_account = azurerm_storage_account.mytfstorageacc.primary_blob_endpoint public_key_location = var.public_key_location @@ -17,8 +44,8 @@ module "drbd_node" { cluster_ssh_pub = var.cluster_ssh_pub cluster_ssh_key = var.cluster_ssh_key admin_user = var.admin_user - host_ips = var.drbd_ips - iscsi_srv_ip = var.iscsi_srv_ip + host_ips = local.drbd_ips + iscsi_srv_ip = module.iscsi_server.iscsisrv_ip.0 reg_code = var.reg_code reg_email = var.reg_email reg_additional_modules = var.reg_additional_modules @@ -27,6 +54,7 @@ module "drbd_node" { provisioner = var.provisioner background = var.background monitoring_enabled = var.monitoring_enabled + drbd_cluster_vip = local.drbd_cluster_vip } module "netweaver_node" { @@ -42,8 +70,8 @@ module "netweaver_node" { netweaver_public_offer = var.netweaver_public_offer netweaver_public_sku = var.netweaver_public_sku netweaver_public_version = var.netweaver_public_version - resource_group_name = azurerm_resource_group.myrg.name - network_subnet_id = azurerm_subnet.mysubnet.id + resource_group_name = local.resource_group_name + network_subnet_id = local.subnet_id sec_group_id = azurerm_network_security_group.mysecgroup.id storage_account = azurerm_storage_account.mytfstorageacc.primary_blob_endpoint public_key_location = var.public_key_location @@ -51,14 +79,22 @@ module "netweaver_node" { cluster_ssh_pub = var.cluster_ssh_pub 
cluster_ssh_key = var.cluster_ssh_key admin_user = var.admin_user - netweaver_nfs_share = "10.74.1.201:/HA1" # drbd cluster ip address is hardcoded by now + netweaver_product_id = var.netweaver_product_id + netweaver_swpm_folder = var.netweaver_swpm_folder + netweaver_sapcar_exe = var.netweaver_sapcar_exe + netweaver_swpm_sar = var.netweaver_swpm_sar + netweaver_swpm_extract_dir = var.netweaver_swpm_extract_dir + netweaver_sapexe_folder = var.netweaver_sapexe_folder + netweaver_additional_dvds = var.netweaver_additional_dvds + netweaver_nfs_share = "${local.drbd_cluster_vip}:/HA1" storage_account_name = var.netweaver_storage_account_name storage_account_key = var.netweaver_storage_account_key storage_account_path = var.netweaver_storage_account enable_accelerated_networking = var.netweaver_enable_accelerated_networking - host_ips = var.netweaver_ips - virtual_host_ips = var.netweaver_virtual_ips - iscsi_srv_ip = var.iscsi_srv_ip + host_ips = local.netweaver_ips + virtual_host_ips = local.netweaver_virtual_ips + iscsi_srv_ip = module.iscsi_server.iscsisrv_ip.0 + hana_ip = local.hana_cluster_vip reg_code = var.reg_code reg_email = var.reg_email reg_additional_modules = var.reg_additional_modules @@ -75,10 +111,10 @@ module "hana_node" { hana_count = var.hana_count hana_instance_number = var.hana_instance_number vm_size = var.hana_vm_size - host_ips = var.host_ips + host_ips = local.hana_ips scenario_type = var.scenario_type - resource_group_name = azurerm_resource_group.myrg.name - network_subnet_id = azurerm_subnet.mysubnet.id + resource_group_name = local.resource_group_name + network_subnet_id = local.subnet_id sec_group_id = azurerm_network_security_group.mysecgroup.id storage_account = azurerm_storage_account.mytfstorageacc.primary_blob_endpoint storage_account_name = var.storage_account_name @@ -86,8 +122,13 @@ module "hana_node" { enable_accelerated_networking = var.hana_enable_accelerated_networking sles4sap_uri = var.sles4sap_uri init_type = var.init_type + 
hana_cluster_vip = local.hana_cluster_vip hana_inst_master = var.hana_inst_master hana_inst_folder = var.hana_inst_folder + hana_platform_folder = var.hana_platform_folder + hana_sapcar_exe = var.hana_sapcar_exe + hdbserver_sar = var.hdbserver_sar + hana_extract_dir = var.hana_extract_dir hana_disk_device = var.hana_disk_device hana_fstype = var.hana_fstype cluster_ssh_pub = var.cluster_ssh_pub @@ -102,7 +143,7 @@ module "hana_node" { hana_public_sku = var.hana_public_sku hana_public_version = var.hana_public_version admin_user = var.admin_user - iscsi_srv_ip = var.iscsi_srv_ip + iscsi_srv_ip = module.iscsi_server.iscsisrv_ip.0 reg_code = var.reg_code reg_email = var.reg_email reg_additional_modules = var.reg_additional_modules @@ -120,8 +161,8 @@ module "monitoring" { source = "./modules/monitoring" az_region = var.az_region vm_size = var.monitoring_vm_size - resource_group_name = azurerm_resource_group.myrg.name - network_subnet_id = azurerm_subnet.mysubnet.id + resource_group_name = local.resource_group_name + network_subnet_id = local.subnet_id sec_group_id = azurerm_network_security_group.mysecgroup.id storage_account = azurerm_storage_account.mytfstorageacc.primary_blob_endpoint monitoring_uri = var.monitoring_uri @@ -129,11 +170,10 @@ module "monitoring" { monitoring_public_offer = var.monitoring_public_offer monitoring_public_sku = var.monitoring_public_sku monitoring_public_version = var.monitoring_public_version - monitoring_srv_ip = var.monitoring_srv_ip + monitoring_srv_ip = local.monitoring_ip public_key_location = var.public_key_location private_key_location = var.private_key_location admin_user = var.admin_user - host_ips = var.host_ips reg_code = var.reg_code reg_email = var.reg_email reg_additional_modules = var.reg_additional_modules @@ -142,18 +182,17 @@ module "monitoring" { provisioner = var.provisioner background = var.background monitoring_enabled = var.monitoring_enabled - drbd_enabled = var.drbd_enabled - drbd_ips = var.drbd_ips - 
netweaver_enabled = var.netweaver_enabled - netweaver_ips = var.netweaver_ips + hana_targets = concat(local.hana_ips, [local.hana_cluster_vip]) # we use the vip to target the active hana instance + drbd_targets = var.drbd_enabled ? local.drbd_ips : [] + netweaver_targets = var.netweaver_enabled ? local.netweaver_virtual_ips : [] } module "iscsi_server" { source = "./modules/iscsi_server" az_region = var.az_region vm_size = var.iscsi_vm_size - resource_group_name = azurerm_resource_group.myrg.name - network_subnet_id = azurerm_subnet.mysubnet.id + resource_group_name = local.resource_group_name + network_subnet_id = local.subnet_id sec_group_id = azurerm_network_security_group.mysecgroup.id storage_account = azurerm_storage_account.mytfstorageacc.primary_blob_endpoint iscsi_srv_uri = var.iscsi_srv_uri @@ -166,7 +205,7 @@ module "iscsi_server" { iscsidev = var.iscsidev iscsi_disks = var.iscsi_disks admin_user = var.admin_user - iscsi_srv_ip = var.iscsi_srv_ip + iscsi_srv_ip = local.iscsi_ip reg_code = var.reg_code reg_email = var.reg_email reg_additional_modules = var.reg_additional_modules diff --git a/azure/modules/drbd_node/main.tf b/azure/modules/drbd_node/main.tf index 8e61b7c80..e311669b7 100644 --- a/azure/modules/drbd_node/main.tf +++ b/azure/modules/drbd_node/main.tf @@ -27,7 +27,7 @@ resource "azurerm_lb" "drbd-load-balancer" { name = "lbfe-drbd" subnet_id = var.network_subnet_id private_ip_address_allocation = "static" - private_ip_address = "10.74.1.201" + private_ip_address = var.drbd_cluster_vip } tags = { @@ -164,16 +164,18 @@ resource "azurerm_image" "drbd-image" { # drbd instances resource "azurerm_virtual_machine" "drbd" { - count = var.drbd_count - name = "vm${var.name}${var.drbd_count > 1 ? 
"0${count.index + 1}" : ""}" - location = var.az_region - resource_group_name = var.resource_group_name - network_interface_ids = [element(azurerm_network_interface.drbd.*.id, count.index)] - availability_set_id = azurerm_availability_set.drbd-availability-set[0].id - vm_size = var.vm_size + count = var.drbd_count + name = "vm${var.name}0${count.index + 1}" + location = var.az_region + resource_group_name = var.resource_group_name + network_interface_ids = [element(azurerm_network_interface.drbd.*.id, count.index)] + availability_set_id = azurerm_availability_set.drbd-availability-set[0].id + vm_size = var.vm_size + delete_os_disk_on_termination = true + delete_data_disks_on_termination = true storage_os_disk { - name = "disk-${var.name}${var.drbd_count > 1 ? "0${count.index + 1}" : ""}-Os" + name = "disk-${var.name}0${count.index + 1}-Os" caching = "ReadWrite" create_option = "FromImage" managed_disk_type = "Premium_LRS" @@ -188,7 +190,7 @@ resource "azurerm_virtual_machine" "drbd" { } storage_data_disk { - name = "disk-${var.name}${var.drbd_count > 1 ? 
"0${count.index + 1}" : ""}-Data01" + name = "disk-${var.name}0${count.index + 1}-Data01" caching = "ReadWrite" create_option = "Empty" disk_size_gb = "10" @@ -197,7 +199,7 @@ resource "azurerm_virtual_machine" "drbd" { } os_profile { - computer_name = "drbd0${count.index + 1}" + computer_name = "vmdrbd0${count.index + 1}" admin_username = var.admin_user } @@ -219,3 +221,13 @@ resource "azurerm_virtual_machine" "drbd" { workspace = terraform.workspace } } + +module "drbd_on_destroy" { + source = "../../../generic_modules/on_destroy" + node_count = var.drbd_count + instance_ids = azurerm_virtual_machine.drbd.*.id + user = var.admin_user + private_key_location = var.private_key_location + public_ips = data.azurerm_public_ip.drbd.*.ip_address + dependencies = [data.azurerm_public_ip.drbd] +} diff --git a/azure/modules/drbd_node/salt_provisioner.tf b/azure/modules/drbd_node/salt_provisioner.tf index 76f4ea52b..b4852e551 100644 --- a/azure/modules/drbd_node/salt_provisioner.tf +++ b/azure/modules/drbd_node/salt_provisioner.tf @@ -1,11 +1,3 @@ -data "template_file" "salt_provisioner" { - template = file("../salt/salt_provisioner_script.tpl") - - vars = { - regcode = var.reg_code - } -} - resource "null_resource" "drbd_provisioner" { count = var.provisioner == "salt" ? var.drbd_count : 0 @@ -20,22 +12,12 @@ resource "null_resource" "drbd_provisioner" { private_key = file(var.private_key_location) } - provisioner "file" { - source = "../salt" - destination = "/tmp" - } - - provisioner "file" { - content = data.template_file.salt_provisioner.rendered - destination = "/tmp/salt_provisioner.sh" - } - provisioner "file" { content = < 1 ? 
"0${count.index + 1}" : ""} +hostname: vm${var.name}0${count.index + 1} network_domain: ${var.network_domain} additional_packages: [] reg_code: ${var.reg_code} @@ -47,8 +29,9 @@ host_ip: ${element(var.host_ips, count.index)} cluster_ssh_pub: ${var.cluster_ssh_pub} cluster_ssh_key: ${var.cluster_ssh_key} drbd_disk_device: /dev/sdc +drbd_cluster_vip: ${var.drbd_cluster_vip} shared_storage_type: iscsi -sbd_disk_device: /dev/sde +sbd_disk_index: 3 iscsi_srv_ip: ${var.iscsi_srv_ip} ha_sap_deployment_repo: ${var.ha_sap_deployment_repo} monitoring_enabled: ${var.monitoring_enabled} @@ -58,15 +41,17 @@ partitions: 1: start: 0% end: 100% - EOF destination = "/tmp/grains" } +} - provisioner "remote-exec" { - inline = [ - "${var.background ? "nohup" : ""} sudo sh /tmp/salt_provisioner.sh > /tmp/provisioning.log ${var.background ? "&" : ""}", - "return_code=$? && sleep 1 && exit $return_code", - ] # Workaround to let the process start in background properly - } +module "drbd_provision" { + source = "../../../generic_modules/salt_provisioner" + node_count = var.provisioner == "salt" ? 
var.drbd_count : 0 + instance_ids = null_resource.drbd_provisioner.*.id + user = var.admin_user + private_key_location = var.private_key_location + public_ips = data.azurerm_public_ip.drbd.*.ip_address + background = var.background } diff --git a/azure/modules/drbd_node/variables.tf b/azure/modules/drbd_node/variables.tf index c38269138..b08dcf6fd 100644 --- a/azure/modules/drbd_node/variables.tf +++ b/azure/modules/drbd_node/variables.tf @@ -122,11 +122,13 @@ variable "ha_sap_deployment_repo" { variable "devel_mode" { description = "Whether or not to install the HA/SAP packages from the `ha_sap_deployment_repo`" + type = bool default = false } variable "qa_mode" { description = "Whether or not to install the HA/SAP packages from the `ha_sap_deployment_repo`" + type = bool default = false } @@ -137,10 +139,17 @@ variable "provisioner" { variable "background" { description = "Run the provisioner execution in background if set to true finishing terraform execution" + type = bool default = false } variable "monitoring_enabled" { description = "enable the host to be monitored by exporters, e.g node_exporter" + type = bool default = false } + +variable "drbd_cluster_vip" { + description = "Virtual ip for the drbd cluster" + type = string +} diff --git a/azure/modules/hana_node/main.tf b/azure/modules/hana_node/main.tf index 23299d7c2..0f3de0a70 100644 --- a/azure/modules/hana_node/main.tf +++ b/azure/modules/hana_node/main.tf @@ -23,7 +23,7 @@ resource "azurerm_lb" "hana-load-balancer" { name = "lbfe-hana" subnet_id = var.network_subnet_id private_ip_address_allocation = "static" - private_ip_address = "10.74.1.200" + private_ip_address = var.hana_cluster_vip } tags = { @@ -161,7 +161,7 @@ resource "azurerm_lb_rule" "lb_3xx17" { resource "azurerm_network_interface" "hana" { count = var.hana_count - name = "nic-${var.name}${var.hana_count > 1 ? 
"0${count.index + 1}" : ""}" + name = "nic-${var.name}0${count.index + 1}" location = var.az_region resource_group_name = var.resource_group_name network_security_group_id = var.sec_group_id @@ -182,7 +182,7 @@ resource "azurerm_network_interface" "hana" { resource "azurerm_public_ip" "hana" { count = var.hana_count - name = "pip-${var.name}${var.hana_count > 1 ? "0${count.index + 1}" : ""}" + name = "pip-${var.name}0${count.index + 1}" location = var.az_region resource_group_name = var.resource_group_name allocation_method = "Dynamic" @@ -214,16 +214,18 @@ resource "azurerm_image" "sles4sap" { # hana instances resource "azurerm_virtual_machine" "hana" { - count = var.hana_count - name = "vm${var.name}${var.hana_count > 1 ? "0${count.index + 1}" : ""}" - location = var.az_region - resource_group_name = var.resource_group_name - network_interface_ids = [element(azurerm_network_interface.hana.*.id, count.index)] - availability_set_id = azurerm_availability_set.hana-availability-set.id - vm_size = var.vm_size + count = var.hana_count + name = "vm${var.name}0${count.index + 1}" + location = var.az_region + resource_group_name = var.resource_group_name + network_interface_ids = [element(azurerm_network_interface.hana.*.id, count.index)] + availability_set_id = azurerm_availability_set.hana-availability-set.id + vm_size = var.vm_size + delete_os_disk_on_termination = true + delete_data_disks_on_termination = true storage_os_disk { - name = "disk-${var.name}${var.hana_count > 1 ? "0${count.index + 1}" : ""}-Os" + name = "disk-${var.name}0${count.index + 1}-Os" caching = "ReadWrite" create_option = "FromImage" managed_disk_type = "Premium_LRS" @@ -238,7 +240,7 @@ resource "azurerm_virtual_machine" "hana" { } storage_data_disk { - name = "disk-${var.name}${var.hana_count > 1 ? 
"0${count.index + 1}" : ""}-Data01" + name = "disk-${var.name}0${count.index + 1}-Data01" managed_disk_type = var.hana_data_disk_type create_option = "Empty" lun = 0 @@ -247,7 +249,7 @@ resource "azurerm_virtual_machine" "hana" { } storage_data_disk { - name = "disk-${var.name}${var.hana_count > 1 ? "0${count.index + 1}" : ""}-Data02" + name = "disk-${var.name}0${count.index + 1}-Data02" managed_disk_type = var.hana_data_disk_type create_option = "Empty" lun = 1 @@ -256,7 +258,7 @@ resource "azurerm_virtual_machine" "hana" { } storage_data_disk { - name = "disk-${var.name}${var.hana_count > 1 ? "0${count.index + 1}" : ""}-Data03" + name = "disk-${var.name}0${count.index + 1}-Data03" managed_disk_type = var.hana_data_disk_type create_option = "Empty" lun = 2 @@ -265,7 +267,7 @@ resource "azurerm_virtual_machine" "hana" { } os_profile { - computer_name = "${var.name}${var.hana_count > 1 ? "0${count.index + 1}" : ""}" + computer_name = "vm${var.name}0${count.index + 1}" admin_username = var.admin_user } @@ -286,4 +288,14 @@ resource "azurerm_virtual_machine" "hana" { tags = { workspace = terraform.workspace } -} \ No newline at end of file +} + +module "hana_on_destroy" { + source = "../../../generic_modules/on_destroy" + node_count = var.hana_count + instance_ids = azurerm_virtual_machine.hana.*.id + user = var.admin_user + private_key_location = var.private_key_location + public_ips = data.azurerm_public_ip.hana.*.ip_address + dependencies = [data.azurerm_public_ip.hana] +} diff --git a/azure/modules/hana_node/salt_provisioner.tf b/azure/modules/hana_node/salt_provisioner.tf index 9cf40b6e6..746a597ce 100644 --- a/azure/modules/hana_node/salt_provisioner.tf +++ b/azure/modules/hana_node/salt_provisioner.tf @@ -1,12 +1,3 @@ -# Template file to launch the salt provisioing script -data "template_file" "salt_provisioner" { - template = file("../salt/salt_provisioner_script.tpl") - - vars = { - regcode = var.reg_code - } -} - resource "null_resource" 
"hana_node_provisioner" { count = var.provisioner == "salt" ? var.hana_count : 0 @@ -25,35 +16,29 @@ resource "null_resource" "hana_node_provisioner" { } provisioner "file" { - source = "../salt" - destination = "/tmp" - } - - provisioner "file" { - content = data.template_file.salt_provisioner.rendered - destination = "/tmp/salt_provisioner.sh" - } - - provisioner "file" { - content = < 1 ? "0${count.index + 1}" : ""} network_domain: "tf.local" shared_storage_type: iscsi -sbd_disk_device: /dev/sdf +sbd_disk_index: 1 hana_inst_master: ${var.hana_inst_master} hana_inst_folder: ${var.hana_inst_folder} +hana_platform_folder: ${var.hana_platform_folder} +hana_sapcar_exe: ${var.hana_sapcar_exe} +hdbserver_sar: ${var.hdbserver_sar} +hana_extract_dir: ${var.hana_extract_dir} hana_disk_device: ${var.hana_disk_device} hana_fstype: ${var.hana_fstype} storage_account_name: ${var.storage_account_name} storage_account_key: ${var.storage_account_key} iscsi_srv_ip: ${var.iscsi_srv_ip} -azure_lb_ip: ${azurerm_lb.hana-load-balancer.private_ip_address} +hana_cluster_vip: ${azurerm_lb.hana-load-balancer.private_ip_address} init_type: ${var.init_type} cluster_ssh_pub: ${var.cluster_ssh_pub} cluster_ssh_key: ${var.cluster_ssh_key} @@ -66,14 +51,16 @@ reg_additional_modules: {${join(", ", formatlist("'%s': '%s'", keys(var.reg_addi additional_packages: [${join(", ", formatlist("'%s'", var.additional_packages))}] ha_sap_deployment_repo: ${var.ha_sap_deployment_repo} EOF - destination = "/tmp/grains" } +} - provisioner "remote-exec" { - inline = [ - "${var.background ? "nohup" : ""} sudo sh /tmp/salt_provisioner.sh > /tmp/provisioning.log ${var.background ? "&" : ""}", - "return_code=$? && sleep 1 && exit $return_code", - ] # Workaround to let the process start in background properly - } +module "hana_provision" { + source = "../../../generic_modules/salt_provisioner" + node_count = var.provisioner == "salt" ? 
var.hana_count : 0 + instance_ids = null_resource.hana_node_provisioner.*.id + user = var.admin_user + private_key_location = var.private_key_location + public_ips = data.azurerm_public_ip.hana.*.ip_address + background = var.background } diff --git a/azure/modules/hana_node/variables.tf b/azure/modules/hana_node/variables.tf index e2c2ad303..b481b5389 100644 --- a/azure/modules/hana_node/variables.tf +++ b/azure/modules/hana_node/variables.tf @@ -25,8 +25,8 @@ variable "hana_count" { } variable "name" { - type = string - default = "hana" + type = string + default = "hana" } variable "hana_instance_number" { @@ -51,7 +51,7 @@ variable "storage_account_key" { } variable "enable_accelerated_networking" { - type = bool + type = bool } variable "host_ips" { @@ -65,19 +65,19 @@ variable "sles4sap_uri" { } variable "hana_public_publisher" { - type = string + type = string } variable "hana_public_offer" { - type = string + type = string } variable "hana_public_sku" { - type = string + type = string } variable "hana_public_version" { - type = string + type = string } variable "vm_size" { @@ -125,6 +125,7 @@ variable "reg_email" { variable "monitoring_enabled" { description = "enable the host to be monitored by exporters, e.g node_exporter" + type = bool default = false } @@ -146,6 +147,7 @@ variable "ha_sap_deployment_repo" { variable "devel_mode" { description = "Whether or not to install the HA/SAP packages from the `ha_sap_deployment_repo`" + type = bool default = false } @@ -157,6 +159,7 @@ variable "hwcct" { variable "qa_mode" { description = "Whether or not to install the HA/SAP packages from the `ha_sap_deployment_repo`" + type = bool default = false } @@ -167,6 +170,7 @@ variable "provisioner" { variable "background" { description = "Run the provisioner execution in background if set to true finishing terraform execution" + type = bool default = false } @@ -181,7 +185,31 @@ variable "hana_inst_master" { variable "hana_inst_folder" { type = string - default = 
"/root/hana_inst_media" + default = "/sapmedia/HANA" +} + +variable "hana_platform_folder" { + description = "Path to the hana platform media, relative to the 'hana_inst_master' mounting point" + type = string + default = "" +} + +variable "hana_sapcar_exe" { + description = "Path to the sapcar executable, relative to the 'hana_inst_master' mounting point" + type = string + default = "" +} + +variable "hdbserver_sar" { + description = "Path to the HANA database server installation sar archive, relative to the 'hana_inst_master' mounting point" + type = string + default = "" +} + +variable "hana_extract_dir" { + description = "Absolute path to folder where SAP HANA sar archive will be extracted" + type = string + default = "/sapmedia/HANA" } variable "hana_disk_device" { @@ -206,5 +234,10 @@ variable "hana_data_disk_size" { } variable "hana_data_disk_caching" { - type = string + type = string +} + +variable "hana_cluster_vip" { + description = "Virtual ip for the hana cluster" + type = string } diff --git a/azure/modules/iscsi_server/main.tf b/azure/modules/iscsi_server/main.tf index b7396ba4e..31a692454 100644 --- a/azure/modules/iscsi_server/main.tf +++ b/azure/modules/iscsi_server/main.tf @@ -54,11 +54,13 @@ resource "azurerm_image" "iscsi_srv" { # iSCSI server VM resource "azurerm_virtual_machine" "iscsisrv" { - name = "vmiscsisrv" - location = var.az_region - resource_group_name = var.resource_group_name - network_interface_ids = [azurerm_network_interface.iscsisrv.id] - vm_size = var.vm_size + name = "vmiscsisrv" + location = var.az_region + resource_group_name = var.resource_group_name + network_interface_ids = [azurerm_network_interface.iscsisrv.id] + vm_size = var.vm_size + delete_os_disk_on_termination = true + delete_data_disks_on_termination = true storage_os_disk { name = "disk-iscsisrv-Os" @@ -107,3 +109,13 @@ resource "azurerm_virtual_machine" "iscsisrv" { workspace = terraform.workspace } } + +module "iscsi_on_destroy" { + source = 
"../../../generic_modules/on_destroy" + node_count = 1 + instance_ids = azurerm_virtual_machine.iscsisrv.*.id + user = var.admin_user + private_key_location = var.private_key_location + public_ips = data.azurerm_public_ip.iscsisrv.*.ip_address + dependencies = [data.azurerm_public_ip.iscsisrv] +} diff --git a/azure/modules/iscsi_server/salt_provisioner.tf b/azure/modules/iscsi_server/salt_provisioner.tf index 2f0b73938..21245b737 100644 --- a/azure/modules/iscsi_server/salt_provisioner.tf +++ b/azure/modules/iscsi_server/salt_provisioner.tf @@ -1,12 +1,3 @@ -# Template file to launch the salt provisioing script -data "template_file" "salt_provisioner" { - template = file("../salt/salt_provisioner_script.tpl") - - vars = { - regcode = var.reg_code - } -} - resource "null_resource" "iscsi_provisioner" { count = var.provisioner == "salt" ? 1 : 0 @@ -22,17 +13,7 @@ resource "null_resource" "iscsi_provisioner" { } provisioner "file" { - source = "../salt" - destination = "/tmp" - } - - provisioner "file" { - content = data.template_file.salt_provisioner.rendered - destination = "/tmp/salt_provisioner.sh" - } - - provisioner "file" { - content = < /tmp/provisioning.log ${var.background ? "&" : ""}", - "return_code=$? && sleep 1 && exit $return_code", - ] # Workaround to let the process start in background properly - } -} \ No newline at end of file +module "iscsi_provision" { + source = "../../../generic_modules/salt_provisioner" + node_count = var.provisioner == "salt" ? 
1 : 0 + instance_ids = null_resource.iscsi_provisioner.*.id + user = var.admin_user + private_key_location = var.private_key_location + public_ips = data.azurerm_public_ip.iscsisrv.*.ip_address + background = var.background +} diff --git a/azure/modules/iscsi_server/variables.tf b/azure/modules/iscsi_server/variables.tf index 435a44c74..a5f2a8379 100644 --- a/azure/modules/iscsi_server/variables.tf +++ b/azure/modules/iscsi_server/variables.tf @@ -105,6 +105,7 @@ variable "additional_packages" { variable "qa_mode" { description = "Whether or not to install the HA/SAP packages from the `ha_sap_deployment_repo`" + type = bool default = false } @@ -115,5 +116,6 @@ variable "provisioner" { variable "background" { description = "Run the provisioner execution in background if set to true finishing terraform execution" + type = bool default = false -} \ No newline at end of file +} diff --git a/azure/modules/monitoring/main.tf b/azure/modules/monitoring/main.tf index 4f44bd471..e334e5b9f 100644 --- a/azure/modules/monitoring/main.tf +++ b/azure/modules/monitoring/main.tf @@ -56,12 +56,14 @@ resource "azurerm_image" "monitoring" { # monitoring VM resource "azurerm_virtual_machine" "monitoring" { - name = "vmmonitoring" - count = var.monitoring_enabled == true ? 1 : 0 - location = var.az_region - resource_group_name = var.resource_group_name - network_interface_ids = [azurerm_network_interface.monitoring.0.id] - vm_size = var.vm_size + name = "vmmonitoring" + count = var.monitoring_enabled == true ? 
1 : 0 + location = var.az_region + resource_group_name = var.resource_group_name + network_interface_ids = [azurerm_network_interface.monitoring.0.id] + vm_size = var.vm_size + delete_os_disk_on_termination = true + delete_data_disks_on_termination = true storage_os_disk { name = "disk-monitoring-Os" @@ -109,4 +111,14 @@ resource "azurerm_virtual_machine" "monitoring" { tags = { workspace = terraform.workspace } -} \ No newline at end of file +} + +module "monitoring_on_destroy" { + source = "../../../generic_modules/on_destroy" + node_count = var.monitoring_enabled ? 1 : 0 + instance_ids = azurerm_virtual_machine.monitoring.*.id + user = var.admin_user + private_key_location = var.private_key_location + public_ips = data.azurerm_public_ip.monitoring.*.ip_address + dependencies = [data.azurerm_public_ip.monitoring] +} diff --git a/azure/modules/monitoring/salt_provisioner.tf b/azure/modules/monitoring/salt_provisioner.tf index d0b66a1d9..f2f66f764 100644 --- a/azure/modules/monitoring/salt_provisioner.tf +++ b/azure/modules/monitoring/salt_provisioner.tf @@ -1,12 +1,3 @@ -# Template file to launch the salt provisioing script -data "template_file" "salt_provisioner" { - template = file("../salt/salt_provisioner_script.tpl") - - vars = { - regcode = var.reg_code - } -} - resource "null_resource" "monitoring_provisioner" { count = var.provisioner == "salt" && var.monitoring_enabled ? 1 : 0 @@ -22,44 +13,35 @@ resource "null_resource" "monitoring_provisioner" { } provisioner "file" { - source = "../salt" - destination = "/tmp" - } - - provisioner "file" { - content = data.template_file.salt_provisioner.rendered - destination = "/tmp/salt_provisioner.sh" - } - - provisioner "file" { - content = < /tmp/provisioning.log ${var.background ? "&" : ""}", - "return_code=$? 
&& sleep 1 && exit $return_code", - ] # Workaround to let the process start in background properly - } - +module "monitoring_provision" { + source = "../../../generic_modules/salt_provisioner" + node_count = var.provisioner == "salt" && var.monitoring_enabled ? 1 : 0 + instance_ids = null_resource.monitoring_provisioner.*.id + user = var.admin_user + private_key_location = var.private_key_location + public_ips = data.azurerm_public_ip.monitoring.*.ip_address + background = var.background } diff --git a/azure/modules/monitoring/variables.tf b/azure/modules/monitoring/variables.tf index 961a8becb..0fa9dacaf 100644 --- a/azure/modules/monitoring/variables.tf +++ b/azure/modules/monitoring/variables.tf @@ -60,11 +60,6 @@ variable "monitoring_srv_ip" { default = "" } -variable "host_ips" { - description = "ip addresses to set to the nodes" - type = list(string) -} - variable "admin_user" { type = string default = "azadmin" @@ -111,32 +106,29 @@ variable "provisioner" { variable "background" { description = "Run the provisioner execution in background if set to true finishing terraform execution" + type = bool default = false } -variable "drbd_enabled" { - description = "enable the DRBD cluster for nfs" - default = false +variable "hana_targets" { + description = "IPs of HANA hosts you want to monitor; the last one is assumed to be the virtual IP of the active HA instance." + type = list(string) } -variable "drbd_ips" { - description = "ip addresses to set to the drbd cluster nodes" +variable "drbd_targets" { + description = "IPs of DRBD hosts you want to monitor" type = list(string) default = [] } -variable "netweaver_enabled" { - description = "enable SAP Netweaver cluster deployment" - default = false -} - -variable "netweaver_ips" { - description = "ip addresses to set to the netweaver cluster nodes" +variable "netweaver_targets" { + description = "IPs of Netweaver hosts you want to monitor; the first two are assumed to be the virtual IPs of the HA instances." 
type = list(string) default = [] } variable "monitoring_enabled" { description = "enable the host to be monitored by exporters, e.g node_exporter" + type = bool default = false -} \ No newline at end of file +} diff --git a/azure/modules/netweaver_node/main.tf b/azure/modules/netweaver_node/main.tf index 5ad92aab8..6f0addc2c 100644 --- a/azure/modules/netweaver_node/main.tf +++ b/azure/modules/netweaver_node/main.tf @@ -373,13 +373,15 @@ resource "azurerm_image" "netweaver-image" { # netweaver instances resource "azurerm_virtual_machine" "netweaver" { - count = var.netweaver_count - name = "vmnetweaver0${count.index + 1}" - location = var.az_region - resource_group_name = var.resource_group_name - network_interface_ids = [element(azurerm_network_interface.netweaver.*.id, count.index)] - availability_set_id = azurerm_availability_set.netweaver-availability-set[0].id - vm_size = var.vm_size + count = var.netweaver_count + name = "vmnetweaver0${count.index + 1}" + location = var.az_region + resource_group_name = var.resource_group_name + network_interface_ids = [element(azurerm_network_interface.netweaver.*.id, count.index)] + availability_set_id = azurerm_availability_set.netweaver-availability-set[0].id + vm_size = var.vm_size + delete_os_disk_on_termination = true + delete_data_disks_on_termination = true storage_os_disk { name = "disk-netweaver0${count.index + 1}-Os" @@ -428,3 +430,13 @@ resource "azurerm_virtual_machine" "netweaver" { workspace = terraform.workspace } } + +module "netweaver_on_destroy" { + source = "../../../generic_modules/on_destroy" + node_count = var.netweaver_count + instance_ids = azurerm_virtual_machine.netweaver.*.id + user = var.admin_user + private_key_location = var.private_key_location + public_ips = data.azurerm_public_ip.netweaver.*.ip_address + dependencies = [data.azurerm_public_ip.netweaver] +} diff --git a/azure/modules/netweaver_node/salt_provisioner.tf b/azure/modules/netweaver_node/salt_provisioner.tf index 
d861f21cd..4cdea6809 100644 --- a/azure/modules/netweaver_node/salt_provisioner.tf +++ b/azure/modules/netweaver_node/salt_provisioner.tf @@ -1,11 +1,3 @@ -data "template_file" "salt_provisioner" { - template = file("../salt/salt_provisioner_script.tpl") - - vars = { - regcode = var.reg_code - } -} - resource "null_resource" "netweaver_provisioner" { count = var.provisioner == "salt" ? var.netweaver_count : 0 @@ -20,16 +12,6 @@ resource "null_resource" "netweaver_provisioner" { private_key = file(var.private_key_location) } - provisioner "file" { - source = "../salt" - destination = "/tmp" - } - - provisioner "file" { - content = data.template_file.salt_provisioner.rendered - destination = "/tmp/salt_provisioner.sh" - } - provisioner "file" { content = < /tmp/provisioning.log ${var.background ? "&" : ""}", - "return_code=$? && sleep 1 && exit $return_code", - ] # Workaround to let the process start in background properly - } +module "netweaver_provision" { + source = "../../../generic_modules/salt_provisioner" + node_count = var.provisioner == "salt" ? var.netweaver_count : 0 + instance_ids = null_resource.netweaver_provisioner.*.id + user = var.admin_user + private_key_location = var.private_key_location + public_ips = data.azurerm_public_ip.netweaver.*.ip_address + background = var.background } diff --git a/azure/modules/netweaver_node/variables.tf b/azure/modules/netweaver_node/variables.tf index 5ac0f8cf3..e3a0932de 100644 --- a/azure/modules/netweaver_node/variables.tf +++ b/azure/modules/netweaver_node/variables.tf @@ -36,8 +36,8 @@ variable "data_disk_type" { variable "data_disk_size" { description = "Size of the Netweaver data disks, informed in GB" - type = string - default = "60" + type = string + default = "60" } variable "data_disk_caching" { @@ -69,6 +69,48 @@ variable "aas_instance_number" { default = "02" } +variable "netweaver_product_id" { + description = "Netweaver installation product. 
Even though the module is about Netweaver, it can be used to install other SAP instances like S4/HANA" + type = string + default = "NW750.HDB.ABAPHA" +} + +variable "netweaver_swpm_folder" { + description = "Netweaver software SWPM folder, path relative from the `netweaver_inst_media` mounted point" + type = string + default = "" +} + +variable "netweaver_sapcar_exe" { + description = "Path to sapcar executable, relative from the `netweaver_inst_media` mounted point" + type = string + default = "" +} + +variable "netweaver_swpm_sar" { + description = "SWPM installer sar archive containing the installer, path relative from the `netweaver_inst_media` mounted point" + type = string + default = "" +} + +variable "netweaver_swpm_extract_dir" { + description = "Extraction path for Netweaver software SWPM folder, if SWPM sar file is provided" + type = string + default = "/sapmedia/NW/SWPM" +} + +variable "netweaver_sapexe_folder" { + description = "Software folder where needed sapexe `SAR` executables are stored (sapexe, sapexedb, saphostagent), path relative from the `netweaver_inst_media` mounted point" + type = string + default = "" +} + +variable "netweaver_additional_dvds" { + description = "Software folder with additional SAP software needed to install netweaver (NW export folder and HANA HDB client for example), path relative from the `netweaver_inst_media` mounted point" + type = list + default = [] +} + variable "netweaver_nfs_share" { description = "URL of the NFS share where /sapmnt and /usr/sap/{sid}/SYS will be mounted. This folder must have the sapmnt and usrsapsys folders" type = string @@ -91,7 +133,7 @@ variable "storage_account_path" { variable "enable_accelerated_networking" { description = "Enable accelerated networking for netweaver. This function is mandatory for certified Netweaver environments and are not available for all kinds of instances. 
Check https://docs.microsoft.com/en-us/azure/virtual-network/create-vm-accelerated-networking-cli for more details" - type = bool + type = bool default = true } @@ -132,6 +174,11 @@ variable "netweaver_public_version" { default = "latest" } +variable "hana_ip" { + type = string + description = "Ip address of the hana database" +} + variable "admin_user" { type = string default = "azadmin" @@ -188,11 +235,13 @@ variable "ha_sap_deployment_repo" { variable "devel_mode" { description = "Whether or not to install the HA/SAP packages from the `ha_sap_deployment_repo`" + type = bool default = false } variable "qa_mode" { description = "Whether or not to install the HA/SAP packages from the `ha_sap_deployment_repo`" + type = bool default = false } @@ -203,10 +252,12 @@ variable "provisioner" { variable "background" { description = "Run the provisioner execution in background if set to true finishing terraform execution" + type = bool default = false } variable "monitoring_enabled" { description = "enable the host to be monitored by exporters, e.g node_exporter" + type = bool default = false } diff --git a/azure/monitoring.tf b/azure/monitoring.tf deleted file mode 100644 index 3261042d1..000000000 --- a/azure/monitoring.tf +++ /dev/null @@ -1,20 +0,0 @@ -variable "timezone" { - description = "Timezone setting for all VMs" - default = "Europe/Berlin" -} - -variable "monitoring_srv_ip" { - description = "monitoring server address" - type = string - default = "" -} - -variable "devel_mode" { - description = "whether or not to install HA/SAP packages from ha_sap_deployment_repo" - default = false -} - -variable "monitoring_enabled" { - description = "enable the host to be monitored by exporters, e.g node_exporter" - default = false -} diff --git a/azure/provider.tf b/azure/provider.tf deleted file mode 100644 index 907623c8d..000000000 --- a/azure/provider.tf +++ /dev/null @@ -1,12 +0,0 @@ -# Configure the Azure Provider -provider "azurerm" { - version = "<= 1.33" -} - 
-provider "template" { - version = "~> 2.1" -} - -terraform { - required_version = ">= 0.12" -} diff --git a/azure/resources.tf b/azure/resources.tf deleted file mode 100644 index b8fd7fa8b..000000000 --- a/azure/resources.tf +++ /dev/null @@ -1,17 +0,0 @@ -resource "azurerm_resource_group" "myrg" { - name = "rg-ha-sap-${terraform.workspace}" - location = var.az_region -} - -resource "azurerm_storage_account" "mytfstorageacc" { - name = "stdiag${lower(terraform.workspace)}" - resource_group_name = azurerm_resource_group.myrg.name - location = var.az_region - account_replication_type = "LRS" - account_tier = "Standard" - - tags = { - workspace = terraform.workspace - } -} - diff --git a/azure/terraform.tfvars.example b/azure/terraform.tfvars.example index e715ebc89..563f9f914 100644 --- a/azure/terraform.tfvars.example +++ b/azure/terraform.tfvars.example @@ -1,3 +1,22 @@ +# Use an already existing resource group +#resource_group_name = "my-resource-group" + +# Use an already existing virtual network +#vnet_name = "my-vnet" + +# Use an already existing subnet in this virtual network +#subnet_name = "my-subnet" + +# vnet address range in CIDR notation +# Only used if the vnet is created by terraform or the user doesn't have read permissions in this +# resource. 
To use the current vnet address range set the value to an empty string +# To define custom ranges +#vnet_address_range = "10.74.0.0/16" +#subnet_address_range = "10.74.1.0/24" +# Or to use already existing address ranges +#vnet_address_range = "" +#subnet_address_range = "" + # VM size to use for the cluster nodes hana_vm_size = "Standard_M32ls" @@ -76,10 +95,19 @@ storage_account_name = "YOUR_STORAGE_ACCOUNT_NAME" storage_account_key = "YOUR_STORAGE_ACCOUNT_KEY" # Azure storage account path where HANA installation master is located -hana_inst_master = "//YOUR_STORAGE_ACCOUNT_NAME.file.core.windows.net/path/to/your/hana/installation/master" - -# Local folder where HANA installation master will be mounted -hana_inst_folder = "/root/hana_inst_media/" +hana_inst_master = "//YOUR_STORAGE_ACCOUNT_NAME.file.core.windows.net/path/to/your/hana/installation/master/51053381" +# Or you can combine the `hana_inst_master with` `hana_platform_folder` variable. +#hana_inst_master = "//YOUR_STORAGE_ACCOUNT_NAME.file.core.windows.net/path/to/your/hana/installation/master +# Specify the path to already extracted HANA platform installation media, relative to hana_inst_master mounting point. 
+# This will have preference over hdbserver sar archive installation media +#hana_platform_folder = "51053381" + +# Or specify the path to the sapcar executable & HANA database server installation sar archive, relative to the 'hana_inst_master' mounting point +# The sar archive will be extracted to path specified at hana_extract_dir (optional, by default /sapmedia/HANA) +# Make sure to use the latest/compatible version of sapcar executable, otherwise file may be extracted incorrectly +hana_sapcar_exe = "SAPCAR" +hdbserver_sar = "IMDB_SERVER.SAR" +hana_extract_dir = "/sapmedia/HDBSERVER" # Device used by node where HANA will be installed hana_disk_device = "/dev/sdc" @@ -87,8 +115,8 @@ hana_disk_device = "/dev/sdc" # Device used by the iSCSI server to provide LUNs iscsidev = "/dev/sdc" -# IP address of the iSCSI server -iscsi_srv_ip = "10.74.1.14" +# IP address of the iSCSI server. If it's not set the address will be auto generated from the provided vnet address range +#iscsi_srv_ip = "10.74.1.14" # Path to a custom ssh public key to upload to the nodes # Used for cluster communication for example @@ -98,18 +126,22 @@ cluster_ssh_pub = "salt://hana_node/files/sshkeys/cluster.id_rsa.pub" # Used for cluster communication for example cluster_ssh_key = "salt://hana_node/files/sshkeys/cluster.id_rsa" -# Each host IP address (sequential order). -# example : host_ips = ["10.0.1.0", "10.0.1.1"] -host_ips = ["10.74.1.11", "10.74.1.12"] +# Each host IP address (sequential order). If it's not set the addresses will be auto generated from the provided vnet address range +#hana_ips = ["10.74.1.11", "10.74.1.12"] +#hana_cluster_vip = "10.74.1.13" -# Each drbd cluster host IP address (sequential order). -# example : drbd_host_ips = ["10.0.1.10", "10.0.1.11"] -drbd_ips = ["10.74.1.21", "10.74.1.22"] +# Each drbd cluster host IP address (sequential order). 
If it's not set the addresses will be auto generated from the provided vnet address range +#drbd_ips = ["10.74.1.21", "10.74.1.22"] +#drbd_cluster_vip = "10.74.1.23" # Repository url used to install HA/SAP deployment packages" # The latest RPM packages can be found at: # https://download.opensuse.org/repositories/network:/ha-clustering:/Factory/{YOUR OS VERSION} # Contains the salt formulas rpm packages. +# To auto detect the SLE version +#ha_sap_deployment_repo = "http://download.opensuse.org/repositories/network:/ha-clustering:/Factory/" +# Specific SLE version used in all the created machines +#ha_sap_deployment_repo = "http://download.opensuse.org/repositories/network:/ha-clustering:/Factory/SLE_15/" ha_sap_deployment_repo = "" # Optional SUSE Customer Center Registration parameters @@ -137,8 +169,8 @@ ha_sap_deployment_repo = "" # Enable the host to be monitored by exporters #monitoring_enabled = true -# IP address of the machine where Prometheus and Grafana are running -monitoring_srv_ip = "10.74.1.13" +# IP address of the machine where Prometheus and Grafana are running. 
If it's not set the address will be auto generated from the provided vnet address range +#monitoring_srv_ip = "10.74.1.13" # Enable drbd cluster #drbd_enabled = true @@ -146,11 +178,25 @@ monitoring_srv_ip = "10.74.1.13" # Netweaver variables #netweaver_enabled = true +# If the addresses are not set they will be auto generated from the provided vnet address range #netweaver_ips = ["10.74.1.30", "10.74.1.31", "10.74.1.32", "10.74.1.33"] #netweaver_virtual_ips = ["10.74.1.35", "10.74.1.36", "10.74.1.37", "10.74.1.38"] + #netweaver_storage_account_key = "YOUR_STORAGE_ACCOUNT_KEY" #netweaver_storage_account_name = "YOUR_STORAGE_ACCOUNT_NAME" #netweaver_storage_account = "//YOUR_STORAGE_ACCOUNT_NAME.file.core.windows.net/path/to/your/nw/installation/master" +# Netweaver installation required folders +# SAP SWPM installation folder, relative to the netweaver_storage_account mounting point +#netweaver_swpm_folder = "your_swpm" +# Or specify the path to the sapcar executable & SWPM installer sar archive, relative to the netweaver_storage_account mounting point +# The sar archive will be extracted to path specified at netweaver_swpm_extract_dir (optional, by default /sapmedia/NW/SWPM) +#netweaver_sapcar_exe = "your_sapcar_exe_file_path" +#netweaver_swpm_sar = "your_swpm_sar_file_path" +#netweaver_swpm_extract_dir = "location_to_extract_swpm_sar_absolute_path" +# Folder where needed SAR executables (sapexe, sapdbexe) are stored, relative to the netweaver_storage_account mounting point +#netweaver_sapexe_folder = "kernel_nw75_sar" +# Additional folders (added in start_dir.cd), relative to the netweaver_storage_account mounting point +#netweaver_additional_dvds = ["dvd1", "dvd2"] # QA variables @@ -164,3 +210,8 @@ monitoring_srv_ip = "10.74.1.13" # qa_mode must be set to true for executing hwcct # true or false (default) #hwcct = false + +# Pre deployment + +# Enable some pre deployment steps (disabled by default) +#pre_deployment = true diff --git a/azure/variables.tf 
b/azure/variables.tf index f6cf7f129..a3bf1796e 100644 --- a/azure/variables.tf +++ b/azure/variables.tf @@ -1,36 +1,215 @@ -# Launch SLES-HAE of SLES4SAP cluster nodes +# Azure related variables -# Variables for type of instances to use and number of cluster nodes -# Use with: terraform apply -var hana_vm_size=Standard_M32ls -var hana_count=2 +variable "az_region" { + description = "Azure region where the deployment machines will be created" + type = string + default = "westeurope" +} -variable "hana_vm_size" { - description = "VM size for the hana machine" +variable "resource_group_name" { + description = "Already existing resource group where the infrastructure is created. If it's not set a new one will be created named rg-ha-sap-{{terraform.workspace}}" type = string - default = "Standard_M32ls" + default = "" +} + +variable "vnet_name" { + description = "Already existing virtual network name used by the created infrastructure. If it's not set a new one will be created named vnet-{{terraform.workspace}}" + type = string + default = "" +} + +variable "vnet_address_range" { + description = "vnet address range in CIDR notation (only used if the vnet is created by terraform or the user doesn't have read permissions in this resource. To use the current vnet address range set the value to an empty string)" + type = string + default = "10.74.0.0/16" +} + +variable "subnet_name" { + description = "Already existing subnet name used by the created infrastructure. If it's not set a new one will be created named snet-{{terraform.workspace}}" + type = string + default = "" +} + +variable "subnet_address_range" { + description = "subnet address range in CIDR notation (only used if the subnet is created by terraform or the user doesn't have read permissions in this resource. 
To use the current vnet address range set the value to an empty string)" + type = string + default = "" +} + +variable "admin_user" { + description = "Administration user used to create the machines" + type = string +} + +variable "storage_account_name" { + description = "Azure storage account name" + type = string +} + +variable "storage_account_key" { + description = "Azure storage account secret key" + type = string +} + +variable "public_key_location" { + description = "Path to a SSH public key used to connect to the created machines" + type = string +} + +variable "private_key_location" { + description = "Path to a SSH private key used to connect to the created machines" + type = string +} + +# Deployment variables + +variable "name" { + description = "hostname, without the domain part" + type = string + default = "hana" +} + +variable "timezone" { + description = "Timezone setting for all VMs" + default = "Europe/Berlin" +} + +variable "cluster_ssh_pub" { + description = "Path to a SSH public key used during the cluster creation. The key must be passwordless" + type = string +} + +variable "cluster_ssh_key" { + description = "Path to a SSH private key used during the cluster creation. The key must be passwordless" + type = string +} + +variable "reg_code" { + description = "If informed, register the product using SUSEConnect" + type = string + default = "" +} + +variable "reg_email" { + description = "Email used for the registration" + default = "" +} + +# The module format must follow SUSEConnect convention: +# // +# Example: Suggested modules for SLES for SAP 15 +# - sle-module-basesystem/15/x86_64 +# - sle-module-desktop-applications/15/x86_64 +# - sle-module-server-applications/15/x86_64 +# - sle-ha/15/x86_64 (Need the same regcode as SLES for SAP) +# - sle-module-sap-applications/15/x86_64 + +variable "reg_additional_modules" { + description = "Map of the modules to be registered. Module name = Regcode, when needed." 
+ type = map(string) + default = {} +} + +variable "additional_packages" { + description = "Extra packages to be installed" + default = [] +} + +# Repository url used to install HA/SAP deployment packages" +# The latest RPM packages can be found at: +# https://download.opensuse.org/repositories/network:/ha-clustering:/Factory/{YOUR OS VERSION} +# Contains the salt formulas rpm packages. +variable "ha_sap_deployment_repo" { + description = "Repository url used to install HA/SAP deployment packages. If SLE version is not set, the deployment will automatically detect the current OS version" + type = string +} + +variable "devel_mode" { + description = "Increase ha_sap_deployment_repo repository priority to get the packages from this repository instead of SLE official channels" + type = bool + default = false +} + +variable "provisioner" { + description = "Used provisioner option. Available options: salt. Let empty to not use any provisioner" + default = "salt" +} + +variable "background" { + description = "Run the provisioner execution in background if set to true finishing terraform execution" + type = bool + default = false +} + +# Hana related variables + +variable "hana_count" { + description = "Number of hana nodes" + type = string + default = "2" +} + +variable "hana_public_publisher" { + description = "Public image publisher name used to create the hana machines" + type = string + default = "SUSE" +} + +variable "hana_public_offer" { + description = "Public image offer name used to create the hana machines" + type = string + default = "sles-sap-15-sp1-byos" +} + +variable "hana_public_sku" { + description = "Public image sku used to create the hana machines" + type = string + default = "gen2" +} + +variable "hana_public_version" { + description = "Public image version used to create the hana machines" + type = string + default = "latest" +} + +variable "sles4sap_uri" { + description = "Path to a custom azure image in a storage account used to create the hana 
machines" + type = string + default = "" } # For reference: # Standard_M32ls has 32 VCPU, 256GiB RAM, 1000 GiB SSD # You could find other supported instances in Azure documentation +variable "hana_vm_size" { + description = "VM size for the hana machine" + type = string + default = "Standard_M32ls" +} -variable "hana_count" { - type = string - default = "2" +variable "init_type" { + description = "Type of deployment. Options: all-> Install HANA and HA; skip-hana-> Skip HANA installation; skip-cluster-> Skip HA cluster installation" + type = string + default = "all" } variable "hana_data_disk_type" { - type = string - default = "Standard_LRS" + description = "Disk type of the disks used to store hana database content" + type = string + default = "Standard_LRS" } variable "hana_data_disk_size" { - type = string - default = "60" + description = "Disk size of the disks used to store hana database content" + type = string + default = "60" } variable "hana_data_disk_caching" { - type = string - default = "ReadWrite" + description = "Disk caching of the disks used to store hana database content" + type = string + default = "ReadWrite" } variable "hana_enable_accelerated_networking" { @@ -39,54 +218,105 @@ variable "hana_enable_accelerated_networking" { default = true } -variable "name" { - description = "hostname, without the domain part" - type = string - default = "hana" +variable "hana_ips" { + description = "ip addresses to set to the hana nodes. 
If it's not set the addresses will be auto generated from the provided vnet address range" + type = list(string) + default = [] } -variable "hana_instance_number" { - description = "HANA instance number" +variable "hana_inst_master" { + description = "Azure storage account path where hana installation software is available" type = string - default = "00" } -# Variable for default region where to deploy resources +variable "hana_inst_folder" { + description = "Folder where the hana installation software will be downloaded" + type = string + default = "/sapmedia/HANA" +} -variable "az_region" { - type = string - default = "westeurope" +variable "hana_platform_folder" { + description = "Path to the hana platform media, relative to the 'hana_inst_master' mounting point" + type = string + default = "" } -variable "init_type" { - type = string - default = "all" +variable "hana_sapcar_exe" { + description = "Path to the sapcar executable, relative to the 'hana_inst_master' mounting point" + type = string + default = "" } -variable "hana_inst_master" { - type = string +variable "hdbserver_sar" { + description = "Path to the HANA database server installation sar archive, relative to the 'hana_inst_master' mounting point" + type = string + default = "" } -variable "hana_inst_folder" { - type = string - default = "/root/hana_inst_media" +variable "hana_extract_dir" { + description = "Absolute path to folder where SAP HANA sar archive will be extracted" + type = string + default = "/sapmedia/HANA" } variable "hana_disk_device" { - description = "device where to install HANA" + description = "Device where hana is installed" type = string } variable "hana_fstype" { - description = "Filesystem type to use for HANA" + description = "Filesystem type used by the disk where hana is installed" type = string default = "xfs" } -variable "iscsi_srv_ip" { - description = "iscsi server address" +variable "hana_instance_number" { + description = "Hana instance number" + type = string + 
default = "00" +} + +variable "hana_cluster_vip" { + description = "Virtual ip for the hana cluster. If it's not set the address will be auto generated from the provided vnet address range" + type = string + default = "" +} + +variable "scenario_type" { + description = "Deployed scenario type. Available options: performance-optimized, cost-optimized" + default = "performance-optimized" +} + +# Iscsi server related variables + +variable "iscsi_public_publisher" { + description = "Public image publisher name used to create the iscsi machines" + type = string + default = "SUSE" +} + +variable "iscsi_public_offer" { + description = "Public image offer name used to create the iscsi machines" type = string - default = "10.74.1.10" + default = "sles-sap-15-sp1-byos" +} + +variable "iscsi_public_sku" { + description = "Public image sku used to create the iscsi machines" + type = string + default = "gen2" +} + +variable "iscsi_public_version" { + description = "Public image version used to create the iscsi machines" + type = string + default = "latest" +} + +variable "iscsi_srv_uri" { + description = "Path to a custom azure image in a storage account used to create the iscsi machines" + type = string + default = "" } variable "iscsi_vm_size" { @@ -95,8 +325,14 @@ variable "iscsi_vm_size" { default = "Standard_D2s_v3" } +variable "iscsi_srv_ip" { + description = "iscsi server address. 
If it's not set the address will be auto generated from the provided vnet address range" + type = string + default = "" +} + variable "iscsidev" { - description = "device iscsi for iscsi server" + description = "Disk device where iscsi partitions are created" type = string } @@ -105,56 +341,62 @@ variable "iscsi_disks" { default = 0 } -variable "cluster_ssh_pub" { - description = "path for the public key needed by the cluster" - type = string +# Monitoring related variables + +variable "monitoring_enabled" { + description = "Enable the host to be monitored by exporters, e.g node_exporter" + type = bool + default = false } -variable "cluster_ssh_key" { - description = "path for the private key needed by the cluster" +variable "monitoring_vm_size" { + description = "VM size for the monitoring machine" type = string + default = "Standard_D2s_v3" } -variable "reg_code" { - description = "If informed, register the product using SUSEConnect" +variable "monitoring_public_publisher" { + description = "Public image publisher name used to create the monitoring machines" type = string - default = "" + default = "SUSE" } -variable "reg_email" { - description = "Email used for the registration" - default = "" +variable "monitoring_public_offer" { + description = "Public image offer name used to create the monitoring machines" + type = string + default = "sles-sap-15-sp1-byos" } -# The module format must follow SUSEConnect convention: -# // -# Example: Suggested modules for SLES for SAP 15 -# - sle-module-basesystem/15/x86_64 -# - sle-module-desktop-applications/15/x86_64 -# - sle-module-server-applications/15/x86_64 -# - sle-ha/15/x86_64 (Need the same regcode as SLES for SAP) -# - sle-module-sap-applications/15/x86_64 +variable "monitoring_public_sku" { + description = "Public image sku used to create the monitoring machines" + type = string + default = "gen2" +} -variable "reg_additional_modules" { - description = "Map of the modules to be registered. 
Module name = Regcode, when needed." - type = map(string) - default = {} +variable "monitoring_public_version" { + description = "Public image version used to create the monitoring machines" + type = string + default = "latest" } -variable "additional_packages" { - description = "extra packages which should be installed" - default = [] +variable "monitoring_uri" { + description = "Path to a custom azure image in a storage account used to create the monitoring machines" + type = string + default = "" } -variable "host_ips" { - description = "ip addresses to set to the nodes" - type = list(string) +variable "monitoring_srv_ip" { + description = "monitoring server address. If it's not set the address will be auto generated from the provided vnet address range" + type = string + default = "" } -variable "drbd_ips" { - description = "ip addresses to set to the drbd cluster nodes" - type = list(string) - default = [] +# DRBD related variables + +variable "drbd_enabled" { + description = "Enable the DRBD cluster for nfs" + type = bool + default = false } variable "drbd_vm_size" { @@ -163,48 +405,86 @@ variable "drbd_vm_size" { default = "Standard_D2s_v3" } -variable "monitoring_vm_size" { - description = "VM size for the monitoring machine" +variable "drbd_ips" { + description = "ip addresses to set to the drbd cluster nodes. If it's not set the addresses will be auto generated from the provided vnet address range" + type = list(string) + default = [] +} + +variable "drbd_public_publisher" { + description = "Public image publisher name used to create the drbd machines" type = string - default = "Standard_D2s_v3" + default = "SUSE" } -# Repository url used to install HA/SAP deployment packages" -# The latest RPM packages can be found at: -# https://download.opensuse.org/repositories/network:/ha-clustering:/Factory/{YOUR OS VERSION} -# Contains the salt formulas rpm packages. 
-variable "ha_sap_deployment_repo" { - description = "Repository url used to install HA/SAP deployment packages" +variable "drbd_public_offer" { + description = "Public image offer name used to create the drbd machines" type = string + default = "sles-sap-15-sp1-byos" } -variable "scenario_type" { - description = "Deployed scenario type. Available options: performance-optimized, cost-optimized" - default = "performance-optimized" +variable "drbd_public_sku" { + description = "Public image sku used to create the drbd machines" + type = string + default = "gen2" } -variable "provisioner" { - description = "Used provisioner option. Available options: salt. Let empty to not use any provisioner" - default = "salt" +variable "drbd_public_version" { + description = "Public image sku used to create the drbd machines" + type = string + default = "latest" } -variable "background" { - description = "Run the provisioner execution in background if set to true finishing terraform execution" - default = false +variable "drbd_image_uri" { + description = "Path to a custom azure image in a storage account used to create the drbd machines" + type = string + default = "" } -variable "drbd_enabled" { - description = "enable the DRBD cluster for nfs" - default = false +variable "drbd_cluster_vip" { + description = "Virtual ip for the drbd cluster. 
If it's not set the address will be auto generated from the provided vnet address range" + type = string + default = "" } -# Netweaver variables +# Netweaver related variables variable "netweaver_enabled" { - description = "enable SAP Netweaver cluster deployment" + description = "Enable SAP Netweaver cluster deployment" + type = bool default = false } +variable "netweaver_public_publisher" { + description = "Public image publisher name used to create the netweaver machines" + type = string + default = "SUSE" +} + +variable "netweaver_public_offer" { + description = "Public image offer name used to create the netweaver machines" + type = string + default = "sles-sap-15-sp1-byos" +} + +variable "netweaver_public_sku" { + description = "Public image sku used to create the netweaver machines" + type = string + default = "gen2" +} + +variable "netweaver_public_version" { + description = "Public image sku used to create the netweaver machines" + type = string + default = "latest" +} + +variable "netweaver_image_uri" { + description = "Path to a custom azure image in a storage account used to create the netweaver machines" + type = string + default = "" +} + variable "netweaver_vm_size" { description = "VM size for the Netweaver machines" type = string @@ -212,8 +492,9 @@ variable "netweaver_vm_size" { } variable "netweaver_data_disk_type" { - type = string - default = "Standard_LRS" + description = "Disk type of the disks used to store netweaver content" + type = string + default = "Standard_LRS" } variable "netweaver_data_disk_size" { @@ -223,18 +504,19 @@ variable "netweaver_data_disk_size" { } variable "netweaver_data_disk_caching" { - type = string - default = "ReadWrite" + description = "Disk caching of the disks used to store hana database content" + type = string + default = "ReadWrite" } variable "netweaver_ips" { - description = "ip addresses to set to the netweaver cluster nodes" + description = "ip addresses to set to the netweaver cluster nodes. 
If it's not set the addresses will be auto generated from the provided vnet address range" type = list(string) default = [] } variable "netweaver_virtual_ips" { - description = "virtual ip addresses to set to the netweaver cluster nodes" + description = "Virtual ip addresses to set to the netweaver cluster nodes. If it's not set the addresses will be auto generated from the provided vnet address range" type = list(string) default = [] } @@ -263,10 +545,53 @@ variable "netweaver_enable_accelerated_networking" { default = true } +variable "netweaver_product_id" { + description = "Netweaver installation product. Even though the module is about Netweaver, it can be used to install other SAP instances like S4/HANA" + type = string + default = "NW750.HDB.ABAPHA" +} + +variable "netweaver_swpm_folder" { + description = "Netweaver software SWPM folder, path relative from the `netweaver_inst_media` mounted point" + type = string + default = "" +} + +variable "netweaver_sapcar_exe" { + description = "Path to sapcar executable, relative from the `netweaver_inst_media` mounted point" + type = string + default = "" +} + +variable "netweaver_swpm_sar" { + description = "SWPM installer sar archive containing the installer, path relative from the `netweaver_inst_media` mounted point" + type = string + default = "" +} + +variable "netweaver_swpm_extract_dir" { + description = "Extraction path for Netweaver software SWPM folder, if SWPM sar file is provided" + type = string + default = "/sapmedia/NW/SWPM" +} + +variable "netweaver_sapexe_folder" { + description = "Software folder where needed sapexe `SAR` executables are stored (sapexe, sapexedb, saphostagent), path relative from the `netweaver_inst_media` mounted point" + type = string + default = "" +} + +variable "netweaver_additional_dvds" { + description = "Software folder with additional SAP software needed to install netweaver (NW export folder and HANA HDB client for example), path relative from the `netweaver_inst_media` 
mounted point" + type = list + default = [] +} + # Specific QA variables variable "qa_mode" { - description = "define qa mode (Disable extra packages outside images)" + description = "Enable test/qa mode (disable extra packages usage not coming in the image)" + type = bool default = false } @@ -275,3 +600,11 @@ variable "hwcct" { type = bool default = false } + +# Pre deployment + +variable "pre_deployment" { + description = "Enable pre deployment local execution. Only available for clients running Linux" + type = bool + default = false +} diff --git a/doc/deployment-templates.md b/doc/deployment-templates.md index d168d4d06..95c3e1ff7 100644 --- a/doc/deployment-templates.md +++ b/doc/deployment-templates.md @@ -12,7 +12,7 @@ The following examples refer to the libvirt Terraform provider, but they may be Not implementd yet: - NETWEAVER Monitoring (need implementation) -Additionally you need to set up pillars. In dev-mode we use mostly `automatic`. +Additionally you need to set up pillars. In dev-mode we use mostly `automatic`. See https://github.com/SUSE/ha-sap-terraform-deployments/tree/master/pillar_examples#pillar-examples for more details. The values of ipranges and ips needs are as example there. You will need to adapt accordingly your network configuration. @@ -22,12 +22,16 @@ The values of ipranges and ips needs are as example there. 
You will need to adap ``` qemu_uri = "qemu+ssh://MYUSER@MYSTEM/system" +pre_deployment = true base_image = "URL_TO_IMAGE" iprange = "192.168.210.0/24" hana_inst_media = "PATH TO INST_MEDIA" host_ips = ["192.168.110.19", "192.168.110.20"] reg_code = "MY_REG_CODE" reg_email = "MY_EMAIL" +# To auto detect the SLE version +ha_sap_deployment_repo = "http://download.opensuse.org/repositories/network:/ha-clustering:/Factory/" +# Specific SLE version used in all the created machines ha_sap_deployment_repo = "http://download.opensuse.org/repositories/network:/ha-clustering:/Factory/SLE_15/" shared_storage_type = "shared-disk" storage_pool = "terraform" @@ -42,6 +46,7 @@ To monitoring the HANA cluster need only 2 vars more than the simple HANA deploy ``` qemu_uri = "qemu+ssh://MYUSER@MYSTEM/system" +pre_deployment = true base_image = "URL_TO_IMAGE" iprange = "192.168.210.0/24" hana_inst_media = "PATH TO INST_MEDIA" @@ -50,6 +55,9 @@ monitoring_srv_ip = "192.168.110.21" monitoring_enabled = true reg_code = "MY_REG_CODE" reg_email = "MY_EMAIL" +# To auto detect the SLE version +ha_sap_deployment_repo = "http://download.opensuse.org/repositories/network:/ha-clustering:/Factory/" +# Specific SLE version used in all the created machines ha_sap_deployment_repo = "http://download.opensuse.org/repositories/network:/ha-clustering:/Factory/SLE_15/" shared_storage_type = "shared-disk" storage_pool = "terraform" @@ -59,12 +67,16 @@ storage_pool = "terraform" ``` qemu_uri = "qemu+ssh://MYUSER@MYSTEM/system" +pre_deployment = true base_image = "URL_TO_IMAGE" iprange = "192.168.210.0/24" hana_inst_media = "PATH TO INST_MEDIA" host_ips = ["192.168.110.19", "192.168.110.20"] reg_code = "YOUR_REG_CODE" reg_email = "MY_EMAIL" +# To auto detect the SLE version +ha_sap_deployment_repo = "http://download.opensuse.org/repositories/network:/ha-clustering:/Factory/" +# Specific SLE version used in all the created machines ha_sap_deployment_repo = 
"http://download.opensuse.org/repositories/network:/ha-clustering:/Factory/SLE_15/" shared_storage_type = "iscsi" iscsi_srv_ip = "192.168.110.31" @@ -78,12 +90,16 @@ NOTE: ISCSI server works with a sle15 or higher image ``` qemu_uri = "qemu+ssh://MYUSER@MYSTEM/system" +pre_deployment = true base_image = "URL_TO_IMAGE" iprange = "192.168.210.0/24" hana_inst_media = "PATH TO INST_MEDIA" host_ips = ["192.168.110.19", "192.168.110.20"] reg_code = "MY_REG_CODE" reg_email = "MY_EMAIL" +# To auto detect the SLE version +ha_sap_deployment_repo = "http://download.opensuse.org/repositories/network:/ha-clustering:/Factory/" +# Specific SLE version used in all the created machines ha_sap_deployment_repo = "http://download.opensuse.org/repositories/network:/ha-clustering:/Factory/SLE_15/" shared_storage_type = "shared-disk" storage_pool = "terraform" diff --git a/doc/monitoring.md b/doc/monitoring.md index 37c0e4afb..0eecb29fa 100644 --- a/doc/monitoring.md +++ b/doc/monitoring.md @@ -17,6 +17,7 @@ This configuration will create an additional VM with the chosen provider and ins The address of the Grafana dashboard will be made available in the final Terraform output. + ### DRBD and Netweaver monitoring If DRBD or Netweaver clusters are enabled setting the values `drbd_enabled` or `netweaver_enabled` to `true`, new clusters entries will be added to the dashboard automatically with the data of these 2 deployments (as far as `monitoring_enabled` is set to `true`). 
@@ -26,42 +27,17 @@ If DRBD or Netweaver clusters are enabled setting the values `drbd_enabled` or ` These are the exporters installed in the cluster nodes, which provide metrics to be scraped by the Prometheus server: +- [prometheus/node_exporter](https://github.com/prometheus/node_exporter) - [ClusterLabs/ha_cluster_exporter](http://github.com/ClusterLabs/ha_cluster_exporter) - [SUSE/hanadb_exporter](https://github.com/SUSE/hanadb_exporter) -- [prometheus/node_exporter](https://github.com/prometheus/node_exporter) - -#### `ha_cluster_exporter` - -In order to enable `ha_cluster_exporter` for each cluster node, the `cluster` pillar must be as follows: - -``` -cluster: - // etc. - ha_exporter: true -``` - -#### `hanadb_exporter` - -In order to enable `hanadb_exporter` for each HANA node, the `hana` pillar entries must be modified as follows: - -``` -hana: - nodes: - - // etc. - exporter: - exposition_port: 9668 # http port where the data is exported - user: SYSTEM # HANA db user - password: YourPassword1234 # HANA db password -``` - -**Note**: SAP HANA already uses some ports in the 8000 range (specifically the port 80{instance number} where instance number usually is '00'). +- [SUSE/sap_host_exporter](https://github.com/SUSE/sap_host_exporter) ### Multi-cluster monitoring -To enable multiple clusters in our monitoring solution, you will need to manually apply some changes to the `/etc/prometheus/prometheus.yaml` configuration. +To enable multiple clusters in our monitoring solution, we have made some changes to the `/etc/prometheus/prometheus.yaml` configuration. -Each cluster is a different "job" grouping all the exporters (aka "targets") to scrape, so if you had two clusters you would have 2 jobs, e.g.: +We leverage the `job_name` settings to group all the exporters (a.k.a. 
scraping targets) by their cluster, so if you had two clusters you would have one job each, e.g.: ``` scrape_configs: @@ -89,7 +65,7 @@ scrape_configs: This will add a `job` label in all the Prometheus metrics, in this example `job="hacluster-01"` and `job="hacluster-02"`. -You will find a dedicated cluster selector switch at the top of the Grafana dashboard. +We leverage this to implement a cluster selector switch at the top of the Multi-Cluster Grafana dashboard. ### DRBD split-brain detection diff --git a/doc/netweaver.md b/doc/netweaver.md index 8bc40e191..4575ae421 100644 --- a/doc/netweaver.md +++ b/doc/netweaver.md @@ -29,7 +29,8 @@ In order to deploy a SAP Netweaver environment with SAP Hana some changes must b - **For azure**: Add the azure storage account variables to the `terraform.tfvars` with the , `sapexe` folder, `Netweaver Export` folder and `HANA HDB Client` folders (`Netweaver Export` and `HANA HDB Client` are required if the Database, PAS and AAS instances need to be installed). The `netweaver.sls` pillar file must also be updated with all this information. `Netweaver Export` and `HANA HDB Client` folders must be provided in `additional_dvds` list. -- **For aws**: Add the aws S3 bucket variable (`netweaver_s3_bucket`) to the `terraform.tfvars` with the , `sapexe` folder, `Netweaver Export` folder and `HANA HDB Client` folders (`Netweaver Export` and `HANA HDB Client` are required if the Database, PAS and AAS instances need to be installed). The `netweaver.sls` pillar file must also be updated with all this information. `Netweaver Export` and `HANA HDB Client` folders must be provided in `additional_dvds` list. +- **For aws**: Add the aws S3 bucket variable (`netweaver_s3_bucket`) to the `terraform.tfvars` with the , `sapexe` folder, `Netweaver Export` folder and `HANA HDB Client` folders (`Netweaver Export` and `HANA HDB Client` are required if the Database, PAS and AAS instances need to be installed). 
The `netweaver.sls` pillar file must also be updated with all this information. `Netweaver Export` and `HANA HDB Client` folders must be provided in `additional_dvds` list. Note: Netweaver will use the Amazon Elastic File System(EFS) for its NFS share by default, unless drbd option is enabled (using `drbd_enabled` variable). + - **For gcp**: Add the gcp storage bucket variable to the `terraform.tfvars` with the , `sapexe` folder, `Netweaver Export` folder and `HANA HDB Client` folders (`Netweaver Export` and `HANA HDB Client` are required if the Database, PAS and AAS instances need to be installed). The `netweaver.sls` pillar file must also be updated with all this information. `Netweaver Export` and `HANA HDB Client` folders must be provided in `additional_dvds` list. @@ -38,3 +39,5 @@ In order to deploy a SAP Netweaver environment with SAP Hana some changes must b - Add the `netweaver_nfs_share` variable to the `terraform.tfvars` with the address to the NFS share containing the `sapmnt` and `usrsapsys` folders. Following the current example this would be `nfs_address:/sapdata/HA1`. - Modify the content of [cluster.sls](../salt/netweaver_node/files/pillar/cluster.sls) and [netweaver.sls](../salt/netweaver_node/files/pillar/netweaver.sls). The unique mandatory changes are `swpm_folder`, `sapexe_folder` and `additional_dvds` in the `netweaver.sls` file. These values must match with the folder of your `sap_inst_media`, the current values are just an example. + +- If the [netweaver.sls](pillar_examples/automatic/netweaver/netweaver.sls) pillar file from `pillar_examples/automatic/netweaver` is used, the parameters `netweaver_swpm_folder`, `netweaver_sapexe_folder` and `netweaver_additional_dvds` must be set in the `tfvars` file with the same data explained in the previous points. 
diff --git a/doc/qa.md b/doc/qa.md index d5dc2d178..087b19cc7 100644 --- a/doc/qa.md +++ b/doc/qa.md @@ -1,17 +1,17 @@ # QA usage You may have noticed the variable **`qa_mode`**, this project is also used for Quality Assurance testing. - + ## Specific QA variables **`qa_mode`** is used to inform the deployment that we are doing QA. Don't forget to set `qa_mode` to true in your `terraform.tfvars` file. By default, `qa_mode` is set to false. Below is the expected behavior: - -- disables extra packages installation (sap, ha pattern etc). + +- disables extra packages installation (sap, ha pattern etc). - disables first registration to install salt-minion, we consider that images are delivered with - salt-minion included. + salt-minion included. - disables salt color output (better for debugging in automated scenario)
-**`hwcct`**: If set to true, it executes HANA Hardware Configuration Check Tool to bench filesystems. It's a very long test (about 2 hours), results will be both in /root/hwcct_out and in the global log file /tmp/provisioning.log. +**`hwcct`**: If set to true, it executes HANA Hardware Configuration Check Tool to bench filesystems. It's a very long test (about 2 hours), results will be both in /root/hwcct_out and in the global log file /var/log/provisioning.log. By default, `hwcct` is set to false. Variable **`qa_mode` must be set to true**. diff --git a/doc/troubleshooting.md b/doc/troubleshooting.md new file mode 100644 index 000000000..b9ef84037 --- /dev/null +++ b/doc/troubleshooting.md @@ -0,0 +1,23 @@ +# Troubleshooting + +The goal of this guide is to provide some useful entry points for debugging. + + +Feel free to open an issue with these logs, and/or analyze them accordingly. + + +# Salt useful logs + +- `/var/log/salt-os-setup.log`: initial OS setup registering the machines to SCC, updating the system, etc. +- `/var/log/salt-predeployment.log`: before executing formula states, execute the saltstack file contained in the repo of ha-sap-terraform-deployments. +- `/var/log/salt-deployment.log`: this is the log file where the salt formulas execution is logged. (salt-formulas are not part of the github deployments project). + + +# Netweaver debugging + +- `/tmp/swpm_unnattended/sapinst.log` is the best first entrypoint to look at when debugging netweaver failures. + + +# Misc + +When opening issues, provide which SLE version, which provider, and the logs (described before). 
diff --git a/gcp/README.md b/gcp/README.md index 8ecae8ce7..a8deb0ed4 100644 --- a/gcp/README.md +++ b/gcp/README.md @@ -41,8 +41,8 @@ Note: You must run this command to use the Gcloud SDK and to apply this Terrafor ``` terraform init -terraform workspace new my-execution # optional -terraform workspace select my-execution # optional +terraform workspace new myexecution # optional +terraform workspace select myexecution # optional terraform plan terraform apply ``` @@ -64,90 +64,27 @@ The infrastructure deployed includes: - Public IP access for the virtual machines via ssh. - The definition of the image to use in the virtual machines. - Virtual machines to deploy. +- The created HA environment uses the route table option to forward the coming requests and manage the floating IP address of the cluster (gcp-vpc-move-route resource agent). By default, this configuration will create 3 instances in GCP: one for support services (mainly iSCSI as most other services - DHCP, NTP, etc - are provided by Google) and 2 cluster nodes, but this can be changed to deploy more cluster nodes as needed. -## Provisioning by Salt -By default, the cluster and HANA installation is done using Salt Formulas in foreground. -To customize this provisioning, you have to create the pillar files (cluster.sls and hana.sls) according to the examples in the [pillar_examples](https://github.com/SUSE/ha-sap-terraform-deployments/blob/master/pillar_examples) folder (more information in the dedicated [README](https://github.com/SUSE/ha-sap-terraform-deployments/blob/master/pillar_examples/README.md)) +# Specifications -## Specification +In order to deploy the environment, different configurations are available through the terraform variables. These variables can be configured using a `terraform.tfvars` file. An example is available in [terraform.tfvars.example](./terraform.tvars.example). To find all the available variables check the [variables.tf](./variables.tf) file. 
-These are the relevant files and what each provides: +## QA deployment -- [disks.tf](disks.tf): definitions of the storage used for images and virtual machines. +The project has been created in order to provide the option to run the deployment in a `Test` or `QA` mode. This mode only enables the packages coming properly from SLE channels, so no other packages will be used. Find more information [here](../doc/qa.md). -- [instances.tf](instances.tf): definition of the GCP instances to create on deployment. +## Pillar files configuration -- [network.tf](network.tf): definition of network resources used by the infrastructure and the firewall rules. +Besides the `terraform.tfvars` file usage to configure the deployment, a more advanced configuration is available through pillar files customization. Find more information [here](../pillar_examples/README.md). -- [outputs.tf](outputs.tf): definition of outputs of the terraform configuration. +## Use already existing network resources -- [provider.tf](provider.tf): definition of the providers being used in the terraform configuration. - -- [remote-state.sample](remote-state.sample): sample file for the definition of the backend to [store the Terraform state file remotely](create_remote_state). - -- [salt_provisioner.tf](salt_provisioner.tf): salt provisioning resources. - -- [salt_provisioner_script.tpl](../../salt/salt_provisioner_script.tpl): template code for the initialization script for the servers. This will add the salt-minion if needed and execute the SALT deployment. - -- [variables.tf](variables.tf): definition of variables used in the configuration. - -- [terraform.tfvars.example](terraform.tfvars.example): file containing initialization values for variables used throughout the configuration. **Rename/Duplicate this file to terraform.tfvars and edit the content with your values before use**. 
- -#### Variables - -In the file [terraform.tfvars](terraform.tfvars.example) there are a number of variables that control what is deployed. Some of these variables are: - -* **project**: must contain the project name. -* **gcp_credentials_file**: must contain the path to the JSON file with the GCP credentials created above. -* **ip_cidr_range**: must contain the internal IPv4 range. -* **iscsi_ip**: must contain the iscsi server IP. -* **machine_type** and **machine_type_iscsi_server** variables must contain the [GCP machine type](https://cloud.google.com/compute/docs/machine-types) for the SAP HANA nodes as well as the iSCSI server node. -* **hana_data_disk_type**: disk type to use for HANA data (pd-ssd by default). -* **hana_data_disk_size**: disk size on GB to use for HANA data disk (834GB by default). -* **hana_backup_disk_type**: disk type to use for HANA data backup (pd-standard by default). -* **hana_backup_disk_size**: disk size on GB to use for HANA backup disk (416GB by default). -* **private_key_location**: the path to your SSH private key. This is used by the provisioner. -* **public_key_location**: the path to your SSH public key. This is used to access the instances. -* **region**: the name of the desired region. -* **sap_hana_deployment_bucket**: the name of the Google Storage bucket with the HANA installation files. -* **sles4sap_boot_image**: the name of the SLES4SAP image. - -**Important:** The image used for the iSCSI server **must be at least SLES 15 version** since the iSCSI salt formula is not compatible with lower versions. Use the variable `iscsi_server_boot_image` below. -* **iscsi_server_boot_image**: the name of the SLES image for the iSCSI server used for SBD stonith. -* **init_type**: variable controls what is deployed in the cluster nodes. Valid values are `all` (installs HANA and configures cluster), `skip-hana` (does not install HANA, but configures cluster). Defaults to `all`. 
-* **iscsidev**: device used by the iSCSI server to provide LUNs. -* **iscsi_disks**: attached partitions number for iscsi server. -* **cluster_ssh_pub**: path to a custom ssh public key to upload to the nodes. -* **cluster_ssh_key**: path to a custom ssh private key to upload to the nodes. -* **hana_inst_folder**: path where HANA installation master will be downloaded from `GCP Bucket`. -* **hana_disk_device**: device used by node where HANA will be installed (/dev/sdb by default). -* **hana_backup_device**: device used by node where HANA backup will be stored (/dev/sdc by default). -* **hana_inst_disk_device**: device used by node where HANA will be downloaded (/dev/sdd by default). -* **hana_cluster_vip**: IP address used to configure the hana cluster floating IP. It must be in other subnet than the machines! -* **ha_sap_deployment_repo**: Repository with HA and Salt formula packages. The latest RPM packages can be found at [https://download.opensuse.org/repositories/network:/ha-clustering:/Factory/{YOUR OS VERSION}](https://download.opensuse.org/repositories/network:/ha-clustering:/Factory/) -* **scenario_type**: SAP HANA scenario type. Available options: `performance-optimized` and `cost-optimized`. -* **provisioner**: select the desired provisioner to configure the nodes. Salt is used by default: [salt](../../salt). Let it empty to disable the provisioning part. -* **background**: run the provisioning process in background finishing terraform execution. -* **reg_code**: registration code for the installed base product (Ex.: SLES for SAP). This parameter is optional. If informed, the system will be registered against the SUSE Customer Center. -* **reg_email**: email to be associated with the system registration. This parameter is optional. -* **reg_additional_modules**: additional optional modules and extensions to be registered (Ex.: Containers Module, HA module, Live Patching, etc). 
The variable is a key-value map, where the key is the _module name_ and the value is the _registration code_. If the _registration code_ is not needed, set an empty string as value. The module format must follow SUSEConnect convention: - - `//` - - *Example:* Suggested modules for SLES for SAP 15 - - sle-module-basesystem/15/x86_64 - sle-module-desktop-applications/15/x86_64 - sle-module-server-applications/15/x86_64 - sle-ha/15/x86_64 (use the same regcode as SLES for SAP) - sle-module-sap-applications/15/x86_64 - - For more information about registration, check the ["Registering SUSE Linux Enterprise and Managing Modules/Extensions"](https://www.suse.com/documentation/sles-15/book_sle_deployment/data/cha_register_sle.html) guide. - - * **additional_packages**: Additional packages to add to the guest machines. - * **hosts_ips**: Each cluster nodes IP address (sequential order). Mandatory to have a generic `/etc/hosts` file. - -[Specific QA variables](https://github.com/juadk/ha-sap-terraform-deployments/blob/improve_QA_documentation/doc/qa.md#specific-qa-variables) +The usage of already existing network resources (vpc, subnet, firewall rules, etc) can be done configuring +the `terraform.tfvars` file and adjusting some variables. The example of how to use them is available +at [terraform.tfvars.example](terraform.tfvars.example). 
# Advanced Usage diff --git a/gcp/disks.tf b/gcp/disks.tf deleted file mode 100644 index 353526f8a..000000000 --- a/gcp/disks.tf +++ /dev/null @@ -1,40 +0,0 @@ -resource "google_compute_disk" "iscsi_data" { - name = "${terraform.workspace}-${var.name}-iscsi-data" - type = "pd-standard" - size = "10" - zone = element(data.google_compute_zones.available.names, 0) -} - -# HANA disks configuration information: https://cloud.google.com/solutions/sap/docs/sap-hana-planning-guide#storage_configuration - -resource "google_compute_disk" "data" { - count = var.ninstances - name = "${terraform.workspace}-${var.name}-data-${count.index}" - type = var.hana_data_disk_type - size = var.hana_data_disk_size - zone = element(data.google_compute_zones.available.names, count.index) -} - -resource "google_compute_disk" "backup" { - count = var.ninstances - name = "${terraform.workspace}-${var.name}-backup-${count.index}" - type = var.hana_backup_disk_type - size = var.hana_backup_disk_size - zone = element(data.google_compute_zones.available.names, count.index) -} - -resource "google_compute_disk" "hana-software" { - count = var.ninstances - name = "${terraform.workspace}-${var.name}-hana-software-${count.index}" - type = "pd-standard" - size = "20" - zone = element(data.google_compute_zones.available.names, count.index) -} - -resource "google_compute_disk" "monitoring_data" { - count = var.monitoring_enabled == true ? 
1 : 0 - name = "${terraform.workspace}-${var.name}-monitoring-data" - type = "pd-standard" - size = "20" - zone = element(data.google_compute_zones.available.names, 0) -} diff --git a/gcp/infrastructure.tf b/gcp/infrastructure.tf new file mode 100644 index 000000000..8a58364e9 --- /dev/null +++ b/gcp/infrastructure.tf @@ -0,0 +1,86 @@ +# Configure the GCP Provider +provider "google" { + credentials = file(var.gcp_credentials_file) + project = var.project + region = var.region +} + +data "google_compute_zones" "available" { + region = var.region + status = "UP" +} + +terraform { + required_version = ">= 0.12" +} + +data "google_compute_subnetwork" "current-subnet" { + count = var.ip_cidr_range == "" ? 1 : 0 + name = var.subnet_name + region = var.region +} + +locals { + network_link = var.vpc_name == "" ? google_compute_network.ha_network.0.self_link : format( + "https://www.googleapis.com/compute/v1/projects/%s/global/networks/%s", var.project, var.vpc_name) + vpc_name = var.vpc_name == "" ? google_compute_network.ha_network.0.name : var.vpc_name + subnet_name = var.subnet_name == "" ? google_compute_subnetwork.ha_subnet.0.name : var.subnet_name + subnet_address_range = var.subnet_name == "" ? var.ip_cidr_range : (var.ip_cidr_range == "" ? data.google_compute_subnetwork.current-subnet.0.ip_cidr_range : var.ip_cidr_range) +} + +# Network resources: Network, Subnet +resource "google_compute_network" "ha_network" { + count = var.vpc_name == "" ? 1 : 0 + name = "${terraform.workspace}-network" + auto_create_subnetworks = "false" +} + +resource "google_compute_subnetwork" "ha_subnet" { + count = var.subnet_name == "" ? 
1 : 0 + name = "${terraform.workspace}-subnet" + network = local.network_link + region = var.region + ip_cidr_range = local.subnet_address_range +} + +# Network firewall rules +resource "google_compute_firewall" "ha_firewall_allow_internal" { + name = "${terraform.workspace}-fw-internal" + network = local.vpc_name + source_ranges = [local.subnet_address_range] + + allow { + protocol = "icmp" + } + + allow { + protocol = "udp" + ports = ["0-65535"] + } + + allow { + protocol = "tcp" + ports = ["0-65535"] + } +} + +resource "google_compute_firewall" "ha_firewall_allow_icmp" { + count = var.create_firewall_rules ? 1 : 0 + name = "${terraform.workspace}-fw-icmp" + network = local.vpc_name + + allow { + protocol = "icmp" + } +} + +resource "google_compute_firewall" "ha_firewall_allow_tcp" { + count = var.create_firewall_rules ? 1 : 0 + name = "${terraform.workspace}-fw-tcp" + network = local.vpc_name + + allow { + protocol = "tcp" + ports = ["22", "80", "443", "3000", "7630", "9668", "9100", "9664", "9090"] + } +} diff --git a/gcp/instances.tf b/gcp/instances.tf deleted file mode 100644 index 8c7148530..000000000 --- a/gcp/instances.tf +++ /dev/null @@ -1,146 +0,0 @@ -resource "google_compute_instance" "iscsisrv" { - name = "${terraform.workspace}-iscsisrv" - description = "iSCSI server" - machine_type = var.machine_type_iscsi_server - zone = element(data.google_compute_zones.available.names, 0) - - lifecycle { - create_before_destroy = true - } - - network_interface { - subnetwork = google_compute_subnetwork.ha_subnet.name - network_ip = var.iscsi_ip - - access_config { - nat_ip = "" - } - } - - scheduling { - automatic_restart = true - on_host_maintenance = "MIGRATE" - preemptible = false - } - - boot_disk { - initialize_params { - image = var.iscsi_server_boot_image - } - - auto_delete = true - } - - attached_disk { - source = google_compute_disk.iscsi_data.self_link - device_name = google_compute_disk.iscsi_data.name - mode = "READ_WRITE" - } - - metadata = { - 
sshKeys = "root:${file(var.public_key_location)}" - } -} - -resource "google_compute_instance" "clusternodes" { - machine_type = var.machine_type - name = "${terraform.workspace}-${var.name}${var.ninstances > 1 ? "0${count.index + 1}" : ""}" - count = var.ninstances - zone = element(data.google_compute_zones.available.names, count.index) - - can_ip_forward = true - - network_interface { - subnetwork = google_compute_subnetwork.ha_subnet.name - network_ip = element(var.host_ips, count.index) - - access_config { - nat_ip = "" - } - } - - scheduling { - automatic_restart = true - on_host_maintenance = "MIGRATE" - preemptible = false - } - - boot_disk { - initialize_params { - image = var.sles4sap_boot_image - } - - auto_delete = true - } - - attached_disk { - source = element(google_compute_disk.data.*.self_link, count.index) - device_name = element(google_compute_disk.data.*.name, count.index) - mode = "READ_WRITE" - } - - attached_disk { - source = element(google_compute_disk.backup.*.self_link, count.index) - device_name = element(google_compute_disk.backup.*.name, count.index) - mode = "READ_WRITE" - } - - attached_disk { - source = element(google_compute_disk.hana-software.*.self_link, count.index) - device_name = element(google_compute_disk.hana-software.*.name, count.index) - mode = "READ_WRITE" - } - - metadata = { - sshKeys = "root:${file(var.public_key_location)}" - } - - service_account { - scopes = ["compute-rw", "storage-rw", "logging-write", "monitoring-write", "service-control", "service-management"] - } -} - -resource "google_compute_instance" "monitoring" { - count = var.monitoring_enabled == true ? 
1 : 0 - name = "${terraform.workspace}-monitoring" - description = "Monitoring server" - machine_type = "custom-1-2048" - zone = element(data.google_compute_zones.available.names, 0) - - lifecycle { - create_before_destroy = true - } - - network_interface { - subnetwork = google_compute_subnetwork.ha_subnet.name - network_ip = var.monitoring_srv_ip - - access_config { - nat_ip = "" - } - } - - scheduling { - automatic_restart = true - on_host_maintenance = "MIGRATE" - preemptible = false - } - - boot_disk { - initialize_params { - image = var.sles4sap_boot_image - } - - auto_delete = true - } - - attached_disk { - source = element(google_compute_disk.monitoring_data.*.self_link, count.index) - device_name = element(google_compute_disk.monitoring_data.*.name, count.index) - mode = "READ_WRITE" - } - - metadata = { - sshKeys = "root:${file(var.public_key_location)}" - } -} diff --git a/gcp/main.tf b/gcp/main.tf index 21d831836..a629325a7 100644 --- a/gcp/main.tf +++ b/gcp/main.tf @@ -1,18 +1,53 @@ +module "local_execution" { + source = "../generic_modules/local_exec" + enabled = var.pre_deployment +} + +# This locals entry is used to store the IP addresses of all the machines. +# Autogenerated addresses example based in 10.0.0.0/24 +# Iscsi server: 10.0.0.4 +# Monitoring: 10.0.0.5 +# Hana ips: 10.0.0.10, 10.0.0.11 +# Hana cluster vip: 10.0.1.12 +# DRBD ips: 10.0.0.20, 10.0.0.21 +# DRBD cluster vip: 10.0.1.22 +# Netweaver ips: 10.0.0.30, 10.0.0.31, 10.0.0.32, 10.0.0.33 +# Netweaver virtual ips: 10.0.1.34, 10.0.1.35, 10.0.1.36, 10.0.1.37 +# If the addresses are provided by the user they will always have preference +locals { + iscsi_srv_ip = var.iscsi_srv_ip != "" ? var.iscsi_srv_ip : cidrhost(local.subnet_address_range, 4) + monitoring_srv_ip = var.monitoring_srv_ip != "" ? var.monitoring_srv_ip : cidrhost(local.subnet_address_range, 5) + + hana_ip_start = 10 + hana_ips = length(var.hana_ips) != 0 ? 
var.hana_ips : [for ip_index in range(local.hana_ip_start, local.hana_ip_start + var.hana_count) : cidrhost(local.subnet_address_range, ip_index)] + hana_cluster_vip = var.hana_cluster_vip != "" ? var.hana_cluster_vip : cidrhost(cidrsubnet(local.subnet_address_range, -8, 0), 256 + local.hana_ip_start + var.hana_count) + + # 2 is hardcoded for drbd because we always deploy 4 machines + drbd_ip_start = 20 + drbd_ips = length(var.drbd_ips) != 0 ? var.drbd_ips : [for ip_index in range(local.drbd_ip_start, local.drbd_ip_start + 2) : cidrhost(local.subnet_address_range, ip_index)] + drbd_cluster_vip = var.drbd_cluster_vip != "" ? var.drbd_cluster_vip : cidrhost(cidrsubnet(local.subnet_address_range, -8, 0), 256 + local.drbd_ip_start + 2) + + # 4 is hardcoded for netweaver because we always deploy 4 machines + netweaver_ip_start = 30 + netweaver_ips = length(var.netweaver_ips) != 0 ? var.netweaver_ips : [for ip_index in range(local.netweaver_ip_start, local.netweaver_ip_start + 4) : cidrhost(local.subnet_address_range, ip_index)] + netweaver_virtual_ips = length(var.netweaver_virtual_ips) != 0 ? var.netweaver_virtual_ips : [for ip_index in range(local.netweaver_ip_start, local.netweaver_ip_start + 4) : cidrhost(cidrsubnet(local.subnet_address_range, -8, 0), 256 + ip_index + 4)] +} + module "drbd_node" { source = "./modules/drbd_node" drbd_count = var.drbd_enabled == true ? 
2 : 0 machine_type = var.drbd_machine_type compute_zones = data.google_compute_zones.available.names - network_name = google_compute_network.ha_network.name - network_subnet_name = google_compute_subnetwork.ha_subnet.name + network_name = local.vpc_name + network_subnet_name = local.subnet_name drbd_image = var.drbd_image drbd_data_disk_size = var.drbd_data_disk_size drbd_data_disk_type = var.drbd_data_disk_type - drbd_cluster_vip = var.drbd_cluster_vip + drbd_cluster_vip = local.drbd_cluster_vip gcp_credentials_file = var.gcp_credentials_file network_domain = "tf.local" - host_ips = var.drbd_ips - iscsi_srv_ip = google_compute_instance.iscsisrv.network_interface.0.network_ip + host_ips = local.drbd_ips + iscsi_srv_ip = module.iscsi_server.iscsisrv_ip public_key_location = var.public_key_location private_key_location = var.private_key_location cluster_ssh_pub = var.cluster_ssh_pub @@ -25,34 +60,139 @@ module "drbd_node" { devel_mode = var.devel_mode provisioner = var.provisioner background = var.background + on_destroy_dependencies = [ + google_compute_firewall.ha_firewall_allow_tcp + ] } module "netweaver_node" { - source = "./modules/netweaver_node" - netweaver_count = var.netweaver_enabled == true ? 4 : 0 - machine_type = var.netweaver_machine_type + source = "./modules/netweaver_node" + netweaver_count = var.netweaver_enabled == true ? 
4 : 0 + machine_type = var.netweaver_machine_type + compute_zones = data.google_compute_zones.available.names + network_name = local.vpc_name + network_subnet_name = local.subnet_name + netweaver_image = var.netweaver_image + gcp_credentials_file = var.gcp_credentials_file + network_domain = "tf.local" + host_ips = local.netweaver_ips + iscsi_srv_ip = module.iscsi_server.iscsisrv_ip + public_key_location = var.public_key_location + private_key_location = var.private_key_location + cluster_ssh_pub = var.cluster_ssh_pub + cluster_ssh_key = var.cluster_ssh_key + netweaver_product_id = var.netweaver_product_id + netweaver_software_bucket = var.netweaver_software_bucket + netweaver_swpm_folder = var.netweaver_swpm_folder + netweaver_sapcar_exe = var.netweaver_sapcar_exe + netweaver_swpm_sar = var.netweaver_swpm_sar + netweaver_swpm_extract_dir = var.netweaver_swpm_extract_dir + netweaver_sapexe_folder = var.netweaver_sapexe_folder + netweaver_additional_dvds = var.netweaver_additional_dvds + netweaver_nfs_share = "${local.drbd_cluster_vip}:/HA1" + hana_ip = local.hana_cluster_vip + virtual_host_ips = local.netweaver_virtual_ips + reg_code = var.reg_code + reg_email = var.reg_email + reg_additional_modules = var.reg_additional_modules + ha_sap_deployment_repo = var.ha_sap_deployment_repo + devel_mode = var.devel_mode + provisioner = var.provisioner + background = var.background + monitoring_enabled = var.monitoring_enabled + on_destroy_dependencies = [ + google_compute_firewall.ha_firewall_allow_tcp + ] +} + +module "hana_node" { + source = "./modules/hana_node" + hana_count = var.hana_count + machine_type = var.machine_type + compute_zones = data.google_compute_zones.available.names + network_name = local.vpc_name + network_subnet_name = local.subnet_name + init_type = var.init_type + sles4sap_boot_image = var.sles4sap_boot_image + gcp_credentials_file = var.gcp_credentials_file + host_ips = local.hana_ips + iscsi_srv_ip = module.iscsi_server.iscsisrv_ip + 
sap_hana_deployment_bucket = var.sap_hana_deployment_bucket + hana_inst_folder = var.hana_inst_folder + hana_platform_folder = var.hana_platform_folder + hana_sapcar_exe = var.hana_sapcar_exe + hdbserver_sar = var.hdbserver_sar + hana_extract_dir = var.hana_extract_dir + hana_data_disk_type = var.hana_data_disk_type + hana_data_disk_size = var.hana_data_disk_size + hana_backup_disk_type = var.hana_backup_disk_type + hana_backup_disk_size = var.hana_backup_disk_size + hana_fstype = var.hana_fstype + hana_cluster_vip = local.hana_cluster_vip + scenario_type = var.scenario_type + public_key_location = var.public_key_location + private_key_location = var.private_key_location + cluster_ssh_pub = var.cluster_ssh_pub + cluster_ssh_key = var.cluster_ssh_key + reg_code = var.reg_code + reg_email = var.reg_email + reg_additional_modules = var.reg_additional_modules + ha_sap_deployment_repo = var.ha_sap_deployment_repo + additional_packages = var.additional_packages + devel_mode = var.devel_mode + hwcct = var.hwcct + qa_mode = var.qa_mode + provisioner = var.provisioner + background = var.background + monitoring_enabled = var.monitoring_enabled + on_destroy_dependencies = [ + google_compute_firewall.ha_firewall_allow_tcp + ] +} + +module "monitoring" { + source = "./modules/monitoring" + compute_zones = data.google_compute_zones.available.names + network_subnet_name = local.subnet_name + sles4sap_boot_image = var.sles4sap_boot_image + public_key_location = var.public_key_location + private_key_location = var.private_key_location + reg_code = var.reg_code + reg_email = var.reg_email + reg_additional_modules = var.reg_additional_modules + ha_sap_deployment_repo = var.ha_sap_deployment_repo + additional_packages = var.additional_packages + monitoring_srv_ip = local.monitoring_srv_ip + monitoring_enabled = var.monitoring_enabled + hana_targets = concat(local.hana_ips, [local.hana_cluster_vip]) # we use the vip to target the active hana instance + drbd_targets = var.drbd_enabled ? 
local.drbd_ips : [] + netweaver_targets = var.netweaver_enabled ? local.netweaver_virtual_ips : [] + provisioner = var.provisioner + background = var.background + on_destroy_dependencies = [ + google_compute_firewall.ha_firewall_allow_tcp + ] +} + +module "iscsi_server" { + source = "./modules/iscsi_server" + machine_type_iscsi_server = var.machine_type_iscsi_server compute_zones = data.google_compute_zones.available.names - network_name = google_compute_network.ha_network.name - network_subnet_name = google_compute_subnetwork.ha_subnet.name - netweaver_image = var.netweaver_image - gcp_credentials_file = var.gcp_credentials_file - network_domain = "tf.local" - host_ips = var.netweaver_ips - iscsi_srv_ip = google_compute_instance.iscsisrv.network_interface.0.network_ip + network_subnet_name = local.subnet_name + iscsi_server_boot_image = var.iscsi_server_boot_image + iscsi_srv_ip = local.iscsi_srv_ip + iscsi_disks = var.iscsi_disks public_key_location = var.public_key_location private_key_location = var.private_key_location - cluster_ssh_pub = var.cluster_ssh_pub - cluster_ssh_key = var.cluster_ssh_key - netweaver_software_bucket = var.netweaver_software_bucket - netweaver_nfs_share = "${var.drbd_cluster_vip}:/HA1" - hana_cluster_vip = var.hana_cluster_vip - virtual_host_ips = var.netweaver_virtual_ips reg_code = var.reg_code reg_email = var.reg_email reg_additional_modules = var.reg_additional_modules ha_sap_deployment_repo = var.ha_sap_deployment_repo - devel_mode = var.devel_mode + additional_packages = var.additional_packages + qa_mode = var.qa_mode provisioner = var.provisioner background = var.background - monitoring_enabled = var.monitoring_enabled + on_destroy_dependencies = [ + google_compute_firewall.ha_firewall_allow_tcp + ] } diff --git a/gcp/modules/drbd_node/main.tf b/gcp/modules/drbd_node/main.tf index e4cf52dc0..2905f009e 100644 --- a/gcp/modules/drbd_node/main.tf +++ b/gcp/modules/drbd_node/main.tf @@ -9,9 +9,9 @@ resource "google_compute_disk" 
"data" { zone = element(var.compute_zones, count.index) } -# temporary HA solution to create the static routes, eventually this routes must be created by the RA gcp-vpc-move-route +# Don't remove the routes! Even though the RA gcp-vpc-move-route creates them, if they are not created here, the terraform destroy cannot work as it will find new route names resource "google_compute_route" "drbd-route" { - name = "drbd-route" + name = "${terraform.workspace}-drbd-route" count = var.drbd_count > 0 ? 1 : 0 dest_range = "${var.drbd_cluster_vip}/32" network = var.network_name @@ -22,7 +22,7 @@ resource "google_compute_route" "drbd-route" { resource "google_compute_instance" "drbd" { machine_type = var.machine_type - name = "${terraform.workspace}-drbd${var.drbd_count > 1 ? "0${count.index + 1}" : ""}" + name = "${terraform.workspace}-drbd0${count.index + 1}" count = var.drbd_count zone = element(var.compute_zones, count.index) @@ -65,3 +65,13 @@ resource "google_compute_instance" "drbd" { scopes = ["compute-rw", "storage-rw", "logging-write", "monitoring-write", "service-control", "service-management"] } } + +module "drbd_on_destroy" { + source = "../../../generic_modules/on_destroy" + node_count = var.drbd_count + instance_ids = google_compute_instance.drbd.*.id + user = "root" + private_key_location = var.private_key_location + public_ips = google_compute_instance.drbd.*.network_interface.0.access_config.0.nat_ip + dependencies = var.on_destroy_dependencies +} diff --git a/gcp/modules/drbd_node/salt_provisioner.tf b/gcp/modules/drbd_node/salt_provisioner.tf index d8a8a33b5..dabad5806 100644 --- a/gcp/modules/drbd_node/salt_provisioner.tf +++ b/gcp/modules/drbd_node/salt_provisioner.tf @@ -1,11 +1,3 @@ -data "template_file" "salt_provisioner" { - template = file("../salt/salt_provisioner_script.tpl") - - vars = { - regcode = var.reg_code - } -} - resource "null_resource" "drbd_provisioner" { count = var.provisioner == "salt" ? 
var.drbd_count : 0 @@ -23,27 +15,12 @@ resource "null_resource" "drbd_provisioner" { private_key = file(var.private_key_location) } - provisioner "file" { - source = var.gcp_credentials_file - destination = "/root/google_credentials.json" - } - - provisioner "file" { - source = "../salt" - destination = "/tmp" - } - - provisioner "file" { - content = data.template_file.salt_provisioner.rendered - destination = "/tmp/salt_provisioner.sh" - } - provisioner "file" { content = < 1 ? "0${count.index + 1}" : ""} +hostname: ${terraform.workspace}-drbd0${count.index + 1} network_domain: ${var.network_domain} additional_packages: [] reg_code: ${var.reg_code} @@ -54,12 +31,14 @@ host_ips: [${join(", ", formatlist("'%s'", var.host_ips))}] host_ip: ${element(var.host_ips, count.index)} cluster_ssh_pub: ${var.cluster_ssh_pub} cluster_ssh_key: ${var.cluster_ssh_key} -drbd_disk_device: /dev/sdb +drbd_disk_device: ${format("%s%s","/dev/disk/by-id/google-", element(google_compute_instance.drbd.*.attached_disk.0.device_name, count.index))} drbd_cluster_vip: ${var.drbd_cluster_vip} shared_storage_type: iscsi -sbd_disk_device: /dev/sdd +sbd_disk_index: 3 iscsi_srv_ip: ${var.iscsi_srv_ip} ha_sap_deployment_repo: ${var.ha_sap_deployment_repo} +vpc_network_name: ${var.network_name} +route_table: ${google_compute_route.drbd-route[0].name} monitoring_enabled: ${var.monitoring_enabled} devel_mode: ${var.devel_mode} partitions: @@ -70,11 +49,14 @@ partitions: EOF destination = "/tmp/grains" } +} - provisioner "remote-exec" { - inline = [ - "${var.background ? "nohup" : ""} sudo sh /tmp/salt_provisioner.sh > /tmp/provisioning.log ${var.background ? "&" : ""}", - "return_code=$? && sleep 1 && exit $return_code", - ] # Workaround to let the process start in background properly - } +module "drbd_provision" { + source = "../../../generic_modules/salt_provisioner" + node_count = var.provisioner == "salt" ? 
var.drbd_count : 0 + instance_ids = null_resource.drbd_provisioner.*.id + user = "root" + private_key_location = var.private_key_location + public_ips = google_compute_instance.drbd.*.network_interface.0.access_config.0.nat_ip + background = var.background } diff --git a/gcp/modules/drbd_node/variables.tf b/gcp/modules/drbd_node/variables.tf index 06cc15163..9afb54c58 100644 --- a/gcp/modules/drbd_node/variables.tf +++ b/gcp/modules/drbd_node/variables.tf @@ -113,11 +113,13 @@ variable "ha_sap_deployment_repo" { variable "monitoring_enabled" { description = "enable the host to be monitored by exporters, e.g node_exporter" + type = bool default = false } variable "devel_mode" { description = "Whether or not to install the HA/SAP packages from the `ha_sap_deployment_repo`" + type = bool default = false } @@ -128,5 +130,12 @@ variable "provisioner" { variable "background" { description = "Run the provisioner execution in background if set to true finishing terraform execution" + type = bool default = false } + +variable "on_destroy_dependencies" { + description = "Resources objects need in the on_destroy script (everything that allows ssh connection)" + type = any + default = [] +} diff --git a/gcp/modules/hana_node/main.tf b/gcp/modules/hana_node/main.tf new file mode 100644 index 000000000..b015f3051 --- /dev/null +++ b/gcp/modules/hana_node/main.tf @@ -0,0 +1,105 @@ +# HANA deployment in GCP + +# HANA disks configuration information: https://cloud.google.com/solutions/sap/docs/sap-hana-planning-guide#storage_configuration +resource "google_compute_disk" "data" { + count = var.hana_count + name = "${terraform.workspace}-hana-data-${count.index}" + type = var.hana_data_disk_type + size = var.hana_data_disk_size + zone = element(var.compute_zones, count.index) +} + +resource "google_compute_disk" "backup" { + count = var.hana_count + name = "${terraform.workspace}-hana-backup-${count.index}" + type = var.hana_backup_disk_type + size = var.hana_backup_disk_size + zone 
= element(var.compute_zones, count.index) +} + +resource "google_compute_disk" "hana-software" { + count = var.hana_count + name = "${terraform.workspace}-hana-software-${count.index}" + type = "pd-standard" + size = "20" + zone = element(var.compute_zones, count.index) +} + +# Don't remove the routes! Even though the RA gcp-vpc-move-route creates them, if they are not created here, the terraform destroy cannot work as it will find new route names +resource "google_compute_route" "hana-route" { + name = "${terraform.workspace}-hana-route" + count = var.hana_count > 0 ? 1 : 0 + dest_range = "${var.hana_cluster_vip}/32" + network = var.network_name + next_hop_instance = google_compute_instance.clusternodes.0.name + next_hop_instance_zone = element(var.compute_zones, 0) + priority = 1000 +} + +resource "google_compute_instance" "clusternodes" { + machine_type = var.machine_type + name = "${terraform.workspace}-hana0${count.index + 1}" + count = var.hana_count + zone = element(var.compute_zones, count.index) + + can_ip_forward = true + + network_interface { + subnetwork = var.network_subnet_name + network_ip = element(var.host_ips, count.index) + + access_config { + nat_ip = "" + } + } + + scheduling { + automatic_restart = true + on_host_maintenance = "MIGRATE" + preemptible = false + } + + boot_disk { + initialize_params { + image = var.sles4sap_boot_image + } + + auto_delete = true + } + + attached_disk { + source = element(google_compute_disk.data.*.self_link, count.index) + device_name = element(google_compute_disk.data.*.name, count.index) + mode = "READ_WRITE" + } + + attached_disk { + source = element(google_compute_disk.backup.*.self_link, count.index) + device_name = element(google_compute_disk.backup.*.name, count.index) + mode = "READ_WRITE" + } + + attached_disk { + source = element(google_compute_disk.hana-software.*.self_link, count.index) + device_name = element(google_compute_disk.hana-software.*.name, count.index) + mode = "READ_WRITE" + } + + 
metadata = { + sshKeys = "root:${file(var.public_key_location)}" + } + + service_account { + scopes = ["compute-rw", "storage-rw", "logging-write", "monitoring-write", "service-control", "service-management"] + } +} + +module "hana_on_destroy" { + source = "../../../generic_modules/on_destroy" + node_count = var.hana_count + instance_ids = google_compute_instance.clusternodes.*.id + user = "root" + private_key_location = var.private_key_location + public_ips = google_compute_instance.clusternodes.*.network_interface.0.access_config.0.nat_ip + dependencies = var.on_destroy_dependencies +} diff --git a/gcp/modules/hana_node/outputs.tf b/gcp/modules/hana_node/outputs.tf new file mode 100644 index 000000000..6b8b5ffcf --- /dev/null +++ b/gcp/modules/hana_node/outputs.tf @@ -0,0 +1,15 @@ +output "cluster_nodes_ip" { + value = google_compute_instance.clusternodes.*.network_interface.0.network_ip +} + +output "cluster_nodes_public_ip" { + value = google_compute_instance.clusternodes.*.network_interface.0.access_config.0.nat_ip +} + +output "cluster_nodes_name" { + value = google_compute_instance.clusternodes.*.name +} + +output "cluster_nodes_public_name" { + value = [] +} \ No newline at end of file diff --git a/gcp/modules/hana_node/salt_provisioner.tf b/gcp/modules/hana_node/salt_provisioner.tf new file mode 100644 index 000000000..66b4c9a82 --- /dev/null +++ b/gcp/modules/hana_node/salt_provisioner.tf @@ -0,0 +1,81 @@ +resource "null_resource" "hana_node_provisioner" { + count = var.provisioner == "salt" ? 
var.hana_count : 0 + + triggers = { + cluster_instance_ids = join(",", google_compute_instance.clusternodes.*.id) + } + + connection { + host = element( + google_compute_instance.clusternodes.*.network_interface.0.access_config.0.nat_ip, + count.index, + ) + type = "ssh" + user = "root" + private_key = file(var.private_key_location) + } + + provisioner "file" { + source = var.gcp_credentials_file + destination = "/root/google_credentials.json" + } + + provisioner "file" { + content = < 0 ? 1 : 0 dest_range = "${element(var.virtual_host_ips, 0)}/32" network = var.network_name @@ -20,9 +20,19 @@ resource "google_compute_route" "nw-route" { priority = 1000 } +resource "google_compute_route" "nw-ers-route" { + name = "${terraform.workspace}-nw-ers-route" + count = var.netweaver_count > 0 ? 1 : 0 + dest_range = "${element(var.virtual_host_ips, 1)}/32" + network = var.network_name + next_hop_instance = google_compute_instance.netweaver.1.name + next_hop_instance_zone = element(var.compute_zones, 1) + priority = 1000 +} + resource "google_compute_instance" "netweaver" { machine_type = var.machine_type - name = "${terraform.workspace}-netweaver${var.netweaver_count > 1 ? 
"0${count.index + 1}" : ""}" + name = "${terraform.workspace}-netweaver0${count.index + 1}" count = var.netweaver_count zone = element(var.compute_zones, count.index) @@ -65,3 +75,13 @@ resource "google_compute_instance" "netweaver" { scopes = ["compute-rw", "storage-rw", "logging-write", "monitoring-write", "service-control", "service-management"] } } + +module "netweaver_on_destroy" { + source = "../../../generic_modules/on_destroy" + node_count = var.netweaver_count + instance_ids = google_compute_instance.netweaver.*.id + user = "root" + private_key_location = var.private_key_location + public_ips = google_compute_instance.netweaver.*.network_interface.0.access_config.0.nat_ip + dependencies = var.on_destroy_dependencies +} diff --git a/gcp/modules/netweaver_node/salt_provisioner.tf b/gcp/modules/netweaver_node/salt_provisioner.tf index 27c90e6f8..78cf38a29 100644 --- a/gcp/modules/netweaver_node/salt_provisioner.tf +++ b/gcp/modules/netweaver_node/salt_provisioner.tf @@ -1,11 +1,3 @@ -data "template_file" "salt_provisioner" { - template = file("../salt/salt_provisioner_script.tpl") - - vars = { - regcode = var.reg_code - } -} - resource "null_resource" "netweaver_provisioner" { count = var.provisioner == "salt" ? var.netweaver_count : 0 @@ -29,21 +21,11 @@ resource "null_resource" "netweaver_provisioner" { } provisioner "file" { - source = "../salt" - destination = "/tmp" - } - - provisioner "file" { - content = data.template_file.salt_provisioner.rendered - destination = "/tmp/salt_provisioner.sh" - } - - provisioner "file" { - content = < 1 ? 
"0${count.index + 1}" : ""} +hostname: ${terraform.workspace}-netweaver0${count.index + 1} network_domain: ${var.network_domain} additional_packages: [] reg_code: ${var.reg_code} @@ -56,7 +38,7 @@ host_ip: ${element(var.host_ips, count.index)} cluster_ssh_pub: ${var.cluster_ssh_pub} cluster_ssh_key: ${var.cluster_ssh_key} shared_storage_type: iscsi -sbd_disk_device: /dev/sde +sbd_disk_index: 2 iscsi_srv_ip: ${var.iscsi_srv_ip} ha_sap_deployment_repo: ${var.ha_sap_deployment_repo} monitoring_enabled: ${var.monitoring_enabled} @@ -67,18 +49,31 @@ ascs_instance_number: ${var.ascs_instance_number} ers_instance_number: ${var.ers_instance_number} pas_instance_number: ${var.pas_instance_number} aas_instance_number: ${var.aas_instance_number} +netweaver_product_id: ${var.netweaver_product_id} +netweaver_swpm_folder: ${var.netweaver_swpm_folder} +netweaver_sapcar_exe: ${var.netweaver_sapcar_exe} +netweaver_swpm_sar: ${var.netweaver_swpm_sar} +netweaver_swpm_extract_dir: ${var.netweaver_swpm_extract_dir} +netweaver_sapexe_folder: ${var.netweaver_sapexe_folder} +netweaver_additional_dvds: [${join(", ", formatlist("'%s'", var.netweaver_additional_dvds))}] netweaver_nfs_share: ${var.netweaver_nfs_share} -nw_inst_disk_device : /dev/sdb -hana_cluster_vip: ${var.hana_cluster_vip} +netweaver_inst_disk_device: ${format("%s%s","/dev/disk/by-id/google-", element(google_compute_instance.netweaver.*.attached_disk.0.device_name, count.index))} +hana_ip: ${var.hana_ip} +vpc_network_name: ${var.network_name} +ascs_route_name: ${google_compute_route.nw-ascs-route[0].name} +ers_route_name: ${google_compute_route.nw-ers-route[0].name} EOF - destination = "/tmp/grains" + destination = "/tmp/grains" + } } - provisioner "remote-exec" { - inline = [ - "${var.background ? "nohup" : ""} sudo sh /tmp/salt_provisioner.sh > /tmp/provisioning.log ${var.background ? "&" : ""}", - "return_code=$? 
&& sleep 1 && exit $return_code", - ] # Workaround to let the process start in background properly - } +module "netweaver_provision" { + source = "../../../generic_modules/salt_provisioner" + node_count = var.provisioner == "salt" ? var.netweaver_count : 0 + instance_ids = null_resource.netweaver_provisioner.*.id + user = "root" + private_key_location = var.private_key_location + public_ips = google_compute_instance.netweaver.*.network_interface.0.access_config.0.nat_ip + background = var.background } diff --git a/gcp/modules/netweaver_node/variables.tf b/gcp/modules/netweaver_node/variables.tf index 5ecdb7ae3..16c7cae5c 100644 --- a/gcp/modules/netweaver_node/variables.tf +++ b/gcp/modules/netweaver_node/variables.tf @@ -98,13 +98,55 @@ variable "aas_instance_number" { default = "02" } +variable "netweaver_product_id" { + description = "Netweaver installation product. Even though the module is about Netweaver, it can be used to install other SAP instances like S4/HANA" + type = string + default = "NW750.HDB.ABAPHA" +} + +variable "netweaver_swpm_folder" { + description = "Netweaver software SWPM folder, path relative from the `netweaver_inst_media` mounted point" + type = string + default = "" +} + +variable "netweaver_sapcar_exe" { + description = "Path to sapcar executable, relative from the `netweaver_inst_media` mounted point" + type = string + default = "" +} + +variable "netweaver_swpm_sar" { + description = "SWPM installer sar archive containing the installer, path relative from the `netweaver_inst_media` mounted point" + type = string + default = "" +} + +variable "netweaver_swpm_extract_dir" { + description = "Extraction path for Netweaver software SWPM folder, if SWPM sar file is provided" + type = string + default = "/sapmedia/NW/SWPM" +} + +variable "netweaver_sapexe_folder" { + description = "Software folder where needed sapexe `SAR` executables are stored (sapexe, sapexedb, saphostagent), path relative from the `netweaver_inst_media` mounted point" + 
type = string + default = "" +} + +variable "netweaver_additional_dvds" { + description = "Software folder with additional SAP software needed to install netweaver (NW export folder and HANA HDB client for example), path relative from the `netweaver_inst_media` mounted point" + type = list + default = [] +} + variable "netweaver_nfs_share" { description = "URL of the NFS share where /sapmnt and /usr/sap/{sid}/SYS will be mounted. This folder must have the sapmnt and usrsapsys folders" type = string } -variable "hana_cluster_vip" { - description = "HANA cluster vip" +variable "hana_ip" { + description = "Ip address of the hana database" type = string } @@ -140,16 +182,19 @@ variable "ha_sap_deployment_repo" { variable "monitoring_enabled" { description = "enable the host to be monitored by exporters, e.g node_exporter" + type = bool default = false } variable "devel_mode" { description = "Whether or not to install the HA/SAP packages from the `ha_sap_deployment_repo`" + type = bool default = false } variable "qa_mode" { description = "Whether or not to install the HA/SAP packages from the `ha_sap_deployment_repo`" + type = bool default = false } @@ -160,5 +205,12 @@ variable "provisioner" { variable "background" { description = "Run the provisioner execution in background if set to true finishing terraform execution" + type = bool default = false } + +variable "on_destroy_dependencies" { + description = "Resources objects need in the on_destroy script (everything that allows ssh connection)" + type = any + default = [] +} diff --git a/gcp/monitoring.tf b/gcp/monitoring.tf deleted file mode 100644 index 5ddd70e97..000000000 --- a/gcp/monitoring.tf +++ /dev/null @@ -1,20 +0,0 @@ -variable "timezone" { - description = "Timezone setting for all VMs" - default = "Europe/Berlin" -} - -variable "monitoring_srv_ip" { - description = "monitoring server address" - type = string - default = "" -} - -variable "devel_mode" { - description = "whether or not to install HA/SAP 
packages from ha_sap_deployment_repo" - default = false -} - -variable "monitoring_enabled" { - description = "enable the host to be monitored by exporters, e.g node_exporter" - default = false -} \ No newline at end of file diff --git a/gcp/network.tf b/gcp/network.tf deleted file mode 100644 index 9f2249dd9..000000000 --- a/gcp/network.tf +++ /dev/null @@ -1,60 +0,0 @@ -resource "google_compute_network" "ha_network" { - name = "${terraform.workspace}-${var.name}-network" - auto_create_subnetworks = "false" -} - -# temporary HA solution to create the static routes, eventually this routes must be created by the RA gcp-vpc-move-route -resource "google_compute_route" "hana-route" { - name = "hana-route" - dest_range = "${var.hana_cluster_vip}/32" - network = google_compute_network.ha_network.name - next_hop_instance = google_compute_instance.clusternodes.0.name - next_hop_instance_zone = element(data.google_compute_zones.available.names, 0) - priority = 1000 -} - -resource "google_compute_subnetwork" "ha_subnet" { - name = "${terraform.workspace}-${var.name}-subnet" - network = google_compute_network.ha_network.self_link - region = var.region - ip_cidr_range = var.ip_cidr_range -} - -resource "google_compute_firewall" "ha_firewall_allow_internal" { - name = "${terraform.workspace}-${var.name}-fw-internal" - network = google_compute_network.ha_network.name - source_ranges = [var.ip_cidr_range] - - allow { - protocol = "icmp" - } - - allow { - protocol = "udp" - ports = ["0-65535"] - } - - allow { - protocol = "tcp" - ports = ["0-65535"] - } -} - -resource "google_compute_firewall" "ha_firewall_allow_icmp" { - name = "${terraform.workspace}-${var.name}-fw-icmp" - network = google_compute_network.ha_network.name - - allow { - protocol = "icmp" - } -} - -resource "google_compute_firewall" "ha_firewall_allow_tcp" { - name = "${terraform.workspace}-${var.name}-fw-tcp" - network = google_compute_network.ha_network.name - - allow { - protocol = "tcp" - ports = ["22", "80", 
"443", "7630", "9668", "9100", "9664", "9090"] - } -} diff --git a/gcp/outputs.tf b/gcp/outputs.tf index 155991bb2..fb8b55399 100644 --- a/gcp/outputs.tf +++ b/gcp/outputs.tf @@ -7,15 +7,15 @@ # iSCSI server output "iscsisrv_ip" { - value = google_compute_instance.iscsisrv.network_interface.*.network_ip + value = module.iscsi_server.iscsisrv_ip } output "iscsisrv_public_ip" { - value = google_compute_instance.iscsisrv.network_interface.*.access_config.0.nat_ip + value = module.iscsi_server.iscsisrv_public_ip } output "iscsisrv_name" { - value = google_compute_instance.iscsisrv.*.name + value = module.iscsi_server.iscsisrv_name } output "iscsisrv_public_name" { @@ -25,15 +25,15 @@ output "iscsisrv_public_name" { # Cluster nodes output "cluster_nodes_ip" { - value = google_compute_instance.clusternodes.*.network_interface.0.network_ip + value = module.hana_node.cluster_nodes_ip } output "cluster_nodes_public_ip" { - value = google_compute_instance.clusternodes.*.network_interface.0.access_config.0.nat_ip + value = module.hana_node.cluster_nodes_public_ip } output "cluster_nodes_name" { - value = google_compute_instance.clusternodes.*.name + value = module.hana_node.cluster_nodes_name } output "cluster_nodes_public_name" { @@ -43,19 +43,19 @@ output "cluster_nodes_public_name" { # Monitoring output "monitoring_ip" { - value = join("", google_compute_instance.monitoring.*.network_interface.0.network_ip) + value = module.monitoring.monitoring_ip } output "monitoring_public_ip" { - value = join("", google_compute_instance.monitoring.*.network_interface.0.access_config.0.nat_ip) + value = module.monitoring.monitoring_public_ip } output "monitoring_name" { - value = join("", google_compute_instance.monitoring.*.name) + value = module.monitoring.monitoring_name } output "monitoring_public_name" { - value = "" + value = module.monitoring.monitoring_public_name } # drbd diff --git a/gcp/provider.tf b/gcp/provider.tf deleted file mode 100644 index 48adb6acc..000000000 --- 
a/gcp/provider.tf +++ /dev/null @@ -1,14 +0,0 @@ -provider "google" { - credentials = file(var.gcp_credentials_file) - project = var.project - region = var.region -} - -data "google_compute_zones" "available" { - region = var.region - status = "UP" -} - -terraform { - required_version = ">= 0.12" -} diff --git a/gcp/salt_provisioner.tf b/gcp/salt_provisioner.tf deleted file mode 100644 index d6358e038..000000000 --- a/gcp/salt_provisioner.tf +++ /dev/null @@ -1,226 +0,0 @@ -# This file contains the salt provisioning logic. -# It will be executed if 'provisioner' is set to salt (default option) and the -# iscsi and hana node resources are created (check triggers option). - -# Template file to launch the salt provisioing script -data "template_file" "salt_provisioner" { - template = file("../salt/salt_provisioner_script.tpl") - - vars = { - regcode = var.reg_code - } -} - -resource "null_resource" "iscsi_provisioner" { - count = var.provisioner == "salt" ? 1 : 0 - - triggers = { - iscsi_id = join(",", google_compute_instance.iscsisrv.*.id) - } - - connection { - host = google_compute_instance.iscsisrv.network_interface.0.access_config.0.nat_ip - type = "ssh" - user = "root" - private_key = file(var.private_key_location) - } - - provisioner "file" { - source = "../salt" - destination = "/tmp" - } - - provisioner "file" { - content = data.template_file.salt_provisioner.rendered - destination = "/tmp/salt_provisioner.sh" - } - - provisioner "file" { - content = < /tmp/provisioning.log ${var.background ? "&" : ""}", - "return_code=$? && sleep 1 && exit $return_code", - ] # Workaround to let the process start in background properly -} -} - -resource "null_resource" "hana_node_provisioner" { - count = var.provisioner == "salt" ? 
var.ninstances : 0 - - triggers = { - cluster_instance_ids = join(",", google_compute_instance.clusternodes.*.id) - } - - connection { - host = element( - google_compute_instance.clusternodes.*.network_interface.0.access_config.0.nat_ip, - count.index, - ) - type = "ssh" - user = "root" - private_key = file(var.private_key_location) - } - - provisioner "file" { - source = var.gcp_credentials_file - destination = "/root/google_credentials.json" - } - - provisioner "file" { - source = "../salt" - destination = "/tmp" - } - - provisioner "file" { - content = data.template_file.salt_provisioner.rendered - destination = "/tmp/salt_provisioner.sh" - } - - provisioner "file" { - content = < 1 ? "0${count.index + 1}" : ""} -network_domain: "tf.local" -shared_storage_type: iscsi -sbd_disk_device: /dev/sde -hana_inst_folder: ${var.hana_inst_folder} -hana_disk_device: ${var.hana_disk_device} -hana_backup_device: ${var.hana_backup_device} -hana_inst_disk_device: ${var.hana_inst_disk_device} -hana_fstype: ${var.hana_fstype} -hana_cluster_vip: ${var.hana_cluster_vip} -gcp_credentials_file: ${var.gcp_credentials_file} -sap_hana_deployment_bucket: ${var.sap_hana_deployment_bucket} -iscsi_srv_ip: ${var.iscsi_ip} -init_type: ${var.init_type} -cluster_ssh_pub: ${var.cluster_ssh_pub} -cluster_ssh_key: ${var.cluster_ssh_key} -qa_mode: ${var.qa_mode} -hwcct: ${var.hwcct} -reg_code: ${var.reg_code} -reg_email: ${var.reg_email} -monitoring_enabled: ${var.monitoring_enabled} -reg_additional_modules: {${join( - ", ", - formatlist( - "'%s': '%s'", - keys(var.reg_additional_modules), - values(var.reg_additional_modules), - ), -)}} -additional_packages: [${join(", ", formatlist("'%s'", var.additional_packages))}] -ha_sap_deployment_repo: ${var.ha_sap_deployment_repo} -EOF - - -destination = "/tmp/grains" -} - -provisioner "remote-exec" { - inline = [ - "${var.background ? "nohup" : ""} sudo sh /tmp/salt_provisioner.sh > /tmp/provisioning.log ${var.background ? "&" : ""}", - "return_code=$? 
&& sleep 1 && exit $return_code", - ] # Workaround to let the process start in background properly -} -} - -resource "null_resource" "monitoring_provisioner" { - count = var.provisioner == "salt" && var.monitoring_enabled ? 1 : 0 - - triggers = { - cluster_instance_id = google_compute_instance.monitoring.0.id - } - - connection { - host = google_compute_instance.monitoring.0.network_interface.0.access_config.0.nat_ip - type = "ssh" - user = "root" - private_key = file(var.private_key_location) - } - - provisioner "file" { - source = var.gcp_credentials_file - destination = "/root/google_credentials.json" - } - - provisioner "file" { - source = "../salt" - destination = "/tmp" - } - - provisioner "file" { - content = data.template_file.salt_provisioner.rendered - destination = "/tmp/salt_provisioner.sh" - } - - provisioner "file" { - content = < /tmp/provisioning.log ${var.background ? "&" : ""}", - "return_code=$? && sleep 1 && exit $return_code", - ] # Workaround to let the process start in background properly - } -} diff --git a/gcp/terraform.tfvars.example b/gcp/terraform.tfvars.example index 299a8ffe8..d7657a878 100644 --- a/gcp/terraform.tfvars.example +++ b/gcp/terraform.tfvars.example @@ -1,13 +1,34 @@ +# GCP project id project = "my-project" # Credentials file for GCP gcp_credentials_file = "my-project.json" -# Internal IPv4 range -ip_cidr_range = "10.0.0.0/24" +# Region where to deploy the configuration +region = "europe-west1" + +# Use an already existing vpc +#vpc_name = "my-vpc" + +# Use an already existing subnet in this virtual network +#subnet_name = "my-subnet" + +# vpc address range in CIDR notation +# Only used if the vpc is created by terraform or the user doesn't have read permissions in this +# resource. 
To use the current vpc address range set the value to an empty string +# To define custom ranges +#ip_cidr_range = "10.0.0.0/24" +# Or to use already existing address ranges +#ip_cidr_range = "" + +# SSH private key file +private_key_location = "/path/to/your/private/ssh/key" + +# SSH public key file +public_key_location = "/path/to/your/public/ssh/key" # IP for iSCSI server -iscsi_ip = "10.0.0.253" +#iscsi_srv_ip = "10.0.0.253" # Type of VM (vCPUs and RAM) machine_type = "n1-highmem-32" @@ -26,19 +47,22 @@ hana_backup_disk_type = "pd-standard" hana_backup_disk_size = "416" # HANA cluster vip -hana_cluster_vip = "10.0.1.200" - -# SSH private key file -private_key_location = "/path/to/your/private/ssh/key" - -# SSH public key file -public_key_location = "/path/to/your/public/ssh/key" - -# Region where to deploy the configuration -region = "europe-west1" +#hana_cluster_vip = "10.0.1.200" # The name of the GCP storage bucket in your project that contains the SAP HANA installation files -sap_hana_deployment_bucket = "MyHanaBucket" +sap_hana_deployment_bucket = "MyHanaBucket/51053381" +# Or you can combine the `sap_hana_deployment_bucket` with `hana_platform_folder` variable. +#sap_hana_deployment_bucket = "MyHanaBucket" +# Specify the path to already extracted HANA platform installation media, relative to sap_hana_deployment_bucket. 
+# This will have preference over hdbserver sar archive installation media +#hana_platform_folder = "51053381" + +# Or specify the path to the sapcar executable & HANA database server installation sar archive, relative to the sap_hana_deployment_bucket +# The sar archive will be extracted to path specified at hana_extract_dir (optional, by default /sapmedia/HANA) +# Make sure to use the latest/compatible version of sapcar executable, otherwise file may be extracted incorrectly +hana_sapcar_exe = "SAPCAR" +hdbserver_sar = "IMDB_SERVER.SAR" +hana_extract_dir = "/sapmedia/HDBSERVER" # Custom sles4sap image sles4sap_boot_image = "MySles4SapImage" @@ -46,9 +70,6 @@ sles4sap_boot_image = "MySles4SapImage" # Variable to control what is deployed in the nodes. Can be all, skip-hana or skip-cluster init_type = "all" -# Device used by the iSCSI server to provide LUNs -iscsidev = "/dev/sdb" - # Path to a custom ssh public key to upload to the nodes # Used for cluster communication for example cluster_ssh_pub = "salt://hana_node/files/sshkeys/cluster.id_rsa.pub" @@ -58,25 +79,19 @@ cluster_ssh_pub = "salt://hana_node/files/sshkeys/cluster.id_rsa.pub" cluster_ssh_key = "salt://hana_node/files/sshkeys/cluster.id_rsa" # Each host IP address (sequential order). 
-# example : host_ips = ["10.0.0.2", "10.0.0.3"] -host_ips = ["10.0.0.2", "10.0.0.3"] +#hana_ips = ["10.0.0.2", "10.0.0.3"] # Local folder where HANA installation master will be mounted -hana_inst_folder = "/root/hana_inst_media" - -# Device used by node where HANA will be installed -hana_disk_device = "/dev/sdb" - -# Device used by node where HANA backup will be stored -hana_backup_device = "/dev/sdc" - -# Device used by node where HANA will be downloaded -hana_inst_disk_device = "/dev/sdd" +hana_inst_folder = "/sapmedia/HANA" # Repository url used to install HA/SAP deployment packages" # The latest RPM packages can be found at: # https://download.opensuse.org/repositories/network:/ha-clustering:/Factory/{YOUR OS VERSION} # Contains the salt formulas rpm packages. +# To auto detect the SLE version +#ha_sap_deployment_repo = "http://download.opensuse.org/repositories/network:/ha-clustering:/Factory/" +# Specific SLE version used in all the created machines +#ha_sap_deployment_repo = "http://download.opensuse.org/repositories/network:/ha-clustering:/Factory/SLE_15/" ha_sap_deployment_repo = "" # Optional SUSE Customer Center Registration parameters @@ -105,7 +120,7 @@ ha_sap_deployment_repo = "" #monitoring_enabled = true # IP address of the machine where Prometheus and Grafana are running -monitoring_srv_ip = "10.0.0.4" +#monitoring_srv_ip = "10.0.0.4" # QA variables @@ -134,7 +149,6 @@ monitoring_srv_ip = "10.0.0.4" #drbd_data_disk_type = pd-standard # Each drbd cluster host IP address (sequential order). 
-# example : drbd_host_ips = ["10.0.0.10", "10.0.0.11"] #drbd_ips = ["10.0.0.10", "10.0.0.11"] #drbd_cluster_vip = "10.0.1.201" @@ -149,6 +163,24 @@ monitoring_srv_ip = "10.0.0.4" #netweaver_software_bucket = "MyNetweaverBucket" +# Netweaver installation required folders +# SAP SWPM installation folder, relative to netweaver_software_bucket folder +#netweaver_swpm_folder = "your_swpm" +# Or specify the path to the sapcar executable & SWPM installer sar archive, relative to netweaver_software_bucket folder +# The sar archive will be extracted to path specified at netweaver_swpm_extract_dir (optional, by default /sapmedia/NW/SWPM) +#netweaver_sapcar_exe = "your_sapcar_exe_file_path" +#netweaver_swpm_sar = "your_swpm_sar_file_path" +#netweaver_swpm_extract_dir = "location_to_extract_swpm_sar_absolute_path" +# Folder where needed SAR executables (sapexe, sapdbexe) are stored, relative to netweaver_software_bucket folder +#netweaver_sapexe_folder = "kernel_nw75_sar" +# Additional folders (added in start_dir.cd), relative to netweaver_software_bucket folder +#netweaver_additional_dvds = ["dvd1", "dvd2"] + #netweaver_ips = ["10.0.0.20", "10.0.0.21", "10.0.0.22", "10.0.0.23"] #netweaver_virtual_ips = ["10.0.1.25", "10.0.1.26", "10.0.0.27", "10.0.0.28"] + +# Pre deployment + +# Enable all some pre deployment steps (disabled by default) +#pre_deployment = true diff --git a/gcp/variables.tf b/gcp/variables.tf index 7f14cc7a5..791c41e4a 100644 --- a/gcp/variables.tf +++ b/gcp/variables.tf @@ -1,204 +1,229 @@ -# Global variables +# GCP related variables variable "project" { - type = string + description = "GCP project name where the infrastructure will be created" + type = string } -variable "gcp_credentials_file" { - type = string +variable "region" { + description = "GCP region where the deployment machines will be created" + type = string } -variable "public_key_location" { - type = string +variable "gcp_credentials_file" { + description = "GCP credentials file path in local 
machine" + type = string } -variable "private_key_location" { - type = string +variable "vpc_name" { + description = "Already existing vpc name used by the created infrastructure. If it's not set a new one will be created" + type = string + default = "" } -variable "machine_type" { - type = string - default = "n1-highmem-32" +variable "subnet_name" { + description = "Already existing subnet name used by the created infrastructure. If it's not set a new one will be created" + type = string + default = "" } -variable "iscsi_server_boot_image" { - type = string - default = "suse-byos-cloud/sles-15-sap-byos" +variable "create_firewall_rules" { + description = "Create predifined firewall rules for the connections outside the network (internal connections are always allowed). Set to false if custom firewall rules are already created for the used network" + type = bool + default = true } -variable "machine_type_iscsi_server" { - type = string - default = "custom-1-2048" +variable "ip_cidr_range" { + description = "Internal IPv4 range of the created network" + type = string + default = "10.0.0.0/24" } -variable "region" { - type = string +variable "public_key_location" { + description = "Path to a SSH public key used to connect to the created machines" + type = string } -variable "sles4sap_boot_image" { - type = string - default = "suse-byos-cloud/sles-15-sap-byos" +variable "private_key_location" { + description = "Path to a SSH private key used to connect to the created machines" + type = string } -variable "storage_url" { - type = string - default = "https://storage.googleapis.com" -} +# Deployment variables -variable "ninstances" { - type = string - default = "2" +variable "timezone" { + description = "Timezone setting for all VMs" + default = "Europe/Berlin" } -variable "name" { - description = "hostname, without the domain part" +variable "reg_code" { + description = "If informed, register the product using SUSEConnect" type = string - default = "hana" + default = "" 
} -variable "init_type" { - type = string - default = "all" +variable "reg_email" { + description = "Email used for the registration" + default = "" } -variable "iscsidev" { - description = "device iscsi for iscsi server" - type = string - default = "/dev/sdb" +# The module format must follow SUSEConnect convention: +# // +# Example: Suggested modules for SLES for SAP 15 +# - sle-module-basesystem/15/x86_64 +# - sle-module-desktop-applications/15/x86_64 +# - sle-module-server-applications/15/x86_64 +# - sle-ha/15/x86_64 (Need the same regcode as SLES for SAP) +# - sle-module-sap-applications/15/x86_64 + +variable "reg_additional_modules" { + description = "Map of the modules to be registered. Module name = Regcode, when needed." + type = map(string) + default = {} } -variable "iscsi_disks" { - description = "number of partitions attach to iscsi server. 0 means `all`." - default = 0 +variable "additional_packages" { + description = "Extra packages to be installed" + default = [] } -variable "cluster_ssh_pub" { - description = "path for the public key needed by the cluster" +# Repository url used to install HA/SAP deployment packages" +# The latest RPM packages can be found at: +# https://download.opensuse.org/repositories/network:/ha-clustering:/Factory/{YOUR OS VERSION} +# Contains the salt formulas rpm packages. +variable "ha_sap_deployment_repo" { + description = "Repository url used to install HA/SAP deployment packages. If SLE version is not set, the deployment will automatically detect the current OS version" type = string } -variable "cluster_ssh_key" { - description = "path for the private key needed by the cluster" +variable "cluster_ssh_pub" { + description = "Path to a SSH public key used during the cluster creation. 
The key must be passwordless" type = string } -# HANA variables - -variable "sap_hana_deployment_bucket" { - description = "GCP storage bucket that contains the SAP HANA installation files" +variable "cluster_ssh_key" { + description = "Path to a SSH private key used during the cluster creation. The key must be passwordless" type = string } -variable "hana_inst_folder" { - type = string - default = "/root/hana_inst_media" +variable "provisioner" { + description = "Used provisioner option. Available options: salt. Let empty to not use any provisioner" + default = "salt" } -variable "hana_data_disk_type" { - type = string - default = "pd-ssd" +variable "devel_mode" { + description = "Increase ha_sap_deployment_repo repository priority to get the packages from this repository instead of SLE official channels" + type = bool + default = false } -variable "hana_data_disk_size" { - type = string - default = "834" +variable "background" { + description = "Run the provisioner execution in background if set to true finishing terraform execution" + type = bool + default = false } -variable "hana_backup_disk_type" { - type = string - default = "pd-standard" -} +# Hana related variables -variable "hana_backup_disk_size" { - type = string - default = "416" +variable "hana_count" { + description = "Number of hana nodes" + type = string + default = "2" } -variable "hana_disk_device" { - description = "device where to install HANA" +variable "machine_type" { + description = "The instance type of the hana nodes" type = string - default = "/dev/sdb" + default = "n1-highmem-32" } -variable "hana_backup_device" { - description = "device where HANA backup is stored" +variable "init_type" { + description = "Type of deployment. 
Options: all-> Install HANA and HA; skip-hana-> Skip HANA installation; skip-cluster-> Skip HA cluster installation" type = string - default = "/dev/sdc" + default = "all" } -variable "hana_inst_disk_device" { - description = "device where to download HANA" +variable "sles4sap_boot_image" { + description = "The image used to create the hana machines" type = string - default = "/dev/sdd" + default = "suse-byos-cloud/sles-15-sp1-sap-byos" } -variable "hana_fstype" { - description = "Filesystem type to use for HANA" - type = string - default = "xfs" +variable "hana_ips" { + description = "ip addresses to set to the hana nodes. They must be in the same network addresses range defined in `ip_cidr_range`" + type = list(string) + default = [] } -variable "hana_cluster_vip" { - description = "IP address used to configure the hana cluster floating IP. It must be in other subnet than the machines!" +variable "sap_hana_deployment_bucket" { + description = "GCP storage bucket that contains the SAP HANA installation files" type = string } -# SUSE subscription variables +variable "hana_inst_folder" { + description = "Folder where the hana installation software will be downloaded" + type = string + default = "/sapmedia/HANA" +} -variable "reg_code" { - description = "If informed, register the product using SUSEConnect" +variable "hana_platform_folder" { + description = "Path to the hana platform media, relative to the hana_inst_folder" type = string default = "" } -variable "reg_email" { - description = "Email used for the registration" +variable "hana_sapcar_exe" { + description = "Path to the sapcar executable, relative to the hana_inst_folder" + type = string default = "" } -# The module format must follow SUSEConnect convention: -# // -# Example: Suggested modules for SLES for SAP 15 -# - sle-module-basesystem/15/x86_64 -# - sle-module-desktop-applications/15/x86_64 -# - sle-module-server-applications/15/x86_64 -# - sle-ha/15/x86_64 (Need the same regcode as SLES for SAP) -# 
- sle-module-sap-applications/15/x86_64 +variable "hdbserver_sar" { + description = "Path to the HANA database server installation sar archive, relative to the hana_inst_folder" + type = string + default = "" +} -variable "reg_additional_modules" { - description = "Map of the modules to be registered. Module name = Regcode, when needed." - type = map(string) - default = {} +variable "hana_extract_dir" { + description = "Absolute path to folder where SAP HANA sar archive will be extracted" + type = string + default = "/sapmedia/HANA" } -variable "additional_packages" { - description = "extra packages which should be installed" - default = [] +variable "hana_data_disk_type" { + description = "Disk type of the disks used to store hana database content" + type = string + default = "pd-ssd" } -# Repository url used to install HA/SAP deployment packages" -# The latest RPM packages can be found at: -# https://download.opensuse.org/repositories/network:/ha-clustering:/Factory/{YOUR OS VERSION} -# Contains the salt formulas rpm packages. 
-variable "ha_sap_deployment_repo" { - description = "Repository url used to install HA/SAP deployment packages" +variable "hana_data_disk_size" { + description = "Disk size of the disks used to store hana database content" type = string + default = "834" } -# Network variables -# Pay attention to set ip address according to the cidr range +variable "hana_backup_disk_type" { + description = "Disk type of the disks used to store hana database backup content" + type = string + default = "pd-standard" +} -variable "ip_cidr_range" { - description = "internal IPv4 range" +variable "hana_backup_disk_size" { + description = "Disk size of the disks used to store hana database backup content" + type = string + default = "416" } -variable "iscsi_ip" { - description = "IP for iSCSI server" +variable "hana_fstype" { + description = "Filesystem type used by the disk where hana is installed" + type = string + default = "xfs" } -variable "host_ips" { - description = "ip addresses to set to the nodes" - type = list(string) +variable "hana_cluster_vip" { + description = "IP address used to configure the hana cluster floating IP. It must be in other subnet than the machines!" + type = string + default = "" } variable "scenario_type" { @@ -206,62 +231,77 @@ variable "scenario_type" { default = "performance-optimized" } -variable "provisioner" { - description = "Used provisioner option. Available options: salt. 
Let empty to not use any provisioner" - default = "salt" +# Monitoring related variables +variable "monitoring_srv_ip" { + description = "Monitoring server address" + type = string + default = "" } - -variable "background" { - description = "Run the provisioner execution in background if set to true finishing terraform execution" +variable "monitoring_enabled" { + description = "Enable the host to be monitored by exporters, e.g node_exporter" + type = bool default = false } -# Specific QA variables +# Iscsi server related variables -variable "qa_mode" { - description = "define qa mode (Disable extra packages outside images)" - default = false +variable "iscsi_server_boot_image" { + description = "The image used to create the iscsi machines" + type = string + default = "suse-byos-cloud/sles-15-sp1-sap-byos" } -variable "hwcct" { - description = "Execute HANA Hardware Configuration Check Tool to bench filesystems" - type = bool - default = false +variable "machine_type_iscsi_server" { + description = "The instance type of the iscsi nodes" + type = string + default = "custom-1-2048" } -# drbd related variables +variable "iscsi_srv_ip" { + description = "IP for iSCSI server. It must be in the same network addresses range defined in `ip_cidr_range`" + type = string + default = "" +} + +variable "iscsi_disks" { + description = "Number of partitions attach to iscsi server. 0 means `all`." 
+ default = 0 +} + +# DRBD related variables variable "drbd_enabled" { - description = "enable the DRBD cluster for nfs" + description = "Enable the DRBD cluster for nfs" + type = bool default = false } variable "drbd_machine_type" { - description = "machine type for drbd nodes" + description = "The instance type of the drbd nodes" type = string default = "n1-standard-4" } variable "drbd_image" { - description = "image of the drbd nodes" + description = "The image used to create the drbd machines" type = string - default = "suse-byos-cloud/sles-15-sap-byos" + default = "suse-byos-cloud/sles-15-sp1-sap-byos" } variable "drbd_data_disk_size" { - description = "drbd data disk size" + description = "Disk size of the disks used to store drbd content" type = string default = "10" } variable "drbd_data_disk_type" { - description = "drbd data disk type" + description = "Disk type of the disks used to store drbd content" type = string default = "pd-standard" } variable "drbd_ips" { - description = "ip addresses to set to the drbd cluster nodes" + description = "ip addresses to set to the drbd cluster nodes. 
They must be in the same network addresses range defined in `ip_cidr_range`" type = list(string) default = [] } @@ -272,33 +312,34 @@ variable "drbd_cluster_vip" { default = "" } -# netweaver realted variables +# Netweaver related variables variable "netweaver_enabled" { - description = "enable netweaver cluster creation" + description = "Enable netweaver cluster creation" + type = bool default = false } variable "netweaver_machine_type" { - description = "machine type for netweaver nodes" + description = "The instance type of the netweaver nodes" type = string default = "n1-highmem-8" } variable "netweaver_image" { - description = "image of the netweaver nodes" + description = "The image used to create the netweaver machines" type = string - default = "suse-byos-cloud/sles-15-sap-byos" + default = "suse-byos-cloud/sles-15-sp1-sap-byos" } variable "netweaver_software_bucket" { - description = "gcp bucket where netweaver software is available" + description = "GCP storage bucket that contains the netweaver installation files" type = string default = "" } variable "netweaver_ips" { - description = "ip addresses to set to the netweaver cluster nodes" + description = "ip addresses to set to the netweaver cluster nodes. They must be in the same network addresses range defined in `ip_cidr_range`" type = list(string) default = [] } @@ -308,3 +349,67 @@ variable "netweaver_virtual_ips" { type = list(string) default = [] } + +variable "netweaver_product_id" { + description = "Netweaver installation product. 
Even though the module is about Netweaver, it can be used to install other SAP instances like S4/HANA" + type = string + default = "NW750.HDB.ABAPHA" +} + +variable "netweaver_swpm_folder" { + description = "Netweaver software SWPM folder, path relative from the `netweaver_inst_media` mounted point" + type = string + default = "" +} + +variable "netweaver_sapcar_exe" { + description = "Path to sapcar executable, relative from the `netweaver_inst_media` mounted point" + type = string + default = "" +} + +variable "netweaver_swpm_sar" { + description = "SWPM installer sar archive containing the installer, path relative from the `netweaver_inst_media` mounted point" + type = string + default = "" +} + +variable "netweaver_swpm_extract_dir" { + description = "Extraction path for Netweaver software SWPM folder, if SWPM sar file is provided" + type = string + default = "/sapmedia/NW/SWPM" +} + +variable "netweaver_sapexe_folder" { + description = "Software folder where needed sapexe `SAR` executables are stored (sapexe, sapexedb, saphostagent), path relative from the `netweaver_inst_media` mounted point" + type = string + default = "" +} + +variable "netweaver_additional_dvds" { + description = "Software folder with additional SAP software needed to install netweaver (NW export folder and HANA HDB client for example), path relative from the `netweaver_inst_media` mounted point" + type = list + default = [] +} + +# Specific QA variables + +variable "qa_mode" { + description = "Enable test/qa mode (disable extra packages usage not coming in the image)" + type = bool + default = false +} + +variable "hwcct" { + description = "Execute HANA Hardware Configuration Check Tool to bench filesystems" + type = bool + default = false +} + +# Pre deployment + +variable "pre_deployment" { + description = "Enable pre deployment local execution. 
Only available for clients running Linux" + type = bool + default = false +} diff --git a/generic_modules/local_exec/main.tf b/generic_modules/local_exec/main.tf new file mode 100644 index 000000000..d81e98f27 --- /dev/null +++ b/generic_modules/local_exec/main.tf @@ -0,0 +1,20 @@ +resource "null_resource" "pre_execution" { + count = var.enabled ? 1 : 0 + provisioner "local-exec" { + working_dir = "${path.module}/../.." + command = < /dev/null 2>&1 &", + "return_code=$? && sleep 1 && exit $return_code", + ] # Workaround to let the process start in background properly + } +} + +resource "null_resource" "provision" { + count = ! var.background ? var.node_count : 0 + triggers = { + triggers = join(",", var.instance_ids) + } + + connection { + host = element(var.public_ips, count.index) + type = "ssh" + user = var.user + password = var.password + private_key = var.private_key_location != "" ? file(var.private_key_location) : "" + } + + provisioner "file" { + source = "../salt" + destination = "/tmp" + } + + provisioner "remote-exec" { + inline = [ + "sudo sh /tmp/salt/provision.sh -sol /var/log/provisioning.log", + ] + } + + provisioner "remote-exec" { + inline = [ + "[ -f /var/run/reboot-needed ] && echo \"Rebooting the machine...\" && (nohup sudo sh -c 'systemctl stop sshd;/sbin/reboot' &) && sleep 5", + ] + on_failure = continue + } + + provisioner "remote-exec" { + inline = [ + "sudo sh /root/salt/provision.sh -pdql /var/log/provisioning.log", + ] + } +} diff --git a/generic_modules/salt_provisioner/variables.tf b/generic_modules/salt_provisioner/variables.tf new file mode 100644 index 000000000..3e79d7e60 --- /dev/null +++ b/generic_modules/salt_provisioner/variables.tf @@ -0,0 +1,38 @@ +variable "node_count" { + description = "Number of nodes to run the provisioner" + type = number +} + +variable "instance_ids" { + description = "List with the instance ids that will trigger the provisioner" + type = list(string) +} + +variable "user" { + description = "User for 
the SSH connection" + type = string + default = "root" +} + +variable "password" { + description = "Password for the SSH connection" + type = string + default = "" +} + +variable "private_key_location" { + description = "SSH private key for the connection. It has priority over password variable" + type = string + default = "" +} + +variable "public_ips" { + description = "List of ips used to connect through SSH" + type = list(string) +} + +variable "background" { + description = "Execute the provisioning in background" + type = bool + default = false +} diff --git a/libvirt/README.md b/libvirt/README.md index 964d7886a..5ae3319e6 100644 --- a/libvirt/README.md +++ b/libvirt/README.md @@ -42,96 +42,23 @@ terraform apply terraform destroy ``` -# Design - -This project is mainly based in [sumaform](https://github.com/uyuni-project/sumaform/) - -Components: - -- **modules**: Terraform modules to deploy a basic two nodes SAP HANA environment. -- **salt**: Salt provisioning states to configure the deployed machines with the -all required components. - - -### Terraform modules -- [hana_node](modules/hana_node): Specific SAP HANA node defintion. Basically it calls the -host module with some particular updates. -- [netweaver_node](modules/netweaver_node): SAP Netweaver environment allows to have -a Netweaver landscape working with the SAP Hana database. -- [drbd_node](modules/drbd_node): DRBD cluster for NFS share. -- [iscsi_server](modules/iscsi_server): Machine to host a iscsi target. -- [monitoring](modules/monitoring): Machine to host the monitoring stack. -- [shared_disk](modules/shared_disk): Shared disk, could be used as a sbd device. - -### Salt modules -- [pre_installation](../salt/pre_installation): Adjust the configuration needed for -defult module. -- [default](../salt/default): Default configuration for each node. Install the most -basic packages and apply basic configuration. 
-- [hana_node](../salt/hana_node): Apply SAP HANA nodes specific updates to install -SAP HANA and enable system replication according [pillar](../pillar_examples/libvirt/hana.sls) -data. You can also use the provided [automatic pillars](../pillar_examples/automatic/hana). -- [drbd_node](../salt/drbd_node): Apply DRBD nodes specific updates to configure -DRBD cluster for NFS share according [drbd pillar](../pillar_examples/libvirt/drbd/drbd.sls) -and [cluster pillar](../pillar_examples/libvirt/drbd/cluster.sls). You can also use the -provided [automatic pillars](../pillar_examples/automatic/drbd). -- [monitoring](../salt/monitoring): Apply prometheus monitoring service configuration. -- [iscsi_srv](../salt/iscsi_srv): Apply configuration for iscsi target. -- [netweaver_node](../salt/netweaver_node): Apply netweaver packages and formula. -- [qa_mode](../salt/qa_mode): Apply configuration for Quality Assurance testing. - # Specifications -* main.tf - -**main.tf** stores the configuration of the terraform deployment, the infrastructure configuration basically. Here some important tips to update the file properly (all variables are described in each module variables file): - -- **qemu_uri**: Uri of the libvirt provider. -- **base_image**: The cluster nodes image is selected updating the *image* parameter in the *base* module. -- **network_name** and **bridge**: If the cluster is deployed locally, the *network_name* should match with a currently available virtual network. If the cluster is deployed remotely, leave the *network_name* empty and set the *bridge* value with remote machine bridge network interface. -- **hana_inst_media**: Public media where SAP HANA installation files are stored. -- **iprange**: IP range addresses for the isolated network. -- **isolated_network_bridge**: A name for the isolated virtual network bridge device. It must be no longer than 15 characters. Leave empty to have it auto-generated by libvirt. 
-- **host_ips**: Each host IP address (sequential order). -- **shared_storage_type**: Shared storage type between iscsi and KVM raw file shared disk. Available options: `iscsi` and `shared-disk`. -- **iscsi_srv_ip**: IP address of the machine that will host the iscsi target (only used if `iscsi` is used as a shared storage for fencing) -- **iscsi_image**: Source image of the machine hosting the iscsi target (sles15 or above) (only used if `iscsi` is used as a shared storage for fencing) -- **iscsidev**: device used by the iSCSI server to provide LUNs. -- **iscsi_disks**: attached partitions number for iscsi server. -- **monitoring_image**: Source image of the machine hosting the monitoring stack (if not set, the same image as the hana nodes will be used) -- **monitoring_srv_ip**: IP address of the machine that will host the monitoring stack -- **ha_sap_deployment_repo**: Repository with HA and Salt formula packages. The latest RPM packages can be found at [https://download.opensuse.org/repositories/network:/ha-clustering:/Factory/{YOUR OS VERSION}](https://download.opensuse.org/repositories/network:/ha-clustering:/Factory/) -- **devel_mode**: Whether or not to install HA/SAP packages from ha_sap_deployment_repo -- **scenario_type**: SAP HANA scenario type. Available options: `performance-optimized` and `cost-optimized`. -- **provisioner**: Select the desired provisioner to configure the nodes. Salt is used by default: [salt](../salt). Let it empty to disable the provisioning part. -- **background**: Run the provisioning process in background finishing terraform execution. -- **reg_code**: Registration code for the installed base product (Ex.: SLES for SAP). This parameter is optional. If informed, the system will be registered against the SUSE Customer Center. -- **reg_email**: Email to be associated with the system registration. This parameter is optional. 
-**reg_additional_modules**: Additional optional modules and extensions to be registered (Ex.: Containers Module, HA module, Live Patching, etc). The variable is a key-value map, where the key is the _module name_ and the value is the _registration code_. If the _registration code_ is not needed, set an empty string as value. The module format must follow SUSEConnect convention: - - `//` - - *Example:* Suggested modules for SLES for SAP 15 - - - sle-module-basesystem/15/x86_64 - sle-module-desktop-applications/15/x86_64 - sle-module-server-applications/15/x86_64 - sle-ha/15/x86_64 (use the same regcode as SLES for SAP) - sle-module-sap-applications/15/x86_64 - -For more information about registration, check the ["Registering SUSE Linux Enterprise and Managing Modules/Extensions"](https://www.suse.com/documentation/sles-15/book_sle_deployment/data/cha_register_sle.html) guide. - -[Specific QA variables](../doc/qa.md#specific-qa-variables) +In order to deploy the environment, different configurations are available through the terraform variables. These variables can be configured using a `terraform.tfvars` file. An example is available in [terraform.tfvars.example](./terraform.tfvars.example). To find all the available variables check the [variables.tf](./variables.tf) file. -If the current *main.tf* is used, only *uri* (usually SAP HANA cluster deployment needs a powerful machine, not recommended to deploy locally) and *hana_inst_media* parameters must be updated. +## QA deployment -* hana.sls +The project has been created in order to provide the option to run the deployment in a `Test` or `QA` mode. This mode only enables the packages coming properly from SLE channels, so no other packages will be used. Find more information [here](../doc/qa.md). -**hana.sls** is used to configure the SAP HANA cluster.
Check the options in: [saphanabootstrap-formula](https://github.com/SUSE/saphanabootstrap-formula) +## Pillar files configuration -* cluster.sls +Besides the `terraform.tfvars` file usage to configure the deployment, a more advanced configuration is available through pillar files customization. Find more information [here](../pillar_examples/README.md). -**cluster.sls** is used to configure the HA cluster. Check the options in: [habootstrap-formula](https://github.com/SUSE/habootstrap-formula) +## Use already existing network resources +The usage of already existing network resources (virtual network and images) can be done configuring +the `terraform.tfvars` file and adjusting some variables. The example of how to use them is available +at [terraform.tfvars.example](terraform.tfvars.example). # Troubleshooting diff --git a/libvirt/infrastructure.tf b/libvirt/infrastructure.tf new file mode 100644 index 000000000..bfa683d0e --- /dev/null +++ b/libvirt/infrastructure.tf @@ -0,0 +1,60 @@ +provider "libvirt" { + uri = var.qemu_uri +} + +locals { + internal_network_name = var.network_name + internal_network_id = var.network_name != "" ? "" : libvirt_network.isolated_network.0.id + generic_volume_name = var.source_image != "" ? libvirt_volume.base_image.0.name : var.volume_name != "" ? var.volume_name : "" + iprange = var.iprange +} + +resource "libvirt_volume" "base_image" { + count = var.source_image != "" ? 1 : 0 + name = "${terraform.workspace}-baseimage" + source = var.source_image + pool = var.storage_pool +} + +# Internal network +resource "libvirt_network" "isolated_network" { + count = var.network_name == "" ? 
1 : 0 + name = "${terraform.workspace}-isolated" + bridge = var.isolated_network_bridge + mode = "none" + addresses = [var.iprange] + dhcp { + enabled = "false" + } + dns { + enabled = true + } + autostart = true +} + +# Create shared disks for sbd +module "sbd_disk" { + source = "./modules/shared_disk" + shared_disk_count = var.shared_storage_type == "shared-disk" ? 1 : 0 + name = "sbd" + pool = var.storage_pool + shared_disk_size = var.sbd_disk_size +} + +module "drbd_sbd_disk" { + source = "./modules/shared_disk" + shared_disk_count = var.drbd_enabled == true && var.drbd_shared_storage_type == "shared-disk" ? 1 : 0 + name = "drbd-sbd" + pool = var.storage_pool + shared_disk_size = var.drbd_shared_disk_size +} + +# Netweaver uses the shared disk for more things than only sbd +# Some SAP data is stored there to enable HA stack +module "netweaver_shared_disk" { + source = "./modules/shared_disk" + shared_disk_count = var.netweaver_enabled == true ? 1 : 0 + name = "netweaver-shared" + pool = var.storage_pool + shared_disk_size = var.netweaver_shared_disk_size +} diff --git a/libvirt/main.tf b/libvirt/main.tf index 165281fae..f7fd84a30 100644 --- a/libvirt/main.tf +++ b/libvirt/main.tf @@ -1,44 +1,50 @@ -provider "libvirt" { - uri = var.qemu_uri +module "local_execution" { + source = "../generic_modules/local_exec" + enabled = var.pre_deployment } -// --------------------------------------- -// this 2 resources are shared among the modules -// baseimage for hana and monitoring modules. -// you can also change it for each modules -// baseimage is "cloned" and used centrally by other domains -resource "libvirt_volume" "base_image" { - name = "${terraform.workspace}-baseimage" - source = var.base_image - pool = var.storage_pool -} +# This locals entry is used to store the IP addresses of all the machines. 
+# Autogenerated addresses example based on 192.168.135.0/24 +# Iscsi server: 192.168.135.4 +# Monitoring: 192.168.135.5 +# Hana ips: 192.168.135.10, 192.168.135.11 +# Hana cluster vip: 192.168.135.12 +# DRBD ips: 192.168.135.20, 192.168.135.21 +# DRBD cluster vip: 192.168.135.22 +# Netweaver ips: 192.168.135.30, 192.168.135.31, 192.168.135.32, 192.168.135.33 +# Netweaver virtual ips: 192.168.135.34, 192.168.135.35, 192.168.135.36, 192.168.135.37 +# If the addresses are provided by the user they will always have preference +locals { + iscsi_srv_ip = var.iscsi_srv_ip != "" ? var.iscsi_srv_ip : cidrhost(local.iprange, 4) + monitoring_srv_ip = var.monitoring_srv_ip != "" ? var.monitoring_srv_ip : cidrhost(local.iprange, 5) + + hana_ip_start = 10 + hana_ips = length(var.hana_ips) != 0 ? var.hana_ips : [for ip_index in range(local.hana_ip_start, local.hana_ip_start + var.hana_count) : cidrhost(local.iprange, ip_index)] + hana_cluster_vip = var.hana_cluster_vip != "" ? var.hana_cluster_vip : cidrhost(local.iprange, local.hana_ip_start + var.hana_count) + + # 2 is hardcoded for drbd because we always deploy 2 machines + drbd_ip_start = 20 + drbd_ips = length(var.drbd_ips) != 0 ? var.drbd_ips : [for ip_index in range(local.drbd_ip_start, local.drbd_ip_start + 2) : cidrhost(local.iprange, ip_index)] + drbd_cluster_vip = var.drbd_cluster_vip != "" ? var.drbd_cluster_vip : cidrhost(local.iprange, local.drbd_ip_start + 2) -// the network used by all modules -resource "libvirt_network" "isolated_network" { - name = "${terraform.workspace}-isolated" - bridge = var.isolated_network_bridge - mode = "none" - addresses = [var.iprange] - dhcp { - enabled = "false" - } - dns { - enabled = true - } - autostart = true + # 4 is hardcoded for netweaver because we always deploy 4 machines + netweaver_ip_start = 30 + netweaver_ips = length(var.netweaver_ips) != 0 ?
var.netweaver_ips : [for ip_index in range(local.netweaver_ip_start, local.netweaver_ip_start + 4) : cidrhost(local.iprange, ip_index)] + netweaver_virtual_ips = length(var.netweaver_virtual_ips) != 0 ? var.netweaver_virtual_ips : [for ip_index in range(local.netweaver_ip_start, local.netweaver_ip_start + 4) : cidrhost(local.iprange, ip_index + 4)] } -// --------------------------------------- module "iscsi_server" { source = "./modules/iscsi_server" iscsi_count = var.shared_storage_type == "iscsi" ? 1 : 0 - iscsi_image = var.iscsi_image - vcpu = 2 - memory = 4096 + source_image = var.iscsi_source_image + volume_name = var.iscsi_source_image != "" ? "" : (var.iscsi_volume_name != "" ? var.iscsi_volume_name : local.generic_volume_name) + vcpu = var.iscsi_vcpu + memory = var.iscsi_memory bridge = "br0" - pool = var.storage_pool - network_id = libvirt_network.isolated_network.id - iscsi_srv_ip = var.iscsi_srv_ip + storage_pool = var.storage_pool + isolated_network_id = local.internal_network_id + isolated_network_name = local.internal_network_name + iscsi_srv_ip = local.iscsi_srv_ip iscsidev = "/dev/vdb" iscsi_disks = var.iscsi_disks reg_code = var.reg_code @@ -49,33 +55,31 @@ module "iscsi_server" { background = var.background } -module "sbd_disk" { - source = "./modules/shared_disk" - shared_disk_count = var.shared_storage_type == "shared-disk" ? 1 : 0 - name = "sbd" - pool = var.storage_pool - shared_disk_size = 104857600 -} - -// hana01 and hana02 module "hana_node" { source = "./modules/hana_node" name = "hana" - base_image_id = libvirt_volume.base_image.id - hana_count = 2 - vcpu = 4 - memory = 32678 + source_image = var.hana_source_image + volume_name = var.hana_source_image != "" ? "" : (var.hana_volume_name != "" ? 
var.hana_volume_name : local.generic_volume_name) + hana_count = var.hana_count + vcpu = var.hana_node_vcpu + memory = var.hana_node_memory bridge = "br0" - pool = var.storage_pool - network_id = libvirt_network.isolated_network.id - host_ips = var.host_ips + isolated_network_id = local.internal_network_id + isolated_network_name = local.internal_network_name + storage_pool = var.storage_pool + host_ips = local.hana_ips hana_inst_folder = var.hana_inst_folder hana_inst_media = var.hana_inst_media - hana_disk_size = "68719476736" + hana_platform_folder = var.hana_platform_folder + hana_sapcar_exe = var.hana_sapcar_exe + hdbserver_sar = var.hdbserver_sar + hana_extract_dir = var.hana_extract_dir + hana_disk_size = var.hana_node_disk_size hana_fstype = var.hana_fstype + hana_cluster_vip = local.hana_cluster_vip shared_storage_type = var.shared_storage_type sbd_disk_id = module.sbd_disk.id - iscsi_srv_ip = var.iscsi_srv_ip + iscsi_srv_ip = module.iscsi_server.output_data.private_addresses.0 reg_code = var.reg_code reg_email = var.reg_email reg_additional_modules = var.reg_additional_modules @@ -89,27 +93,20 @@ module "hana_node" { monitoring_enabled = var.monitoring_enabled } -module "drbd_sbd_disk" { - source = "./modules/shared_disk" - shared_disk_count = var.drbd_enabled == true && var.drbd_shared_storage_type == "shared-disk" ? 1 : 0 - name = "drbd-sbd" - pool = var.storage_pool - shared_disk_size = 104857600 -} - -// drbd01 and drbd02 module "drbd_node" { source = "./modules/drbd_node" name = "drbd" - base_image_id = libvirt_volume.base_image.id + source_image = var.drbd_source_image + volume_name = var.drbd_source_image != "" ? "" : (var.drbd_volume_name != "" ? var.drbd_volume_name : local.generic_volume_name) drbd_count = var.drbd_enabled == true ? 
var.drbd_count : 0 - vcpu = 1 - memory = 1024 + vcpu = var.drbd_node_vcpu + memory = var.drbd_node_memory bridge = "br0" - host_ips = var.drbd_ips - drbd_disk_size = "10737418240" #10GB + host_ips = local.drbd_ips + drbd_cluster_vip = local.drbd_cluster_vip + drbd_disk_size = var.drbd_disk_size shared_storage_type = var.drbd_shared_storage_type - iscsi_srv_ip = var.iscsi_srv_ip + iscsi_srv_ip = module.iscsi_server.output_data.private_addresses.0 reg_code = var.reg_code reg_email = var.reg_email reg_additional_modules = var.reg_additional_modules @@ -118,8 +115,9 @@ module "drbd_node" { provisioner = var.provisioner background = var.background monitoring_enabled = var.monitoring_enabled - network_id = libvirt_network.isolated_network.id - pool = var.storage_pool + isolated_network_id = local.internal_network_id + isolated_network_name = local.internal_network_name + storage_pool = var.storage_pool sbd_disk_id = module.drbd_sbd_disk.id } @@ -127,53 +125,57 @@ module "monitoring" { source = "./modules/monitoring" name = "monitoring" monitoring_enabled = var.monitoring_enabled - monitoring_image = var.monitoring_image - base_image_id = libvirt_volume.base_image.id - vcpu = 4 - memory = 4095 + source_image = var.monitoring_source_image + volume_name = var.monitoring_source_image != "" ? "" : (var.monitoring_volume_name != "" ? 
var.monitoring_volume_name : local.generic_volume_name) + vcpu = var.monitoring_vcpu + memory = var.monitoring_memory bridge = "br0" - pool = var.storage_pool - network_id = libvirt_network.isolated_network.id - monitoring_srv_ip = var.monitoring_srv_ip + storage_pool = var.storage_pool + isolated_network_id = local.internal_network_id + isolated_network_name = local.internal_network_name + monitoring_srv_ip = local.monitoring_srv_ip reg_code = var.reg_code reg_email = var.reg_email reg_additional_modules = var.reg_additional_modules ha_sap_deployment_repo = var.ha_sap_deployment_repo provisioner = var.provisioner background = var.background - monitored_hosts = var.host_ips - drbd_monitored_hosts = var.drbd_enabled ? var.drbd_ips : [] - nw_monitored_hosts = var.netweaver_enabled ? var.nw_ips : [] -} - -module "nw_shared_disk" { - source = "./modules/shared_disk" - shared_disk_count = var.netweaver_enabled == true ? 1 : 0 - name = "netweaver-shared" - pool = var.storage_pool - shared_disk_size = 68719476736 + hana_targets = concat(local.hana_ips, [local.hana_cluster_vip]) # we use the vip to target the active hana instance + drbd_targets = var.drbd_enabled ? local.drbd_ips : [] + netweaver_targets = var.netweaver_enabled ? local.netweaver_virtual_ips : [] } module "netweaver_node" { - source = "./modules/netweaver_node" - name = "netweaver" - base_image_id = libvirt_volume.base_image.id - netweaver_count = var.netweaver_enabled == true ? 
4 : 0 - vcpu = 4 - memory = 8192 - bridge = "br0" - pool = var.storage_pool - network_id = libvirt_network.isolated_network.id - host_ips = var.nw_ips - virtual_host_ips = var.nw_virtual_ips - shared_disk_id = module.nw_shared_disk.id - netweaver_inst_media = var.netweaver_inst_media - netweaver_nfs_share = var.netweaver_nfs_share - reg_code = var.reg_code - reg_email = var.reg_email - reg_additional_modules = var.reg_additional_modules - ha_sap_deployment_repo = var.ha_sap_deployment_repo - provisioner = var.provisioner - background = var.background - monitoring_enabled = var.monitoring_enabled + source = "./modules/netweaver_node" + name = "netweaver" + source_image = var.netweaver_source_image + volume_name = var.netweaver_source_image != "" ? "" : (var.netweaver_volume_name != "" ? var.netweaver_volume_name : local.generic_volume_name) + netweaver_count = var.netweaver_enabled == true ? 4 : 0 + vcpu = var.netweaver_node_vcpu + memory = var.netweaver_node_memory + bridge = "br0" + storage_pool = var.storage_pool + isolated_network_id = local.internal_network_id + isolated_network_name = local.internal_network_name + host_ips = local.netweaver_ips + virtual_host_ips = local.netweaver_virtual_ips + shared_disk_id = module.netweaver_shared_disk.id + hana_ip = local.hana_cluster_vip + netweaver_product_id = var.netweaver_product_id + netweaver_inst_media = var.netweaver_inst_media + netweaver_swpm_folder = var.netweaver_swpm_folder + netweaver_sapcar_exe = var.netweaver_sapcar_exe + netweaver_swpm_sar = var.netweaver_swpm_sar + netweaver_swpm_extract_dir = var.netweaver_swpm_extract_dir + netweaver_sapexe_folder = var.netweaver_sapexe_folder + netweaver_additional_dvds = var.netweaver_additional_dvds + netweaver_nfs_share = var.drbd_enabled ? 
"${local.drbd_cluster_vip}:/HA1" : var.netweaver_nfs_share + reg_code = var.reg_code + reg_email = var.reg_email + reg_additional_modules = var.reg_additional_modules + ha_sap_deployment_repo = var.ha_sap_deployment_repo + provisioner = var.provisioner + background = var.background + monitoring_enabled = var.monitoring_enabled + devel_mode = var.devel_mode } diff --git a/libvirt/modules/drbd_node/main.tf b/libvirt/modules/drbd_node/main.tf index f53509841..90daf4764 100644 --- a/libvirt/modules/drbd_node/main.tf +++ b/libvirt/modules/drbd_node/main.tf @@ -1,32 +1,33 @@ -resource "libvirt_volume" "drbd_main_disk" { - name = "${terraform.workspace}-${var.name}${var.drbd_count > 1 ? "-${count.index + 1}" : ""}-main-disk" - base_volume_id = var.base_image_id - pool = var.pool +resource "libvirt_volume" "drbd_image_disk" { count = var.drbd_count + name = "${terraform.workspace}-${var.name}-${count.index + 1}-main-disk" + source = var.source_image + base_volume_name = var.volume_name + pool = var.storage_pool } -resource "libvirt_volume" "drbd_disk" { - name = "${terraform.workspace}-${var.name}${var.drbd_count > 1 ? "-${count.index + 1}" : ""}-drbd-disk" - pool = var.pool +resource "libvirt_volume" "drbd_data_disk" { + name = "${terraform.workspace}-${var.name}-${count.index + 1}-drbd-disk" + pool = var.storage_pool count = var.drbd_count size = var.drbd_disk_size } resource "libvirt_domain" "drbd_domain" { - name = "${terraform.workspace}-${var.name}${var.drbd_count > 1 ? 
"-${count.index + 1}" : ""}" + name = "${terraform.workspace}-${var.name}-${count.index + 1}" memory = var.memory vcpu = var.vcpu count = var.drbd_count qemu_agent = true - dynamic "disk" { + dynamic "disk" { for_each = [ - { - "vol_id" = element(libvirt_volume.drbd_main_disk.*.id, count.index) - }, - { - "vol_id" = element(libvirt_volume.drbd_disk.*.id, count.index) - }, - ] + { + "vol_id" = element(libvirt_volume.drbd_image_disk.*.id, count.index) + }, + { + "vol_id" = element(libvirt_volume.drbd_data_disk.*.id, count.index) + }, + ] content { volume_id = disk.value.vol_id } @@ -34,19 +35,18 @@ resource "libvirt_domain" "drbd_domain" { // handle additional disks dynamic "disk" { - for_each = slice( - [ - { - // we set null but it will never reached because the slice with 0 cut it off - "volume_id" = var.shared_storage_type == "shared-disk" ? var.sbd_disk_id : "null" - }, - ], 0, var.shared_storage_type == "shared-disk" ? 1 : 0, ) - content { - volume_id = disk.value.volume_id - } + for_each = slice( + [ + { + // we set null but it will never reached because the slice with 0 cut it off + "volume_id" = var.shared_storage_type == "shared-disk" ? var.sbd_disk_id : "null" + }, + ], 0, var.shared_storage_type == "shared-disk" ? 1 : 0, ) + content { + volume_id = disk.value.volume_id + } } - network_interface { wait_for_lease = true network_name = var.network_name @@ -56,8 +56,8 @@ resource "libvirt_domain" "drbd_domain" { network_interface { wait_for_lease = false - network_id = var.network_id - hostname = "${var.name}${var.drbd_count > 1 ? 
"0${count.index + 1}" : ""}" + network_name = var.isolated_network_name + network_id = var.isolated_network_id addresses = [element(var.host_ips, count.index)] } @@ -91,8 +91,18 @@ resource "libvirt_domain" "drbd_domain" { output "output_data" { value = { id = libvirt_domain.drbd_domain.*.id - hostname = libvirt_domain.drbd_domain.*.name + name = libvirt_domain.drbd_domain.*.name private_addresses = var.host_ips addresses = libvirt_domain.drbd_domain.*.network_interface.0.addresses.0 } } + +module "drbd_on_destroy" { + source = "../../../generic_modules/on_destroy" + node_count = var.drbd_count + instance_ids = libvirt_domain.drbd_domain.*.id + user = "root" + password = "linux" + public_ips = libvirt_domain.drbd_domain.*.network_interface.0.addresses.0 + dependencies = [libvirt_domain.drbd_domain] +} diff --git a/libvirt/modules/drbd_node/salt_provisioner.tf b/libvirt/modules/drbd_node/salt_provisioner.tf index 99249507c..af6ba67fa 100644 --- a/libvirt/modules/drbd_node/salt_provisioner.tf +++ b/libvirt/modules/drbd_node/salt_provisioner.tf @@ -2,15 +2,6 @@ # It will be executed if 'provisioner' is set to 'salt' (default option) and the # libvirt_domain.domain (drbd_node) resources are created (check triggers option). -# Template file to launch the salt provisioning script -data "template_file" "drbd_salt_provisioner" { - template = file("../salt/salt_provisioner_script.tpl") - - vars = { - regcode = var.reg_code - } -} - resource "null_resource" "drbd_node_provisioner" { count = var.provisioner == "salt" ? var.drbd_count : 0 triggers = { @@ -24,34 +15,26 @@ resource "null_resource" "drbd_node_provisioner" { } provisioner "file" { - source = "../salt" - destination = "/tmp" - } - - provisioner "file" { - content = data.template_file.drbd_salt_provisioner.rendered - destination = "/tmp/salt_provisioner.sh" - } - - provisioner "file" { - content = < 1 ? "0${count.index + 1}" : ""} + content = < /tmp/provisioning.log ${var.background ? "&" : ""}", - "return_code=$? 
&& sleep 1 && exit $return_code", - ] # Workaround to let the process start in background properly - } +module "drbd_provision" { + source = "../../../generic_modules/salt_provisioner" + node_count = var.provisioner == "salt" ? var.drbd_count : 0 + instance_ids = null_resource.drbd_node_provisioner.*.id + user = "root" + password = "linux" + public_ips = libvirt_domain.drbd_domain.*.network_interface.0.addresses.0 + background = var.background } diff --git a/libvirt/modules/drbd_node/variables.tf b/libvirt/modules/drbd_node/variables.tf index ac8fcd3fd..47734e5a3 100644 --- a/libvirt/modules/drbd_node/variables.tf +++ b/libvirt/modules/drbd_node/variables.tf @@ -33,6 +33,7 @@ variable "ha_sap_deployment_repo" { variable "devel_mode" { description = "Whether or not to install the HA/SAP packages from the `ha_sap_deployment_repo`" + type = bool default = false } @@ -67,6 +68,11 @@ variable "host_ips" { type = list(string) } +variable "drbd_cluster_vip" { + description = "IP address used to configure the drbd cluster floating IP. It must be in other subnet than the machines!" + type = string +} + variable "shared_storage_type" { description = "used shared storage type for fencing (sbd). Available options: iscsi, shared-disk." type = string @@ -86,14 +92,22 @@ variable "provisioner" { variable "background" { description = "Run the provisioner execution in background if set to true finishing terraform execution" + type = bool default = false } // Provider-specific variables -variable "base_image_id" { - description = "base image id which the module will use. You can create a baseimage and module will use it. Created in main.tf" +variable "source_image" { + description = "Source image used to boot the machines (qcow2 format). It's possible to specify the path to a local (relative to the machine running the terraform command) image or a remote one. Remote images have to be specified using HTTP(S) urls for now." 
type = string + default = "" +} + +variable "volume_name" { + description = "Already existing volume name used to boot the machines. It must be in the same storage pool. It's only used if source_image is not provided" + type = string + default = "" } variable "memory" { @@ -111,8 +125,13 @@ variable "mac" { default = "" } -variable "network_id" { - description = "network id to be injected into domain. normally the isolated network is created in main.tf" +variable "isolated_network_id" { + description = "Network id, internally created by terraform" + type = string +} + +variable "isolated_network_name" { + description = "Network name to attach the isolated network interface" type = string } @@ -130,6 +149,7 @@ variable "bridge" { variable "monitoring_enabled" { description = "enable the host to be monitored by exporters, e.g node_exporter" + type = bool default = false } @@ -139,7 +159,7 @@ variable "sbd_disk_id" { type = string } -variable "pool" { +variable "storage_pool" { description = "libvirt storage pool name for VM disks" default = "default" } diff --git a/libvirt/modules/hana_node/main.tf b/libvirt/modules/hana_node/main.tf index d141b4fa4..44e80d076 100644 --- a/libvirt/modules/hana_node/main.tf +++ b/libvirt/modules/hana_node/main.tf @@ -1,32 +1,33 @@ -resource "libvirt_volume" "hana_main_disk" { - name = "${terraform.workspace}-${var.name}${var.hana_count > 1 ? "-${count.index + 1}" : ""}-main-disk" - base_volume_id = var.base_image_id - pool = var.pool +resource "libvirt_volume" "hana_image_disk" { count = var.hana_count + name = "${terraform.workspace}-${var.name}-${count.index + 1}-main-disk" + source = var.source_image + base_volume_name = var.volume_name + pool = var.storage_pool } -resource "libvirt_volume" "hana_disk" { - name = "${terraform.workspace}-${var.name}${var.hana_count > 1 ? 
"-${count.index + 1}" : ""}-hana-disk" - pool = var.pool +resource "libvirt_volume" "hana_data_disk" { + name = "${terraform.workspace}-${var.name}-${count.index + 1}-hana-disk" + pool = var.storage_pool count = var.hana_count size = var.hana_disk_size } resource "libvirt_domain" "hana_domain" { - name = "${terraform.workspace}-${var.name}${var.hana_count > 1 ? "-${count.index + 1}" : ""}" + name = "${terraform.workspace}-${var.name}-${count.index + 1}" memory = var.memory vcpu = var.vcpu count = var.hana_count qemu_agent = true - dynamic "disk" { + dynamic "disk" { for_each = [ - { - "vol_id" = element(libvirt_volume.hana_main_disk.*.id, count.index) - }, - { - "vol_id" = element(libvirt_volume.hana_disk.*.id, count.index) - }, - ] + { + "vol_id" = element(libvirt_volume.hana_image_disk.*.id, count.index) + }, + { + "vol_id" = element(libvirt_volume.hana_data_disk.*.id, count.index) + }, + ] content { volume_id = disk.value.vol_id } @@ -34,18 +35,18 @@ resource "libvirt_domain" "hana_domain" { // handle additional disks dynamic "disk" { - for_each = slice( - [ - { - // we set null but it will never reached because the slice with 0 cut it off - "volume_id" = var.shared_storage_type == "shared-disk" ? var.sbd_disk_id : "null" - }, - ], 0, var.shared_storage_type == "shared-disk" ? 1 : 0, ) - content { - volume_id = disk.value.volume_id - } -} - + for_each = slice( + [ + { + // we set null but it will never reached because the slice with 0 cut it off + "volume_id" = var.shared_storage_type == "shared-disk" ? var.sbd_disk_id : "null" + }, + ], 0, var.shared_storage_type == "shared-disk" ? 1 : 0 + ) + content { + volume_id = disk.value.volume_id + } + } network_interface { wait_for_lease = true @@ -56,8 +57,8 @@ resource "libvirt_domain" "hana_domain" { network_interface { wait_for_lease = false - network_id = var.network_id - hostname = "${var.name}${var.hana_count > 1 ? 
"0${count.index + 1}" : ""}" + network_name = var.isolated_network_name + network_id = var.isolated_network_id addresses = [element(var.host_ips, count.index)] } @@ -91,8 +92,18 @@ resource "libvirt_domain" "hana_domain" { output "output_data" { value = { id = libvirt_domain.hana_domain.*.id - hostname = libvirt_domain.hana_domain.*.name + name = libvirt_domain.hana_domain.*.name private_addresses = var.host_ips addresses = libvirt_domain.hana_domain.*.network_interface.0.addresses.0 } } + +module "hana_on_destroy" { + source = "../../../generic_modules/on_destroy" + node_count = var.hana_count + instance_ids = libvirt_domain.hana_domain.*.id + user = "root" + password = "linux" + public_ips = libvirt_domain.hana_domain.*.network_interface.0.addresses.0 + dependencies = [libvirt_domain.hana_domain] +} diff --git a/libvirt/modules/hana_node/salt_provisioner.tf b/libvirt/modules/hana_node/salt_provisioner.tf index a9ccd64fa..26ccdd97c 100644 --- a/libvirt/modules/hana_node/salt_provisioner.tf +++ b/libvirt/modules/hana_node/salt_provisioner.tf @@ -2,15 +2,6 @@ # It will be executed if 'provisioner' is set to 'salt' (default option) and the # libvirt_domain.domain (hana_node) resources are created (check triggers option). -# Template file to launch the salt provisioning script -data "template_file" "hana_salt_provisioner" { - template = file("../salt/salt_provisioner_script.tpl") - - vars = { - regcode = var.reg_code - } -} - resource "null_resource" "hana_node_provisioner" { count = var.provisioner == "salt" ? var.hana_count : 0 triggers = { @@ -24,25 +15,15 @@ resource "null_resource" "hana_node_provisioner" { } provisioner "file" { - source = "../salt" - destination = "/tmp" - } - - provisioner "file" { - content = data.template_file.hana_salt_provisioner.rendered - destination = "/tmp/salt_provisioner.sh" - } - - provisioner "file" { - content = < 1 ? "0${count.index + 1}" : ""} + content = < /tmp/provisioning.log ${var.background ? "&" : ""}", - "return_code=$? 
&& sleep 1 && exit $return_code", - ] # Workaround to let the process start in background properly - } - } +module "hana_provision" { + source = "../../../generic_modules/salt_provisioner" + node_count = var.provisioner == "salt" ? var.hana_count : 0 + instance_ids = null_resource.hana_node_provisioner.*.id + user = "root" + password = "linux" + public_ips = libvirt_domain.hana_domain.*.network_interface.0.addresses.0 + background = var.background +} diff --git a/libvirt/modules/hana_node/variables.tf b/libvirt/modules/hana_node/variables.tf index 1e636b91f..c031cb21e 100644 --- a/libvirt/modules/hana_node/variables.tf +++ b/libvirt/modules/hana_node/variables.tf @@ -33,6 +33,7 @@ variable "ha_sap_deployment_repo" { variable "devel_mode" { description = "Whether or not to install the HA/SAP packages from the `ha_sap_deployment_repo`" + type = bool default = false } @@ -73,6 +74,11 @@ variable "host_ips" { type = list(string) } +variable "hana_cluster_vip" { + description = "IP address used to configure the hana cluster floating IP. It must be in other subnet than the machines!" + type = string +} + variable "shared_storage_type" { description = "used shared storage type for fencing (sbd). Available options: iscsi, shared-disk." 
type = string @@ -100,6 +106,30 @@ variable "hana_inst_folder" { type = string } +variable "hana_platform_folder" { + description = "Path to the hana platform media, relative to the 'hana_inst_media' mounting point" + type = string + default = "" +} + +variable "hana_sapcar_exe" { + description = "Path to the sapcar executable, relative to the 'hana_inst_media' mounting point" + type = string + default = "" +} + +variable "hdbserver_sar" { + description = "Path to the HANA database server installation sar archive, relative to the 'hana_inst_media' mounting point" + type = string + default = "" +} + +variable "hana_extract_dir" { + description = "Absolute path to folder where SAP HANA sar archive will be extracted" + type = string + default = "/sapmedia/HANA" +} + variable "scenario_type" { description = "Deployed scenario type. Available options: performance-optimized, cost-optimized" default = "performance-optimized" @@ -112,14 +142,22 @@ variable "provisioner" { variable "background" { description = "Run the provisioner execution in background if set to true finishing terraform execution" + type = bool default = false } // Provider-specific variables -variable "base_image_id" { - description = "base image id which the module will use. You can create a baseimage and module will use it. Created in main.tf" +variable "source_image" { + description = "Source image used to boot the machines (qcow2 format). It's possible to specify the path to a local (relative to the machine running the terraform command) image or a remote one. Remote images have to be specified using HTTP(S) urls for now." + type = string + default = "" +} + +variable "volume_name" { + description = "Already existing volume name used to boot the machines. It must be in the same storage pool. 
It's only used if source_image is not provided" type = string + default = "" } variable "memory" { @@ -137,11 +175,21 @@ variable "mac" { default = "" } -variable "network_id" { - description = "network id to be injected into domain. normally the isolated network is created in main.tf" +variable "isolated_network_id" { + description = "Network id, internally created by terraform" type = string } +variable "isolated_network_name" { + description = "Network name to attach the isolated network interface" + type = string +} + +variable "storage_pool" { + description = "libvirt storage pool name for VM disks" + default = "default" +} + variable "network_name" { description = "libvirt NAT network name for VMs, use empty string for bridged networking" default = "" @@ -152,15 +200,11 @@ variable "bridge" { default = "" } -variable "pool" { - description = "libvirt storage pool name for VM disks" - default = "default" -} - // monitoring variable "monitoring_enabled" { description = "enable the host to be monitored by exporters, e.g node_exporter" + type = bool default = false } @@ -168,6 +212,7 @@ variable "monitoring_enabled" { variable "qa_mode" { description = "define qa mode (Disable extra packages outside images)" + type = bool default = false } diff --git a/libvirt/modules/iscsi_server/main.tf b/libvirt/modules/iscsi_server/main.tf index 3542fa2fd..5a065b29c 100644 --- a/libvirt/modules/iscsi_server/main.tf +++ b/libvirt/modules/iscsi_server/main.tf @@ -3,17 +3,18 @@ terraform { } resource "libvirt_volume" "iscsi_image_disk" { - name = format("%s-iscsi-disk", terraform.workspace) - source = var.iscsi_image - pool = var.pool - count = var.iscsi_count + count = var.iscsi_count + name = format("%s-iscsi-disk", terraform.workspace) + source = var.source_image + base_volume_name = var.volume_name + pool = var.storage_pool } resource "libvirt_volume" "iscsi_dev_disk" { - name = format("%s-iscsi-dev", terraform.workspace) - pool = var.pool - size = "10000000000" # 10GB count 
= var.iscsi_count + name = format("%s-iscsi-dev", terraform.workspace) + pool = var.storage_pool + size = "10000000000" # 10GB } resource "libvirt_domain" "iscsisrv" { @@ -25,28 +26,29 @@ resource "libvirt_domain" "iscsisrv" { dynamic "disk" { for_each = [ - { - "vol_id" = element(libvirt_volume.iscsi_image_disk.*.id, count.index) - }, - { - "vol_id" = element(libvirt_volume.iscsi_dev_disk.*.id, count.index) - }] + { + "vol_id" = element(libvirt_volume.iscsi_image_disk.*.id, count.index) + }, + { + "vol_id" = element(libvirt_volume.iscsi_dev_disk.*.id, count.index) + }] content { volume_id = disk.value.vol_id } } network_interface { - network_name = var.network_name + network_name = var.nat_network_name bridge = var.bridge mac = var.mac wait_for_lease = true } network_interface { - network_id = var.network_id - mac = var.mac - addresses = [var.iscsi_srv_ip] + network_name = var.isolated_network_name + network_id = var.isolated_network_id + mac = var.mac + addresses = [var.iscsi_srv_ip] } console { @@ -75,8 +77,18 @@ resource "libvirt_domain" "iscsisrv" { output "output_data" { value = { id = libvirt_domain.iscsisrv.*.id - hostname = libvirt_domain.iscsisrv.*.name + name = libvirt_domain.iscsisrv.*.name private_addresses = [var.iscsi_srv_ip] addresses = libvirt_domain.iscsisrv.*.network_interface.0.addresses.0 } } + +module "iscsi_on_destroy" { + source = "../../../generic_modules/on_destroy" + node_count = var.iscsi_count + instance_ids = libvirt_domain.iscsisrv.*.id + user = "root" + password = "linux" + public_ips = libvirt_domain.iscsisrv.*.network_interface.0.addresses.0 + dependencies = [libvirt_domain.iscsisrv] +} diff --git a/libvirt/modules/iscsi_server/salt_provisioner.tf b/libvirt/modules/iscsi_server/salt_provisioner.tf index 4fd157a1d..fa99dfce4 100644 --- a/libvirt/modules/iscsi_server/salt_provisioner.tf +++ b/libvirt/modules/iscsi_server/salt_provisioner.tf @@ -1,12 +1,3 @@ -# Template file to launch the salt provisioning script -data "template_file" 
"salt_provisioner" { - template = file("../salt/salt_provisioner_script.tpl") - - vars = { - regcode = var.reg_code - } -} - resource "null_resource" "iscsi_provisioner" { count = var.provisioner == "salt" ? var.iscsi_count : 0 @@ -21,17 +12,7 @@ resource "null_resource" "iscsi_provisioner" { } provisioner "file" { - source = "../salt" - destination = "/tmp/salt" - } - - provisioner "file" { - content = data.template_file.salt_provisioner.rendered - destination = "/tmp/salt_provisioner.sh" - } - - provisioner "file" { - content = < /tmp/provisioning.log ${var.background ? "&" : ""}", - "return_code=$? && sleep 1 && exit $return_code", - ] # Workaround to let the process start in background properly - } +module "iscsi_provision" { + source = "../../../generic_modules/salt_provisioner" + node_count = var.provisioner == "salt" ? var.iscsi_count : 0 + instance_ids = null_resource.iscsi_provisioner.*.id + user = "root" + password = "linux" + public_ips = libvirt_domain.iscsisrv.*.network_interface.0.addresses.0 + background = var.background } diff --git a/libvirt/modules/iscsi_server/variables.tf b/libvirt/modules/iscsi_server/variables.tf index 563b6dadc..f9ce55801 100644 --- a/libvirt/modules/iscsi_server/variables.tf +++ b/libvirt/modules/iscsi_server/variables.tf @@ -1,8 +1,3 @@ -variable "iscsi_image" { - description = "iscsi server base image" - type = string -} - variable "iscsi_srv_ip" { description = "iscsi server address" type = string @@ -61,11 +56,24 @@ variable "provisioner" { variable "background" { description = "Run the provisioner execution in background if set to true finishing terraform execution" + type = bool default = false } // Provider-specific variables +variable "source_image" { + description = "Source image used to boot the machines (qcow2 format). It's possible to specify the path to a local (relative to the machine running the terraform command) image or a remote one. Remote images have to be specified using HTTP(S) urls for now." 
+ type = string + default = "" +} + +variable "volume_name" { + description = "Already existing volume name used to boot the machines. It must be in the same storage pool. It's only used if source_image is not provided" + type = string + default = "" +} + variable "memory" { description = "RAM memory in MiB" default = 512 @@ -81,17 +89,17 @@ variable "mac" { default = "" } -variable "network_id" { - description = "network id to be injected into domain. normally the isolated network is created in main.tf" +variable "isolated_network_id" { + description = "Network id, internally created by terraform" type = string } -variable "pool" { - description = "libvirt storage pool name for VM disks" - default = "default" +variable "isolated_network_name" { + description = "Network name to attach the isolated network interface" + type = string } -variable "network_name" { +variable "nat_network_name" { description = "libvirt NAT network name for VMs, use empty string for bridged networking" default = "" } @@ -101,9 +109,14 @@ variable "bridge" { default = "" } +variable "storage_pool" { + description = "libvirt storage pool name for VM disks" + default = "default" +} # Specific QA variables variable "qa_mode" { description = "define qa mode (Disable extra packages outside images)" + type = bool default = false } diff --git a/libvirt/modules/monitoring/main.tf b/libvirt/modules/monitoring/main.tf index 446e6084a..4beef1f66 100644 --- a/libvirt/modules/monitoring/main.tf +++ b/libvirt/modules/monitoring/main.tf @@ -2,12 +2,12 @@ terraform { required_version = ">= 0.12" } -resource "libvirt_volume" "monitoring_main_disk" { - name = format("%s-monitoring-disk", terraform.workspace) - source = var.monitoring_image - base_volume_id = var.monitoring_image == "" ? var.base_image_id: "" - pool = var.pool - count = var.monitoring_enabled == true ? 1 : 0 +resource "libvirt_volume" "monitoring_image_disk" { + count = var.monitoring_enabled == true ? 
1 : 0 + name = format("%s-monitoring-disk", terraform.workspace) + source = var.source_image + base_volume_name = var.volume_name + pool = var.storage_pool } resource "libvirt_domain" "monitoring_domain" { @@ -18,7 +18,7 @@ resource "libvirt_domain" "monitoring_domain" { qemu_agent = true disk { - volume_id = libvirt_volume.monitoring_main_disk.0.id + volume_id = libvirt_volume.monitoring_image_disk.0.id } network_interface { @@ -30,8 +30,8 @@ resource "libvirt_domain" "monitoring_domain" { network_interface { wait_for_lease = false - network_id = var.network_id - hostname = "${terraform.workspace}-${var.name}" + network_name = var.isolated_network_name + network_id = var.isolated_network_id addresses = [var.monitoring_srv_ip] } @@ -61,8 +61,18 @@ resource "libvirt_domain" "monitoring_domain" { output "output_data" { value = { id = join("", libvirt_domain.monitoring_domain.*.id) - hostname = join("", libvirt_domain.monitoring_domain.*.name) + name = join("", libvirt_domain.monitoring_domain.*.name) private_address = var.monitoring_srv_ip address = join("", libvirt_domain.monitoring_domain.*.network_interface.0.addresses.0) } } + +module "monitoring_on_destroy" { + source = "../../../generic_modules/on_destroy" + node_count = var.monitoring_enabled ? 
1 : 0 + instance_ids = libvirt_domain.monitoring_domain.*.id + user = "root" + password = "linux" + public_ips = libvirt_domain.monitoring_domain.*.network_interface.0.addresses.0 + dependencies = [libvirt_domain.monitoring_domain] +} diff --git a/libvirt/modules/monitoring/salt_provisioner.tf b/libvirt/modules/monitoring/salt_provisioner.tf index 04bfc5659..f4aa1aa17 100644 --- a/libvirt/modules/monitoring/salt_provisioner.tf +++ b/libvirt/modules/monitoring/salt_provisioner.tf @@ -1,12 +1,3 @@ -# Template file to launch the salt provisioning script -data "template_file" "monitoring_salt_provisioner" { - template = file("../salt/salt_provisioner_script.tpl") - - vars = { - regcode = var.reg_code - } -} - resource "null_resource" "monitoring_provisioner" { count = var.provisioner == "salt" && var.monitoring_enabled ? 1 : 0 triggers = { @@ -20,42 +11,35 @@ resource "null_resource" "monitoring_provisioner" { } provisioner "file" { - source = "../salt" - destination = "/tmp" - } - - provisioner "file" { - content = data.template_file.monitoring_salt_provisioner.rendered - destination = "/tmp/salt_provisioner.sh" - } - - provisioner "file" { - content = < /tmp/provisioning.log ${var.background ? "&" : ""}", - "return_code=$? && sleep 1 && exit $return_code", - ] # Workaround to let the process start in background properly - } - } +module "monitoring_provision" { + source = "../../../generic_modules/salt_provisioner" + node_count = var.provisioner == "salt" && var.monitoring_enabled ? 
1 : 0 + instance_ids = null_resource.monitoring_provisioner.*.id + user = "root" + password = "linux" + public_ips = libvirt_domain.monitoring_domain.*.network_interface.0.addresses.0 + background = var.background +} diff --git a/libvirt/modules/monitoring/variables.tf b/libvirt/modules/monitoring/variables.tf index deff1098a..8a3fe16fa 100644 --- a/libvirt/modules/monitoring/variables.tf +++ b/libvirt/modules/monitoring/variables.tf @@ -78,6 +78,7 @@ variable "provisioner" { variable "background" { description = "Run the provisioner execution in background if set to true finishing terraform execution" + type = bool default = false } @@ -88,9 +89,16 @@ variable "monitoring_srv_ip" { // Provider-specific variables -variable "base_image_id" { - description = "it is the centralized images used by the module. It is created in main.tf" +variable "source_image" { + description = "Source image used to boot the machines (qcow2 format). It's possible to specify the path to a local (relative to the machine running the terraform command) image or a remote one. Remote images have to be specified using HTTP(S) urls for now." type = string + default = "" +} + +variable "volume_name" { + description = "Already existing volume name used to boot the machines. It must be in the same storage pool. It's only used if source_image is not provided" + type = string + default = "" } variable "memory" { @@ -108,31 +116,34 @@ variable "cpu_model" { default = "" } +variable "isolated_network_id" { + description = "Network id, internally created by terraform" + type = string +} -variable "network_id" { - description = "network id to be injected into domain. 
normally the isolated network is created in main.tf" +variable "isolated_network_name" { + description = "Network name to attach the isolated network interface" type = string } -variable "pool" { +variable "storage_pool" { description = "libvirt storage pool name for VM disks" default = "default" } - -variable "monitored_hosts" { - description = "IPs of hosts you want to monitor" +variable "hana_targets" { + description = "IPs of HANA hosts you want to monitor; the last one is assumed to be the virtual IP of the active HA instance." type = list(string) } -variable "drbd_monitored_hosts" { +variable "drbd_targets" { description = "IPs of DRBD hosts you want to monitor" type = list(string) default = [] } -variable "nw_monitored_hosts" { - description = "IPs of Netweaver hosts you want to monitor" +variable "netweaver_targets" { + description = "IPs of Netweaver hosts you want to monitor; the first two are assumed to be the virtual IPs of the HA instances." type = list(string) default = [] } diff --git a/libvirt/modules/netweaver_node/main.tf b/libvirt/modules/netweaver_node/main.tf index 16e58527d..b9c396f59 100644 --- a/libvirt/modules/netweaver_node/main.tf +++ b/libvirt/modules/netweaver_node/main.tf @@ -1,12 +1,13 @@ -resource "libvirt_volume" "netweaver_main_disk" { - name = "${terraform.workspace}-${var.name}${var.netweaver_count > 1 ? "-${count.index + 1}" : ""}-main-disk" - base_volume_id = var.base_image_id - pool = var.pool +resource "libvirt_volume" "netweaver_image_disk" { count = var.netweaver_count + name = "${terraform.workspace}-${var.name}-${count.index + 1}-main-disk" + source = var.source_image + base_volume_name = var.volume_name + pool = var.storage_pool } resource "libvirt_domain" "netweaver_domain" { - name = "${terraform.workspace}-${var.name}${var.netweaver_count > 1 ? 
"-${count.index + 1}" : ""}" + name = "${terraform.workspace}-${var.name}-${count.index + 1}" memory = var.memory vcpu = var.vcpu count = var.netweaver_count @@ -14,12 +15,12 @@ resource "libvirt_domain" "netweaver_domain" { dynamic "disk" { for_each = [ - { - "vol_id" = element(libvirt_volume.netweaver_main_disk.*.id, count.index) - }, - { - "vol_id" = var.shared_disk_id - }, + { + "vol_id" = element(libvirt_volume.netweaver_image_disk.*.id, count.index) + }, + { + "vol_id" = var.shared_disk_id + }, ] content { volume_id = disk.value.vol_id @@ -35,8 +36,8 @@ resource "libvirt_domain" "netweaver_domain" { network_interface { wait_for_lease = false - network_id = var.network_id - hostname = "${var.name}${var.netweaver_count > 1 ? "0${count.index + 1}" : ""}" + network_name = var.isolated_network_name + network_id = var.isolated_network_id addresses = [element(var.host_ips, count.index)] } @@ -70,8 +71,18 @@ resource "libvirt_domain" "netweaver_domain" { output "output_data" { value = { id = libvirt_domain.netweaver_domain.*.id - hostname = libvirt_domain.netweaver_domain.*.name + name = libvirt_domain.netweaver_domain.*.name private_addresses = var.host_ips addresses = libvirt_domain.netweaver_domain.*.network_interface.0.addresses.0 } } + +module "netweaver_on_destroy" { + source = "../../../generic_modules/on_destroy" + node_count = var.netweaver_count + instance_ids = libvirt_domain.netweaver_domain.*.id + user = "root" + password = "linux" + public_ips = libvirt_domain.netweaver_domain.*.network_interface.0.addresses.0 + dependencies = [libvirt_domain.netweaver_domain] +} diff --git a/libvirt/modules/netweaver_node/salt_provisioner.tf b/libvirt/modules/netweaver_node/salt_provisioner.tf index b8c354802..a7e562a88 100644 --- a/libvirt/modules/netweaver_node/salt_provisioner.tf +++ b/libvirt/modules/netweaver_node/salt_provisioner.tf @@ -2,15 +2,6 @@ # It will be executed if 'provisioner' is set to 'salt' (default option) and the # libvirt_domain.domain 
(netweaver_node) resources are created (check triggers option). -# Template file to launch the salt provisioning script -data "template_file" "netweaver_salt_provisioner" { - template = file("../salt/salt_provisioner_script.tpl") - - vars = { - regcode = var.reg_code - } -} - resource "null_resource" "netweaver_node_provisioner" { count = var.provisioner == "salt" ? var.netweaver_count : 0 triggers = { @@ -24,32 +15,30 @@ resource "null_resource" "netweaver_node_provisioner" { } provisioner "file" { - source = "../salt" - destination = "/tmp" - } - - provisioner "file" { - content = data.template_file.netweaver_salt_provisioner.rendered - destination = "/tmp/salt_provisioner.sh" - } - - provisioner "file" { -content = < 1 ? "0${count.index + 1}" : ""} +hostname: ${var.name}0${count.index + 1} network_domain: ${var.network_domain} timezone: ${var.timezone} reg_code: ${var.reg_code} reg_email: ${var.reg_email} -reg_additional_modules: {${join(", ",formatlist("'%s': '%s'",keys(var.reg_additional_modules),values(var.reg_additional_modules),),)}} +reg_additional_modules: {${join(", ", formatlist("'%s': '%s'", keys(var.reg_additional_modules), values(var.reg_additional_modules), ), )}} additional_packages: [${join(", ", formatlist("'%s'", var.additional_packages))}] authorized_keys: [${trimspace(file(var.public_key_location))}] host_ips: [${join(", ", formatlist("'%s'", var.host_ips))}] virtual_host_ips: [${join(", ", formatlist("'%s'", var.virtual_host_ips))}] host_ip: ${element(var.host_ips, count.index)} +hana_ip: ${var.hana_ip} provider: libvirt role: netweaver_node +netweaver_product_id: ${var.netweaver_product_id} netweaver_inst_media: ${var.netweaver_inst_media} +netweaver_swpm_folder: ${var.netweaver_swpm_folder} +netweaver_sapcar_exe: ${var.netweaver_sapcar_exe} +netweaver_swpm_sar: ${var.netweaver_swpm_sar} +netweaver_swpm_extract_dir: ${var.netweaver_swpm_extract_dir} +netweaver_sapexe_folder: ${var.netweaver_sapexe_folder} +netweaver_additional_dvds: 
[${join(", ", formatlist("'%s'", var.netweaver_additional_dvds))}] netweaver_nfs_share: ${var.netweaver_nfs_share} ascs_instance_number: ${var.ascs_instance_number} ers_instance_number: ${var.ers_instance_number} @@ -59,14 +48,18 @@ ha_sap_deployment_repo: ${var.ha_sap_deployment_repo} shared_storage_type: shared-disk sbd_disk_device: /dev/vdb1 monitoring_enabled: ${var.monitoring_enabled} +devel_mode: ${var.devel_mode} EOF - destination = "/tmp/grains" - } + destination = "/tmp/grains" + } +} - provisioner "remote-exec" { - inline = [ - "${var.background ? "nohup" : ""} sh /tmp/salt_provisioner.sh > /tmp/provisioning.log ${var.background ? "&" : ""}", - "return_code=$? && sleep 1 && exit $return_code", - ] # Workaround to let the process start in background properly - } - } +module "netweaver_provision" { + source = "../../../generic_modules/salt_provisioner" + node_count = var.provisioner == "salt" ? var.netweaver_count : 0 + instance_ids = null_resource.netweaver_node_provisioner.*.id + user = "root" + password = "linux" + public_ips = libvirt_domain.netweaver_domain.*.network_interface.0.addresses.0 + background = var.background +} diff --git a/libvirt/modules/netweaver_node/variables.tf b/libvirt/modules/netweaver_node/variables.tf index 51f00caaf..0a3fb576b 100644 --- a/libvirt/modules/netweaver_node/variables.tf +++ b/libvirt/modules/netweaver_node/variables.tf @@ -60,9 +60,56 @@ variable "shared_disk_id" { type = string } +variable "hana_ip" { + type = string + description = "Ip address of the hana database" +} + +variable "netweaver_product_id" { + description = "Netweaver installation product. Even though the module is about Netweaver, it can be used to install other SAP instances like S4/HANA" + type = string + default = "NW750.HDB.ABAPHA" +} + variable "netweaver_inst_media" { - description = "URL of the NFS share where the SAP Netweaver software installer is stored. 
This media shall be mounted in `/root/netweaver_inst_media`" + description = "URL of the NFS share where the SAP Netweaver software installer is stored. This media shall be mounted in `/sapmedia/NW`" + type = string +} + +variable "netweaver_swpm_folder" { + description = "Netweaver software SWPM folder, path relative from the `netweaver_inst_media` mounted point" + type = string + default = "" +} + +variable "netweaver_sapcar_exe" { + description = "Path to sapcar executable, relative from the `netweaver_inst_media` mounted point" + type = string + default = "" +} + +variable "netweaver_swpm_sar" { + description = "SWPM installer sar archive containing the installer, path relative from the `netweaver_inst_media` mounted point" type = string + default = "" +} + +variable "netweaver_swpm_extract_dir" { + description = "Extraction path for Netweaver software SWPM folder, if SWPM sar file is provided" + type = string + default = "/sapmedia/NW/SWPM" +} + +variable "netweaver_sapexe_folder" { + description = "Software folder where needed sapexe `SAR` executables are stored (sapexe, sapexedb, saphostagent), path relative from the `netweaver_inst_media` mounted point" + type = string + default = "" +} + +variable "netweaver_additional_dvds" { + description = "Software folder with additional SAP software needed to install netweaver (NW export folder and HANA HDB client for example), path relative from the `netweaver_inst_media` mounted point" + type = list + default = [] } variable "netweaver_nfs_share" { @@ -101,6 +148,7 @@ variable "provisioner" { variable "background" { description = "Run the provisioner execution in background if set to true finishing terraform execution" + type = bool default = false } @@ -108,14 +156,22 @@ variable "background" { variable "monitoring_enabled" { description = "enable the host to be monitored by exporters, e.g node_exporter" + type = bool default = false } // Provider-specific variables -variable "base_image_id" { - description = "base 
image id which the module will use. You can create a baseimage and module will use it. Created in main.tf" +variable "source_image" { + description = "Source image used to boot the machines (qcow2 format). It's possible to specify the path to a local (relative to the machine running the terraform command) image or a remote one. Remote images have to be specified using HTTP(S) urls for now." type = string + default = "" +} + +variable "volume_name" { + description = "Already existing volume name used to boot the machines. It must be in the same storage pool. It's only used if source_image is not provided" + type = string + default = "" } variable "memory" { @@ -133,8 +189,13 @@ variable "mac" { default = "" } -variable "network_id" { - description = "network id to be injected into domain. normally the isolated network is created in main.tf" +variable "isolated_network_id" { + description = "Network id, internally created by terraform" + type = string +} + +variable "isolated_network_name" { + description = "Network name to attach the isolated network interface" type = string } @@ -153,7 +214,13 @@ variable "bridge" { default = "" } -variable "pool" { +variable "storage_pool" { description = "libvirt storage pool name for VM disks" default = "default" } + +variable "devel_mode" { + description = "Whether or not to give preference to packages from `ha_sap_deployment_repo`" + type = bool + default = false +} diff --git a/libvirt/outputs.tf b/libvirt/outputs.tf index c3576eb0a..8d9562223 100644 --- a/libvirt/outputs.tf +++ b/libvirt/outputs.tf @@ -9,7 +9,7 @@ output "cluster_nodes_public_ip" { } output "cluster_nodes_name" { - value = module.hana_node.output_data.hostname + value = module.hana_node.output_data.name } output "cluster_nodes_public_name" { @@ -25,7 +25,7 @@ output "drbd_public_ip" { } output "drbd_name" { - value = module.drbd_node.output_data.hostname + value = module.drbd_node.output_data.name } output "drbd_public_name" { @@ -41,7 +41,7 @@ output 
"iscsisrv_public_ip" { } output "iscsisrv_name" { - value = module.iscsi_server.output_data.hostname + value = module.iscsi_server.output_data.name } output "iscsisrv_public_name" { @@ -57,7 +57,7 @@ output "monitoring_public_ip" { } output "monitoring_name" { - value = module.monitoring.output_data.hostname + value = module.monitoring.output_data.name } output "monitoring_public_name" { @@ -73,7 +73,7 @@ output "netweaver_nodes_public_ip" { } output "netweaver_nodes_name" { - value = module.netweaver_node.output_data.hostname + value = module.netweaver_node.output_data.name } output "netweaver_nodes_public_name" { diff --git a/libvirt/terraform.tfvars.example b/libvirt/terraform.tfvars.example index 6cd2c38d6..6c0322afa 100644 --- a/libvirt/terraform.tfvars.example +++ b/libvirt/terraform.tfvars.example @@ -1,18 +1,51 @@ +# qemu uri, this example is to run locally qemu_uri = "qemu:///system" -hana_inst_media = "url-to-your-nfs-share" -base_image = "url-to-your-sles4sap-image" + +# Use already existing network +#network_name = "my-network" +# Due to some internal limitations, the iprange of the existing network must be defined +# The iprange must be defined for already existing network and to create a new one iprange = "192.168.XXX.Y/24" -host_ips = ["192.168.XXX.Y", "192.168.XXX.Y+1"] + +# Base image configuration. This images will be used for all deployed machines unless the specific image is defined +# The source image has preference over the `volume_name` parameter +#source_image = "url-to-your-sles4sap-image" +# Use an already existing image. 
The image must be in the same storage pool defined in `storage_pool` parameter +# This option is way faster as the image must not be downloaded +#volume_name = "SLES4SAP-15_SP1" + +# Set specific image for hana (it's the same for iscsi, monitoring, netweaver and drbd) +# This option has preference over base image options +# hana_source_image = "url-to-your-sles4sap-image" +# hana_volume_name = "SLES4SAP-15_SP0" + +hana_inst_media = "url-to-your-nfs-share:/sapdata/sap_inst_media/51053381" +# Or you can combine the `hana_inst_media` with `hana_platform_folder` variable. +#hana_inst_media = "url-to-your-nfs-share:/sapdata/sap_inst_media" +# Specify the path to already extracted HANA platform installation media, relative to hana_inst_media mounting point. +# This will have preference over hdbserver sar archive installation media +#hana_platform_folder = "51053381" + +# Or specify the path to the sapcar executable & HANA database server installation sar archive, relative to the 'hana_inst_media' mounting point +# The sar archive will be extracted to path specified at hdbserver_extract_dir (optional, by default /sapmedia/HANA) +# Make sure to use the latest/compatible version of sapcar executable, otherwise file may be extracted incorrectly +hana_sapcar_exe = "SAPCAR" +hdbserver_sar = "IMDB_SERVER.SAR" +hana_extract_dir = "/sapmedia/HDBSERVER" +#hana_ips = ["192.168.XXX.Y", "192.168.XXX.Y+1"] # Shared storage type information shared_storage_type = "iscsi" -iscsi_srv_ip = "192.168.XXX.Y+6" -iscsi_image = "url-to-your-sles4sap-image" # sles15 or above +#iscsi_srv_ip = "192.168.XXX.Y+6" # Repository url used to install HA/SAP deployment packages" # The latest RPM packages can be found at: # https://download.opensuse.org/repositories/network:/ha-clustering:/Factory/{YOUR OS VERSION} # Contains the salt formulas rpm packages. 
+# To auto detect the SLE version +#ha_sap_deployment_repo = "http://download.opensuse.org/repositories/network:/ha-clustering:/Factory/" +# Specific SLE version used in all the created machines +#ha_sap_deployment_repo = "http://download.opensuse.org/repositories/network:/ha-clustering:/Factory/SLE_15/" ha_sap_deployment_repo = "" # Whether or not to install HA/SAP packages from ha_sap_deployment_repo @@ -41,25 +74,37 @@ devel_mode = false # Monitoring variables -# Custom sles4sap image for the monitoring server. If not used the same image than the hana nodes will be used -#monitoring_image = "url-to-your-sles4sap-image" - # Enable the host to be monitored by exporters #monitoring_enabled = true # IP address of the machine where prometheus and grafana are running -monitoring_srv_ip = "192.168.XXX.Y+7" +#monitoring_srv_ip = "192.168.XXX.Y+7" # Netweaver variables # NFS share with netweaver installation folders #netweaver_inst_media = "url-to-your-nfs-share" +# Netweaver installation required folders +# SAP SWPM installation folder, relative to the netweaver_inst_media mounting point +#netweaver_swpm_folder = "your_swpm" +# Or specify the path to the sapcar executable & SWPM installer sar archive, relative to the netweaver_inst_media mounting point +# The sar archive will be extracted to path specified at netweaver_swpm_extract_dir (optional, by default /sapmedia/NW/SWPM) +#netweaver_sapcar_exe = "your_sapcar_exe_file_path" +#netweaver_swpm_sar = "your_swpm_sar_file_path" +#netweaver_swpm_extract_dir = "location_to_extract_swpm_sar_absolute_path" +# Folder where needed SAR executables (sapexe, sapdbexe) are stored, relative to the netweaver_inst_media mounting point +#netweaver_sapexe_folder = "kernel_nw75_sar" +# Additional folders (added in start_dir.cd), relative to the netweaver_inst_media mounting point +#netweaver_additional_dvds = ["dvd1", "dvd2"] + # NFS share to store the Netweaver shared files #netweaver_nfs_share = "url-to-your-netweaver-sapmnt-nfs-share" 
# IP addresses of the machines hosting Netweaver instances -#nw_ips = ["192.168.XXX.Y+2", "192.168.XXX.Y+3", "192.168.XXX.Y+4", "192.168.XXX.Y+5"] +#netweaver_ips = ["192.168.XXX.Y+2", "192.168.XXX.Y+3", "192.168.XXX.Y+4", "192.168.XXX.Y+5"] +#netweaver_virtual_ips = ["192.168.XXX.Y+6", "192.168.XXX.Y+7", "192.168.XXX.Y+8", "192.168.XXX.Y+9"] + # Enable/disable Netweaver deployment #netweaver_enabled = true @@ -89,3 +134,8 @@ monitoring_srv_ip = "192.168.XXX.Y+7" # qa_mode must be set to true for executing hwcct # true or false (default) #hwcct = false + +# Pre deployment + +# Enable all some pre deployment steps (disabled by default) +#pre_deployment = true diff --git a/libvirt/variables.tf b/libvirt/variables.tf index 7a231ff96..9b6eab83c 100644 --- a/libvirt/variables.tf +++ b/libvirt/variables.tf @@ -1,21 +1,26 @@ +# +# Libvirt related variables +# variable "qemu_uri" { description = "URI to connect with the qemu-service." default = "qemu:///system" } -variable "base_image" { - description = "Image of the sap hana nodes" - type = string -} - variable "storage_pool" { description = "libvirt storage pool name for VM disks" + type = string default = "default" } +variable "network_name" { + description = "Already existing virtual network name. If it's not provided a new one will be created" + type = string + default = "" +} + variable "iprange" { - description = "IP range of the isolated network" - default = "192.168.106.0/24" + description = "IP range of the isolated network (it must be provided even when the network_name is given, due to terraform-libvirt-provider limitations we cannot get the current network data)" + type = string } variable "isolated_network_bridge" { @@ -24,175 +29,436 @@ variable "isolated_network_bridge" { default = "" } +variable "source_image" { + description = "Source image used to boot the machines (qcow2 format). It's possible to specify the path to a local (relative to the machine running the terraform command) image or a remote one. 
Remote images have to be specified using HTTP(S) urls for now. Specific node images have preference over this value" + type = string + default = "" +} + +variable "volume_name" { + description = "Already existing volume name used to boot the machines. It must be in the same storage pool. It's only used if source_image is not provided. Specific node images have preference over this value" + type = string + default = "" +} + +# Deployment variables +# +variable "reg_code" { + description = "If informed, register the product using SUSEConnect" + default = "" +} + +variable "reg_email" { + description = "Email used for the registration" + default = "" +} + +# The module format must follow SUSEConnect convention: +# // +# Example: Suggested modules for SLES for SAP 15 +# - sle-module-basesystem/15/x86_64 +# - sle-module-desktop-applications/15/x86_64 +# - sle-module-server-applications/15/x86_64 +# - sle-ha/15/x86_64 (Need the same regcode as SLES for SAP) +# - sle-module-sap-applications/15/x86_64 +variable "reg_additional_modules" { + description = "Map of the modules to be registered. Module name = Regcode, when needed." + type = map(string) + default = {} +} + +# Repository url used to install HA/SAP deployment packages" +# The latest RPM packages can be found at: +# https://download.opensuse.org/repositories/network:/ha-clustering:/Factory/{YOUR OS VERSION} +# Contains the salt formulas rpm packages. +variable "ha_sap_deployment_repo" { + description = "Repository url used to install HA/SAP deployment packages. If SLE version is not set, the deployment will automatically detect the current OS version" + type = string +} + +variable "devel_mode" { + description = "Increase ha_sap_deployment_repo repository priority to get the packages from this repository instead of SLE official channels" + type = bool + default = false +} + +variable "provisioner" { + description = "Used provisioner option. Available options: salt. 
Let empty to not use any provisioner" + default = "salt" +} + +variable "background" { + description = "Run the provisioner execution in background if set to true finishing terraform execution" + type = bool + default = false +} + +# +# Hana related variables + +variable "hana_count" { + description = "Number of hana nodes" + type = number + default = 2 +} + +variable "hana_source_image" { + description = "Source image used to boot the hana machines (qcow2 format). It's possible to specify the path to a local (relative to the machine running the terraform command) image or a remote one. Remote images have to be specified using HTTP(S) urls for now." + type = string + default = "" +} + +variable "hana_volume_name" { + description = "Already existing volume name used to boot the hana machines. It must be in the same storage pool. It's only used if source_image is not provided" + type = string + default = "" +} + +variable "hana_node_vcpu" { + description = "Number of CPUs for the HANA machines" + type = number + default = 4 +} + +variable "hana_node_memory" { + description = "Memory (in MBs) for the HANA machines" + type = number + default = 32678 +} + +variable "hana_node_disk_size" { + description = "Disk size (in bytes) for the HANA machines" + type = number + default = 68719476736 +} + +variable "hana_ips" { + description = "ip addresses to set to the hana nodes" + type = list(string) + default = [] +} + variable "hana_inst_media" { description = "URL of the NFS share where the SAP HANA software installer is stored. 
This media shall be mounted in `hana_inst_folder`" type = string } variable "hana_inst_folder" { - description = "Folder where SAP HANA installation files are stored" + description = "Folder where SAP HANA installation files are mounted" type = string - default = "/root/hana_inst_media" + default = "/sapmedia/HANA" } -variable "hana_fstype" { - description = "Filesystem type to use for HANA" +variable "hana_platform_folder" { + description = "Path to the hana platform media, relative to the 'hana_inst_media' mounting point" type = string - default = "xfs" + default = "" } -variable "netweaver_inst_media" { - description = "URL of the NFS share where the SAP Netweaver software installer is stored. This media shall be mounted in `/root/netweaver_inst_media`" +variable "hana_sapcar_exe" { + description = "Path to the sapcar executable, relative to the 'hana_inst_media' mounting point" type = string default = "" } -variable "netweaver_nfs_share" { - description = "URL of the NFS share where /sapmnt and /usr/sap/{sid}/SYS will be mounted. 
This folder must have the sapmnt and usrsapsys folders" +variable "hdbserver_sar" { + description = "Path to the HANA database server installation sar archive, relative to the 'hana_inst_media' mounting point" type = string default = "" } -variable "host_ips" { - description = "IP addresses of the hana nodes" - type = list(string) - default = ["192.168.106.15", "192.168.106.16"] +variable "hana_extract_dir" { + description = "Absolute path to folder where SAP HANA sar archive will be extracted" + type = string + default = "/sapmedia/HANA" } -variable "nw_ips" { - description = "IP addresses of the netweaver nodes" - type = list(string) - default = ["192.168.106.17", "192.168.106.18", "192.168.106.19", "192.168.106.20"] +variable "hana_fstype" { + description = "Filesystem type to use for HANA" + type = string + default = "xfs" } -variable "nw_virtual_ips" { - description = "IP addresses of the netweaver nodes" - type = list(string) - default = ["192.168.106.30", "192.168.106.31", "192.168.106.32", "192.168.106.33"] +variable "hana_cluster_vip" { + description = "IP address used to configure the hana cluster floating IP. It must be in other subnet than the machines!" + type = string + default = "" +} + +variable "scenario_type" { + description = "Deployed scenario type. Available options: performance-optimized, cost-optimized" + default = "performance-optimized" +} + +# +# iSCSI server related variables +# +variable "iscsi_vcpu" { + description = "Number of CPUs for the iSCSI server" + type = number + default = 2 +} + +variable "iscsi_memory" { + description = "Memory size (in MBs) for the iSCSI server" + type = number + default = 4096 } variable "shared_storage_type" { - description = "used shared storage type for fencing (sbd). Available options: iscsi, shared-disk." + description = "Used shared storage type for fencing (sbd). Available options: iscsi, shared-disk." 
type = string default = "iscsi" } -variable "drbd_shared_storage_type" { - description = "used shared storage type for fencing (sbd) for DRBD cluster. Available options: iscsi, shared-disk." +variable "sbd_disk_size" { + description = "Disk size (in bytes) for the SBD disk" + type = number + default = 10737418240 +} + +variable "iscsi_source_image" { + description = "Source image used to boot the iscsi machines (qcow2 format). It's possible to specify the path to a local (relative to the machine running the terraform command) image or a remote one. Remote images have to be specified using HTTP(S) urls for now." type = string - default = "iscsi" + default = "" } -variable "iscsi_image" { - description = "iscsi server base image (only used if shared_storage_type is iscsi)" +variable "iscsi_volume_name" { + description = "Already existing volume name used to boot the iscsi machines. It must be in the same storage pool. It's only used if iscsi_source_image is not provided" type = string default = "" } variable "iscsi_srv_ip" { - description = "iscsi server address (only used if shared_storage_type is iscsi)" + description = "iSCSI server address (only used if shared_storage_type is iscsi)" type = string - default = "192.168.106.21" + default = "" } variable "iscsi_disks" { - description = "number of partitions attach to iscsi server. 0 means `all`." + description = "Number of partitions attach to iscsi server. 0 means `all`." + type = number default = 0 } +# +# Monitoring related variables +# +variable "monitoring_enabled" { + description = "Enable the host to be monitored by exporters, e.g node_exporter" + type = bool + default = false +} + +variable "monitoring_source_image" { + description = "Source image used to boot the monitoring machines (qcow2 format). It's possible to specify the path to a local (relative to the machine running the terraform command) image or a remote one. Remote images have to be specified using HTTP(S) urls for now." 
+ type = string + default = "" +} + +variable "monitoring_volume_name" { + description = "Already existing volume name used to boot the monitoring machines. It must be in the same storage pool. It's only used if monitoring_source_image is not provided" + type = string + default = "" +} + +variable "monitoring_vcpu" { + description = "Number of CPUs for the monitor machine" + type = number + default = 4 +} + +variable "monitoring_memory" { + description = "Memory (in MBs) for the monitor machine" + type = number + default = 4096 +} + variable "monitoring_srv_ip" { - description = "monitoring server address" + description = "Monitoring server address" type = string - default = "192.168.106.22" + default = "" } -variable "monitoring_image" { - description = "monitoring server base image (if not set, the same image as the hana nodes will be used)" +# +# Netweaver related variables +# +variable "netweaver_enabled" { + description = "Enable SAP Netweaver deployment" + type = bool + default = false +} + +variable "netweaver_source_image" { + description = "Source image used to boot the netweaver machines (qcow2 format). It's possible to specify the path to a local (relative to the machine running the terraform command) image or a remote one. Remote images have to be specified using HTTP(S) urls for now." type = string default = "" } -variable "drbd_ips" { - description = "IP addresses of the drbd nodes" +variable "netweaver_volume_name" { + description = "Already existing volume name used to boot the netweaver machines. It must be in the same storage pool. 
It's only used if netweaver_source_image is not provided" + type = string + default = "" +} + +variable "netweaver_node_vcpu" { + description = "Number of CPUs for the NetWeaver machines" + type = number + default = 4 +} + +variable "netweaver_node_memory" { + description = "Memory (in MBs) for the NetWeaver machines" + type = number + default = 8192 +} + +variable "netweaver_shared_disk_size" { + description = "Shared disk size (in bytes) for the NetWeaver machines" + type = number + default = 68719476736 +} + +variable "netweaver_ips" { + description = "IP addresses of the netweaver nodes" type = list(string) - default = ["192.168.106.23", "192.168.106.24"] + default = [] } -variable "reg_code" { - description = "If informed, register the product using SUSEConnect" - default = "" +variable "netweaver_virtual_ips" { + description = "IP addresses of the netweaver nodes" + type = list(string) + default = [] } -variable "reg_email" { - description = "Email used for the registration" +variable "netweaver_nfs_share" { + description = "URL of the NFS share where /sapmnt and /usr/sap/{sid}/SYS will be mounted. This folder must have the sapmnt and usrsapsys folders" + type = string default = "" } -# The module format must follow SUSEConnect convention: -# // -# Example: Suggested modules for SLES for SAP 15 -# - sle-module-basesystem/15/x86_64 -# - sle-module-desktop-applications/15/x86_64 -# - sle-module-server-applications/15/x86_64 -# - sle-ha/15/x86_64 (Need the same regcode as SLES for SAP) -# - sle-module-sap-applications/15/x86_64 -variable "reg_additional_modules" { - description = "Map of the modules to be registered. Module name = Regcode, when needed." - type = map(string) - default = {} +variable "netweaver_product_id" { + description = "Netweaver installation product. 
Even though the module is about Netweaver, it can be used to install other SAP instances like S4/HANA" + type = string + default = "NW750.HDB.ABAPHA" } -# Repository url used to install HA/SAP deployment packages" -# The latest RPM packages can be found at: -# https://download.opensuse.org/repositories/network:/ha-clustering:/Factory/{YOUR OS VERSION} -# Contains the salt formulas rpm packages. -variable "ha_sap_deployment_repo" { - description = "Repository url used to install HA/SAP deployment packages" +variable "netweaver_inst_media" { + description = "URL of the NFS share where the SAP Netweaver software installer is stored. This media shall be mounted in `/sapmedia/NW`" type = string + default = "" } -variable "devel_mode" { - description = "whether or not to install HA/SAP packages from ha_sap_deployment_repo" - default = false +variable "netweaver_swpm_folder" { + description = "Netweaver software SWPM folder, path relative from the `netweaver_inst_media` mounted point" + type = string + default = "" } -variable "scenario_type" { - description = "Deployed scenario type. Available options: performance-optimized, cost-optimized" - default = "performance-optimized" +variable "netweaver_sapcar_exe" { + description = "Path to sapcar executable, relative from the `netweaver_inst_media` mounted point" + type = string + default = "" } -variable "provisioner" { - description = "Used provisioner option. Available options: salt. 
Let empty to not use any provisioner" - default = "salt" +variable "netweaver_swpm_sar" { + description = "SWPM installer sar archive containing the installer, path relative from the `netweaver_inst_media` mounted point" + type = string + default = "" } -variable "background" { - description = "Run the provisioner execution in background if set to true finishing terraform execution" - default = false +variable "netweaver_swpm_extract_dir" { + description = "Extraction path for Netweaver software SWPM folder, if SWPM sar file is provided" + type = string + default = "/sapmedia/NW/SWPM" } -variable "monitoring_enabled" { - description = "enable the host to be monitored by exporters, e.g node_exporter" - default = false +variable "netweaver_sapexe_folder" { + description = "Software folder where needed sapexe `SAR` executables are stored (sapexe, sapexedb, saphostagent), path relative from the `netweaver_inst_media` mounted point" + type = string + default = "" } -variable "netweaver_enabled" { - description = "enable SAP Netweaver deployment" - default = false +variable "netweaver_additional_dvds" { + description = "Software folder with additional SAP software needed to install netweaver (NW export folder and HANA HDB client for example), path relative from the `netweaver_inst_media` mounted point" + type = list + default = [] } +# +# DRBD related variables +# variable "drbd_enabled" { - description = "enable the DRBD cluster for nfs" + description = "Enable the drbd cluster for nfs" + type = bool default = false } +variable "drbd_source_image" { + description = "Source image used to bot the drbd machines (qcow2 format). It's possible to specify the path to a local (relative to the machine running the terraform command) image or a remote one. Remote images have to be specified using HTTP(S) urls for now." + type = string + default = "" +} + +variable "drbd_volume_name" { + description = "Already existing volume name boot to create the drbd machines. 
It must be in the same storage pool. It's only used if drbd_source_image is not provided" + type = string + default = "" +} + variable "drbd_count" { - description = "number of DRBD hosts for cluster" + description = "Number of drbd machines to create the cluster" default = 2 } +variable "drbd_node_vcpu" { + description = "Number of CPUs for the DRBD machines" + type = number + default = 1 +} + +variable "drbd_node_memory" { + description = "Memory (in MBs) for the DRBD machines" + type = number + default = 1024 +} + +variable "drbd_disk_size" { + description = "Disk size (in bytes) for the DRBD machines" + type = number + default = 10737418240 +} + +variable "drbd_shared_disk_size" { + description = "Shared disk size (in bytes) for the DRBD machines" + type = number + default = 104857600 +} + +variable "drbd_ips" { + description = "IP addresses of the drbd nodes" + type = list(string) + default = [] +} + +variable "drbd_cluster_vip" { + description = "IP address used to configure the drbd cluster floating IP. It must be in other subnet than the machines!" + type = string + default = "" +} + +variable "drbd_shared_storage_type" { + description = "Used shared storage type for fencing (sbd) for drbd cluster. Available options: iscsi, shared-disk." + type = string + default = "iscsi" +} + +# +# Specific QA variables +# variable "qa_mode" { - description = "define qa mode (Disable extra packages outside images)" + description = "Enable test/qa mode (disable extra packages usage not coming in the image)" + type = bool default = false } @@ -201,3 +467,12 @@ variable "hwcct" { type = bool default = false } + +# +# Pre deployment +# +variable "pre_deployment" { + description = "Enable pre deployment local execution. 
Only available for clients running Linux" + type = bool + default = false +} diff --git a/pillar_examples/README.md b/pillar_examples/README.md index 36160cd14..f4a602367 100644 --- a/pillar_examples/README.md +++ b/pillar_examples/README.md @@ -1,49 +1,42 @@ # Pillar examples -This folder stores pillar examples to use in the Salt provisioning. - ---- -- [SAP HANA and HA cluster](#sap-hana-and-ha-cluster) -- [DRBD cluster](#drbd-cluster-for-nfs) ---- -## SAP HANA and HA cluster -Depending on the provider used to deploy SAP HANA and the HA cluster, -the required parameters are slightly different, even though most of them -match. - -For user wants to deploy DRBD(NFS) cluster with HANA, -[DRBD cluster](#drbd-cluster-for-nfs) is also necessary to configure. - -Two possibilities here: - - - For a preconfigured environment, you can use pillar files which are in [HANA automatic directory](./automatic/hana). +This folder stores pillar examples to use in the Salt provisioning. +To run an initial deployment without specific customization the usage of the pillar files stored in the `automatic` folder is recommended, as this files are customized with parameters coming from terraform execution. The pillar files stored there are able to deploy a basic functional set of clusters in all of the available cloud providers. - **Could be used for testing purpose and not for production as they have default settings.** +The usage of the pillar files is really simple. Basically, copy the content of the examples directories in the next locations. +- `salt/hana_node/files/pillar/` for HANA configuration. +- `salt/drbd_node/files/pillar/` for DRBD configuration. +- `salt/netweaver_node/files/pillar/` for NETWEAVER configuration. 
- From git top-level folder, copy files: +The next commands can be used for that: - `cp pillar_examples/automatic/hana/*.sls salt/hana_node/files/pillar/` +``` +cp pillar_examples/automatic/hana/*.sls salt/hana_node/files/pillar/ +cp pillar_examples/automatic/drbd/*.sls salt/drbd_node/files/pillar/ +cp pillar_examples/automatic/netweaver/*.sls salt/netweaver_node/files/pillar/ +``` - - For a customize and production environment, you must use pillar files which are in your choosen [provider directory](../pillar_examples) (AWS, Azure, GCP, Libvirt). +Besides this option, the `terraform.tfvars` `pre_deployment` variable will execute these operations if it's enabled before the deployment. - From git top-level folder, copy files: +**`pre_deployment` usage only works in clients using Linux** - `cp pillar_examples/$PROVIDER/*.sls salt/hana_node/files/pillar` +For more advanced options, continue reading. - Please, **pay attention:** different from the previous case (preconfigured environment or automatic), the pillars must be customized, otherwise deployment will fail. +--- +- [SAP HANA and HA cluster](#sap-hana-and-ha-cluster) +- [DRBD cluster](#drbd-cluster-for-nfs) +- [SAP NETWEAVER and HA cluster](#sap-netweaver-and-ha-cluster) -All the information about how to tune the deployment is available in: -- https://github.com/SUSE/saphanabootstrap-formula (to manipulate the hana.sls file) -- https://github.com/SUSE/habootstrap-formula (to manipulate the cluster.sls file) +--- +# Advanced pillar configuration -### Libvirt specifics +The salt execution formulas are implemented in different projects. You can find all of the pillar options in each of them. -One thing is different with Libvirt provider, in pillar's directory, you will find two directories about HANA profiles (cost_optimized and performance_optimized). -Choose one according to your needs. 
+- https://github.com/SUSE/saphanabootstrap-formula (HANA configuration) +- https://github.com/SUSE/habootstrap-formula (HA cluster configuration) +- https://github.com/SUSE/drbd-formula (DRBD configuration) +- https://github.com/SUSE/sapnwbootstrap-formula (NETWEAVER or S4/HANA configuration) -Finally, if instead of deploying SAP HANA and the cluster together, to only -deploy one of them update the salt/hana_node/files/salt/top.sls file only using -the desired component and removing/commenting the other. # Pillar encryption @@ -59,20 +52,3 @@ SaltStack GPG renderer provides a secure encryption/decryption of pillar data. T - Only passwordless gpg keys are supported, and the already existing keys cannot be used. - If a masterless approach is used (as in the current automated deployment) the gpg private key must be imported in all the nodes. This might require the copy/paste of the keys. - -## DRBD cluster (for NFS) -Depending on the provider used to deploy DRBD cluster for NFS, -the required parameters are slightly different, even though most -of them match. 
- - - For a preconfigured environment, you can use pillar files which are in [DRBD automatic directory](./automatic/drbd) - - **Could be used for testing purpose and not for production as they have default settings.** - - From git top-level folder, copy files: - - `cp pillar_examples/automatic/drbd/*.sls salt/drbd_node/files/pillar/` - -All the information about how to tune the deployment is available in: -- https://github.com/SUSE/drbd-formula (to manipulate the drbd.sls file) -- https://github.com/SUSE/habootstrap-formula (to manipulate the cluster.sls file) diff --git a/pillar_examples/automatic/drbd/cluster.sls b/pillar_examples/automatic/drbd/cluster.sls index 377732c51..0cd434748 100644 --- a/pillar_examples/automatic/drbd/cluster.sls +++ b/pillar_examples/automatic/drbd/cluster.sls @@ -1,3 +1,9 @@ +{% if not grains.get('sbd_disk_device') %} +{% set sbd_disk_device = salt['cmd.run']('lsscsi | grep "LIO-ORG" | awk "{ if (NR=='~grains['sbd_disk_index']~') print \$NF }"', python_shell=true) %} +{% else %} +{% set sbd_disk_device = grains['sbd_disk_device'] %} +{% endif %} + cluster: install_packages: true name: 'drbd_cluster' @@ -14,24 +20,37 @@ cluster: module: softdog device: /dev/watchdog sbd: - device: {{ grains['sbd_disk_device'] }} + device: {{ sbd_disk_device }} ntp: pool.ntp.org {% if grains['provider'] == 'libvirt' %} sshkeys: overwrite: true password: linux {% endif %} - {% if grains.get('monitoring_enabled', False) %} - ha_exporter: true - {% else %} - ha_exporter: false + {% if grains['provider'] == 'azure' %} + corosync: + totem: + token: 30000 + token_retransmits_before_loss_const: 10 + join: 60 + consensus: 36000 + max_messages: 20 {% endif %} - + monitoring_enabled: {{ grains['monitoring_enabled']|default(False) }} configure: method: 'update' template: source: /srv/salt/drbd_files/templates/drbd_cluster.j2 parameters: - {% if grains['provider']== "azure" %} + {% if grains['provider'] == 'aws' %} + virtual_ip: {{ grains['drbd_cluster_vip'] }} + 
route_table: {{ grains['route_table'] }} + cluster_profile: {{ grains['aws_cluster_profile'] }} + instance_tag: {{ grains['aws_instance_tag'] }} + {% elif grains['provider']== "azure" %} probe: 61000 + {% elif grains['provider'] == 'gcp' %} + virtual_ip: {{ grains['drbd_cluster_vip'] }} + vpc_network_name: {{ grains['vpc_network_name'] }} + route_table: {{ grains['route_table'] }} {% endif %} diff --git a/pillar_examples/automatic/drbd/drbd.sls b/pillar_examples/automatic/drbd/drbd.sls index 8782517c5..1661af485 100644 --- a/pillar_examples/automatic/drbd/drbd.sls +++ b/pillar_examples/automatic/drbd/drbd.sls @@ -1,3 +1,5 @@ +{% set drbd_disk_device = salt['cmd.run']('realpath '~grains['drbd_disk_device']) %} + drbd: promotion: {{ grains['name_prefix'] }}01 @@ -62,15 +64,11 @@ drbd: resource: - name: "sapdata" device: "/dev/drbd1" - disk: {{ grains['drbd_disk_device'] }}1 + disk: {{ drbd_disk_device }}1 file_system: "xfs" mount_point: "/mnt/sapdata/HA1" - {%- if grains['provider'] == 'gcp' %} virtual_ip: {{ grains['drbd_cluster_vip'] }} - {%- else %} - virtual_ip: {{ ".".join(grains['host_ip'].split('.')[0:-1]) }}.201 - {%- endif %} nodes: - name: {{ grains['name_prefix'] }}01 diff --git a/pillar_examples/automatic/hana/cluster.sls b/pillar_examples/automatic/hana/cluster.sls index 801d19b99..1029bdebe 100644 --- a/pillar_examples/automatic/hana/cluster.sls +++ b/pillar_examples/automatic/hana/cluster.sls @@ -1,4 +1,9 @@ {% import_yaml "/root/salt/hana_node/files/pillar/hana.sls" as hana %} +{% if not grains.get('sbd_disk_device') %} +{% set sbd_disk_device = salt['cmd.run']('lsscsi | grep "LIO-ORG" | awk "{ if (NR=='~grains['sbd_disk_index']~') print \$NF }"', python_shell=true) %} +{% else %} +{% set sbd_disk_device = grains['sbd_disk_device'] %} +{% endif %} cluster: {% if grains.get('qa_mode') %} @@ -12,11 +17,12 @@ cluster: interface: eth0 unicast: True {% endif %} + join_timeout: 180 watchdog: module: softdog device: /dev/watchdog sbd: - device: {{ 
grains['sbd_disk_device'] }} + device: {{ sbd_disk_device }} ntp: pool.ntp.org {% if grains['provider'] == 'libvirt' %} sshkeys: @@ -25,11 +31,16 @@ cluster: {% endif %} resource_agents: - SAPHanaSR - {% if grains.get('monitoring_enabled', False) %} - ha_exporter: true - {% else %} - ha_exporter: false + {% if grains['provider'] == 'azure' %} + corosync: + totem: + token: 30000 + token_retransmits_before_loss_const: 10 + join: 60 + consensus: 36000 + max_messages: 20 {% endif %} + monitoring_enabled: {{ grains['monitoring_enabled']|default(False) }} {% if grains['init_type']|default('all') != 'skip-hana' %} configure: method: update @@ -38,18 +49,15 @@ cluster: parameters: sid: {{ hana.hana.nodes[0].sid }} instance: {{ hana.hana.nodes[0].instance }} - {% if grains['provider'] == 'azure' %} - virtual_ip: {{ grains['azure_lb_ip'] }} - {% elif grains['provider'] == 'gcp' %} - virtual_ip: {{ grains['hana_cluster_vip'] }} - {% elif grains['provider'] == 'aws' %} - virtual_ip: {{ grains['hana_cluster_vip'] }} + {% if grains['provider'] == 'aws' %} route_table: {{ grains['route_table'] }} cluster_profile: {{ grains['aws_cluster_profile'] }} instance_tag: {{ grains['aws_instance_tag'] }} - {% else %} - virtual_ip: {{ ".".join(grains['host_ips'][0].split('.')[0:-1]) }}.200 + {% elif grains['provider'] == 'gcp' %} + route_table: {{ grains['route_table'] }} + vpc_network_name: {{ grains['vpc_network_name'] }} {% endif %} + virtual_ip: {{ grains['hana_cluster_vip'] }} virtual_ip_mask: 24 {% if grains['scenario_type'] == 'cost-optimized' %} prefer_takeover: false diff --git a/pillar_examples/automatic/hana/hana.sls b/pillar_examples/automatic/hana/hana.sls index abde2f02a..9cbf9da04 100644 --- a/pillar_examples/automatic/hana/hana.sls +++ b/pillar_examples/automatic/hana/hana.sls @@ -2,14 +2,25 @@ hana: {% if grains.get('qa_mode') %} install_packages: false {% endif %} + {%- if grains.get('hana_platform_folder', False) %} + software_path: {{ grains['hana_inst_folder'] }}/{{ 
grains['hana_platform_folder'] }} + {%- elif grains.get('hana_sapcar_exe', False) and grains.get('hdbserver_sar', False) %} + sapcar_exe_file: {{ grains['hana_inst_folder'] }}/{{ grains['hana_sapcar_exe'] }} + hdbserver_sar_file: {{ grains['hana_inst_folder'] }}/{{ grains['hdbserver_sar'] }} + {%- else %} + software_path: {{ grains['hana_inst_folder'] }} + {%- endif %} + {%- if grains.get('hana_extract_dir', False) %} + hdbserver_extract_dir: {{ grains['hana_extract_dir'] }} + {%- endif %} saptune_solution: 'HANA' + monitoring_enabled: {{ grains['monitoring_enabled']|default(False) }} nodes: - host: {{ grains['name_prefix'] }}01 sid: prd instance: "00" password: YourPassword1234 install: - software_path: {{ grains['hana_inst_folder'] }} root_user: root {% if grains['provider'] == 'libvirt' %} root_password: linux @@ -48,7 +59,6 @@ hana: preload_column_tables: False {% endif %} install: - software_path: {{ grains['hana_inst_folder'] }} root_user: root {% if grains['provider'] == 'libvirt' %} root_password: linux @@ -74,7 +84,6 @@ hana: global_allocation_limit: '28600' preload_column_tables: False install: - software_path: {{ grains['hana_inst_folder'] }} root_user: root {% if grains['provider'] == 'libvirt' %} root_password: linux diff --git a/salt/netweaver_node/files/pillar/cluster.sls b/pillar_examples/automatic/netweaver/cluster.sls similarity index 69% rename from salt/netweaver_node/files/pillar/cluster.sls rename to pillar_examples/automatic/netweaver/cluster.sls index 0a562ba0f..c76e633f6 100644 --- a/salt/netweaver_node/files/pillar/cluster.sls +++ b/pillar_examples/automatic/netweaver/cluster.sls @@ -1,5 +1,9 @@ {%- import_yaml "/root/salt/netweaver_node/files/pillar/netweaver.sls" as netweaver %} -{%- set iprange = ".".join(grains['host_ips'][0].split('.')[0:-1]) %} +{%- if not grains.get('sbd_disk_device') %} +{%- set sbd_disk_device = salt['cmd.run']('lsscsi | grep "LIO-ORG" | awk "{ if (NR=='~grains['sbd_disk_index']~') print \$NF }"', 
python_shell=true) %} +{%- else %} +{%- set sbd_disk_device = grains['sbd_disk_device'] %} +{%- endif %} cluster: install_packages: true @@ -15,7 +19,7 @@ cluster: module: softdog device: /dev/watchdog sbd: - device: {{ grains['sbd_disk_device'] }} + device: {{ sbd_disk_device }} join_timeout: 180 wait_for_initialization: 20 ntp: pool.ntp.org @@ -24,14 +28,16 @@ cluster: overwrite: true password: linux {%- endif %} - {%- for node in netweaver.netweaver.nodes if node.host == grains['host'] %} - {%- if grains.get('monitoring_enabled', False) and node.sap_instance in ['ascs', 'ers'] %} - ha_exporter: true - {%- else %} - ha_exporter: false - {%- endif %} - {%- endfor %} - + {% if grains['provider'] == 'azure' %} + corosync: + totem: + token: 30000 + token_retransmits_before_loss_const: 10 + join: 60 + consensus: 36000 + max_messages: 20 + {% endif %} + monitoring_enabled: {{ grains['monitoring_enabled']|default(False) }} configure: method: update template: @@ -59,4 +65,8 @@ cluster: route_table: {{ grains['route_table'] }} cluster_profile: {{ grains['aws_cluster_profile'] }} instance_tag: {{ grains['aws_instance_tag'] }} + {%- elif grains['provider'] == 'gcp' %} + ascs_route_name: {{ grains['ascs_route_name'] }} + ers_route_name: {{ grains['ers_route_name'] }} + vpc_network_name: {{ grains['vpc_network_name'] }} {%- endif %} diff --git a/salt/netweaver_node/files/pillar/netweaver.sls b/pillar_examples/automatic/netweaver/netweaver.sls similarity index 74% rename from salt/netweaver_node/files/pillar/netweaver.sls rename to pillar_examples/automatic/netweaver/netweaver.sls index e03942ab2..3ab37b6c2 100644 --- a/salt/netweaver_node/files/pillar/netweaver.sls +++ b/pillar_examples/automatic/netweaver/netweaver.sls @@ -24,24 +24,31 @@ netweaver: sap_adm_password: SuSE1234 master_password: SuSE1234 sapmnt_inst_media: "{{ grains['netweaver_nfs_share'] }}" - swpm_folder: /netweaver_inst_media/SWPM_10_SP26_6 - sapexe_folder: /netweaver_inst_media/kernel_nw75_sar - 
additional_dvds: - - /netweaver_inst_media/51050829_3 # NW Export folder - - /netweaver_inst_media/51053381 # HANA HDB Client folder + {%- if grains.get('netweaver_swpm_folder', False) %} + swpm_folder: /sapmedia/NW/{{ grains['netweaver_swpm_folder'] }} + {%- endif %} + {%- if grains.get('netweaver_sapcar_exe', False) and grains.get('netweaver_swpm_sar', False) %} + sapcar_exe_file: /sapmedia/NW/{{ grains['netweaver_sapcar_exe'] }} + swpm_sar_file: /sapmedia/NW/{{ grains['netweaver_swpm_sar'] }} + {%- endif %} + {%- if grains.get('netweaver_swpm_extract_dir', False) %} + swpm_extract_dir: {{ grains['netweaver_swpm_extract_dir'] }} + {%- endif %} + sapexe_folder: /sapmedia/NW/{{ grains['netweaver_sapexe_folder'] }} + additional_dvds: {%- if not grains['netweaver_additional_dvds'] %} [] + {%- else %} + {%- for dvd in grains['netweaver_additional_dvds'] %} + - /sapmedia/NW/{{ dvd }} + {%- endfor %} + {%- endif %} # apply by default the netweaver solution saptune_solution: 'NETWEAVER' + monitoring_enabled: {{ grains['monitoring_enabled']|default(False) }} + hana: -# We have to unify the usage of this parameter, the aws option looks better -{%- if grains['provider'] == 'gcp' %} - host: {{ grains['hana_cluster_vip'] }} -{%- elif grains['provider'] == 'aws' %} host: {{ grains['hana_ip'] }} -{%- else %} - host: {{ iprange }}.200 -{%- endif %} sid: PRD instance: '00' password: YourPassword1234 @@ -50,7 +57,7 @@ netweaver: name: SAPABAP1 password: SuSE1234 - product_id: NW750.HDB.ABAPHA + product_id: {{ grains['netweaver_product_id'] }} {%- if grains['provider'] == 'aws' %} nfs_options: rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2 @@ -108,6 +115,9 @@ netweaver: root_user: root root_password: linux sap_instance: pas + # Add for S4/HANA + #extra_parameters: + # NW_liveCache.useLiveCache: "false" - host: {{ grains['name_prefix'] }}04 virtual_host: sapha1aas @@ -118,3 +128,5 @@ netweaver: root_user: root root_password: linux sap_instance: aas + # Add for S4/HANA + 
#attempts: 500 diff --git a/pillar_examples/aws/hana.sls b/pillar_examples/aws/hana.sls index 0faccbdda..809f90eaa 100644 --- a/pillar_examples/aws/hana.sls +++ b/pillar_examples/aws/hana.sls @@ -7,7 +7,7 @@ hana: instance: "00" password: 'SET YOUR PASSWORD' install: - software_path: '/root/hana_inst_media/' + software_path: '/sapmedia/HANA' root_user: 'root' root_password: '' system_user_password: 'SET YOUR PASSWORD' @@ -30,7 +30,7 @@ hana: instance: "00" password: 'SET YOUR PASSWORD' install: - software_path: '/root/hana_inst_media/' + software_path: '/sapmedia/HANA' root_user: 'root' root_password: '' system_user_password: 'SET YOUR PASSWORD' diff --git a/pillar_examples/azure/hana.sls b/pillar_examples/azure/hana.sls index 64498c88c..d947ac21b 100644 --- a/pillar_examples/azure/hana.sls +++ b/pillar_examples/azure/hana.sls @@ -6,7 +6,7 @@ hana: instance: "00" password: 'SET YOUR PASSWORD' install: - software_path: '/root/hana_inst_media' + software_path: '/sapmedia/HANA' root_user: 'root' root_password: '' system_user_password: 'SET YOUR PASSWORD' @@ -29,7 +29,7 @@ hana: instance: "00" password: 'SET YOUR PASSWORD' install: - software_path: '/root/hana_inst_media' + software_path: '/sapmedia/HANA' root_user: 'root' root_password: '' system_user_password: 'SET YOUR PASSWORD' diff --git a/pillar_examples/libvirt/cost_optimized/hana.sls b/pillar_examples/libvirt/cost_optimized/hana.sls index 81ba9da60..b53eb947d 100644 --- a/pillar_examples/libvirt/cost_optimized/hana.sls +++ b/pillar_examples/libvirt/cost_optimized/hana.sls @@ -6,7 +6,7 @@ hana: instance: "00" password: 'SET YOUR PASSWORD' install: - software_path: '/root/hana_inst_media' + software_path: '/sapmedia/HANA' root_user: 'root' root_password: 'linux' system_user_password: 'SET YOUR PASSWORD' @@ -33,7 +33,7 @@ hana: global_allocation_limit: '32100' preload_column_tables: False install: - software_path: '/root/hana_inst_media' + software_path: '/sapmedia/HANA' root_user: 'root' root_password: 'linux' 
system_user_password: 'SET YOUR PASSWORD' @@ -54,7 +54,7 @@ hana: global_allocation_limit: '28600' preload_column_tables: False install: - software_path: '/root/hana_inst_media' + software_path: '/sapmedia/HANA' root_user: 'root' root_password: 'linux' system_user_password: 'SET YOUR PASSWORD' diff --git a/pillar_examples/libvirt/performance_optimized/hana.sls b/pillar_examples/libvirt/performance_optimized/hana.sls index fd04bfa8c..bdea867da 100644 --- a/pillar_examples/libvirt/performance_optimized/hana.sls +++ b/pillar_examples/libvirt/performance_optimized/hana.sls @@ -6,7 +6,7 @@ hana: instance: "00" password: 'SET YOUR PASSWORD' install: - software_path: '/root/hana_inst_media' + software_path: '/sapmedia/HANA' root_user: 'root' root_password: 'linux' system_user_password: 'SET YOUR PASSWORD' @@ -29,7 +29,7 @@ hana: instance: "00" password: 'SET YOUR PASSWORD' install: - software_path: '/root/hana_inst_media' + software_path: '/sapmedia/HANA' root_user: 'root' root_password: 'linux' system_user_password: 'SET YOUR PASSWORD' diff --git a/salt/cluster_node/iscsi_initiator.sls b/salt/cluster_node/iscsi_initiator.sls index f29e4201f..02302d16d 100644 --- a/salt/cluster_node/iscsi_initiator.sls +++ b/salt/cluster_node/iscsi_initiator.sls @@ -4,6 +4,13 @@ open-iscsi: attempts: 3 interval: 15 +# lsscsi is used to retrieve the sbd disk in the automatic pillar files +lsscsi: + pkg.installed: + - retry: + attempts: 3 + interval: 15 + /etc/iscsi/initiatorname.iscsi: file.replace: - pattern: "^InitiatorName=.*" @@ -14,6 +21,12 @@ open-iscsi: - pattern: "^node.startup = manual" - repl: "node.startup = automatic" +iscsi-queue-depth: + file.replace: + - name: "/etc/iscsi/iscsid.conf" + - pattern: "^node.session.queue_depth = [0-9]*" + - repl: "node.session.queue_depth = 64" + iscsi: service.running: - enable: True diff --git a/salt/default/pkgs.sls b/salt/default/pkgs.sls index 1999de67f..40f370fd0 100644 --- a/salt/default/pkgs.sls +++ b/salt/default/pkgs.sls @@ -5,6 +5,4 
@@ install_additional_packages: {% for package in grains['additional_packages'] %} - {{ package }} {% endfor %} - - require: - - sls: default.repos {% endif %} diff --git a/salt/deployment.sh b/salt/deployment.sh deleted file mode 100644 index a7c62feff..000000000 --- a/salt/deployment.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -xe - -# Execute the states within /root/salt/pre_installation -# This first execution is done to configure the salt minion and install the iscsi formula -salt-call --local --file-root=/root/salt \ - --log-level=info \ - --log-file=/tmp/salt-pre-installation.log \ - --log-file-level=debug \ - --retcode-passthrough \ - --force-color state.apply pre_installation || exit 1 - -# Execute the states defined in /root/salt/top.sls -# This execution is done to pre configure the cluster nodes, the support machines and install the formulas -salt-call --local \ - --pillar-root=/root/salt/pillar/ \ - --log-level=info \ - --log-file=/tmp/salt-deployment.log \ - --log-file-level=debug \ - --retcode-passthrough \ - --force-color state.highstate saltenv=predeployment || exit 1 diff --git a/salt/drbd_node/files/templates/drbd_cluster.j2 b/salt/drbd_node/files/templates/drbd_cluster.j2 index 970a5b679..39f75a62b 100644 --- a/salt/drbd_node/files/templates/drbd_cluster.j2 +++ b/salt/drbd_node/files/templates/drbd_cluster.j2 @@ -8,54 +8,90 @@ # rsc_defaults \ - resource-stickiness="1000" \ - migration-threshold="5000" + resource-stickiness="1000" \ + migration-threshold="5000" op_defaults \ - timeout="600" + timeout="600" primitive nfsserver systemd:nfs-server \ - op monitor interval=30s + op monitor interval=30s clone cl-nfsserver nfsserver \ - meta interleave=true + meta interleave=true # Need to edit drbd pillar file. 
-{% for res in drbd.resource %} +{%- for res in drbd.resource %} primitive drbd-{{ res.name }} ocf:linbit:drbd \ - params drbd_resource={{ res.name }} \ - op monitor interval=15 role=Master \ - op monitor interval=30 role=Slave + params drbd_resource={{ res.name }} \ + op monitor interval=15 role=Master \ + op monitor interval=30 role=Slave ms ms_{{ res.name }} drbd-{{ res.name }} \ - meta master-max=1 master-node-max=1 \ - meta clone-max={{ res.nodes | length }} clone-node-max=1 \ - meta notify=true target-role=Started + meta master-max=1 master-node-max=1 \ + meta clone-max={{ res.nodes | length }} clone-node-max=1 \ + meta notify=true target-role=Started primitive fs_{{ res.name }} ocf:heartbeat:Filesystem \ - params device={{ res.device }} directory={{ res.mount_point }} fstype={{ res.file_system }} \ - options=noatime,nodiratime \ - op monitor interval="20" timeout="40s" + params device={{ res.device }} directory={{ res.mount_point }} fstype={{ res.file_system }} \ + options=noatime,nodiratime \ + op monitor interval="20" timeout="40s" -{% set nic = "nic="~pillar.cluster.interface|default('eth0')|json if cloud_provider == "google-cloud-platform" else "nic="~pillar.cluster.interface|json if pillar.cluster.interface is defined else "" %} -{%- set cidr_netmask = "cidr_netmask=32" if cloud_provider == "google-cloud-platform" else "" %} +{%- if cloud_provider not in ["amazon-web-services", "google-cloud-platform"] %} +{%- set cidr_netmask = "cidr_netmask="~data.virtual_ip_mask|default("32") %} +{%- set nic = "nic="~pillar.cluster.interface|json if pillar.cluster.interface is defined else "" %} primitive vip_{{ res.name }}_nfs IPaddr2 \ - params ip={{ res.virtual_ip }} {{ cidr_netmask }} {{ nic }} \ - op monitor interval=10 timeout=20 + params ip={{ res.virtual_ip }} {{ cidr_netmask }} {{ nic }} \ + op monitor interval=10 timeout=20 -{% if cloud_provider == "microsoft-azure" %} -primitive nc_{{ res.name }}_nfs anything \ - params binfile="/usr/bin/socat" 
cmdline_options="-U TCP-LISTEN:{{ data.probe }},backlog=10,fork,reuseaddr /dev/null" \ +{%- if cloud_provider == "microsoft-azure" %} +primitive rsc_socat_{{ res.name }}_nfs azure-lb \ + params port={{ data.probe }} \ op monitor timeout=20s interval=10 depth=0 -{% endif %} + +{%- endif %} + +{%- elif cloud_provider == "google-cloud-platform" %} +primitive rsc_gcp_stonith_{{ res.name }}_{{ grains['host'] }} stonith:fence_gce \ + params plug={{ grains['gcp_instance_name'] }} pcmk_host_map="{{ grains['host'] }}:{{ grains['gcp_instance_name'] }}" \ + meta target-role=Started + +primitive vip_{{ res.name }}_nfs ocf:heartbeat:gcp-vpc-move-route \ + params ip={{ data.virtual_ip }} vpc_network={{ data.vpc_network_name }} route_name={{ data.route_table }} \ + op start interval=0 timeout=180 \ + op stop interval=0 timeout=180 \ + op monitor interval=60 timeout=60 + +{%- elif cloud_provider == "amazon-web-services" %} + +property $id="cib-bootstrap-options" \ + stonith-enabled="true" \ + stonith-action="off" \ + stonith-timeout="150s" + +primitive rsc_aws_stonith_{{ res.name }}_nfs stonith:external/ec2 \ + params tag={{ data.instance_tag }} profile={{ data.cluster_profile }} \ + op start interval=0 timeout=180 \ + op stop interval=0 timeout=180 \ + op monitor interval=120 timeout=60 \ + meta target-role=Started + +primitive vip_{{ res.name }}_nfs ocf:suse:aws-vpc-move-ip \ + params ip={{ data.virtual_ip }} routing_table={{ data.route_table }} \ + interface={{ pillar.cluster.interface|default('eth0')|json }} profile={{ data.cluster_profile }} \ + op start interval=0 timeout=180 \ + op stop interval=0 timeout=180 \ + op monitor interval=60 timeout=60 + +{%- endif %} primitive exportfs_work_{{ res.name }} exportfs \ - params directory={{ res.mount_point }} fsid={{ nfsid + loop.index0 }} \ - options="rw,no_root_squash" clientspec="*" wait_for_leasetime_on_stop=true \ - op monitor interval=30s + params directory={{ res.mount_point }} fsid={{ nfsid + loop.index0 }} \ + 
options="rw,no_root_squash" clientspec="*" wait_for_leasetime_on_stop=true \ + op monitor interval=30s -group g-nfs_{{ res.name }} fs_{{ res.name }} vip_{{ res.name }}_nfs exportfs_work_{{ res.name }} {% if cloud_provider == "microsoft-azure" %} nc_{{ res.name }}_nfs {% endif %} +group g-nfs_{{ res.name }} fs_{{ res.name }} vip_{{ res.name }}_nfs exportfs_work_{{ res.name }} {% if cloud_provider == "microsoft-azure" %} rsc_socat_{{ res.name }}_nfs {% endif %} order o_drbd_{{ res.name }}-before-fs_{{ res.name }} \ ms_{{ res.name }}:promote g-nfs_{{ res.name }}:start @@ -63,4 +99,4 @@ order o_drbd_{{ res.name }}-before-fs_{{ res.name }} \ colocation c_fs_{{ res.name }}-with_drbd_{{ res.name }} \ inf: g-nfs_{{ res.name }} ms_{{ res.name }}:Master\ -{% endfor %} +{%- endfor %} diff --git a/salt/formula.sh b/salt/formula.sh deleted file mode 100644 index 8ecc49c1b..000000000 --- a/salt/formula.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash -xe - -salt-call --local \ - --log-level=info \ - --log-file=/tmp/salt-formula.log \ - --log-file-level=debug \ - --retcode-passthrough \ - --force-color state.highstate saltenv=base || exit 1 diff --git a/salt/hana_node/download_hana_inst.sls b/salt/hana_node/download_hana_inst.sls index d0effb7ed..98a9f6d84 100644 --- a/salt/hana_node/download_hana_inst.sls +++ b/salt/hana_node/download_hana_inst.sls @@ -8,13 +8,14 @@ download_files_from_s3: {% elif grains['provider'] == 'gcp' %} +{% set hana_inst_disk_device = salt['cmd.run']('realpath '~grains['hana_inst_disk_device']) %} hana_inst_partition: cmd.run: - name: | - /usr/sbin/parted -s {{ grains['hana_inst_disk_device'] }} mklabel msdos && \ - /usr/sbin/parted -s {{ grains['hana_inst_disk_device'] }} mkpart primary ext2 1M 100% && sleep 1 && \ - /sbin/mkfs -t xfs {{ grains['hana_inst_disk_device'] }}1 - - unless: ls {{ grains['hana_inst_disk_device'] }}1 + /usr/sbin/parted -s {{ hana_inst_disk_device }} mklabel msdos && \ + /usr/sbin/parted -s {{ hana_inst_disk_device }} mkpart 
primary ext2 1M 100% && sleep 1 && \ + /sbin/mkfs -t xfs {{ hana_inst_disk_device }}1 + - unless: ls {{ hana_inst_disk_device }}1 - require: - pkg: parted @@ -26,7 +27,7 @@ hana_inst_directory: - makedirs: True mount.mounted: - name: {{ grains['hana_inst_folder'] }} - - device: {{ grains['hana_inst_disk_device'] }}1 + - device: {{ hana_inst_disk_device }}1 - fstype: xfs - mkmnt: True - persist: True diff --git a/salt/install-salt-minion.sh b/salt/install-salt-minion.sh deleted file mode 100644 index 0bced8736..000000000 --- a/salt/install-salt-minion.sh +++ /dev/null @@ -1,75 +0,0 @@ -#!/bin/bash -REGISTER="false" -DEREGISTER="false" - -function print_help(){ - cat <<-EOF - -Install the salt-minion - -If registration code is provided it will be installed from the official SUSE Repositories. - -For this, a Registration Code is required and your system will be -connected to the SUSE Customer Center. - -Supported Options: - - -r [REG CODE] Registration Code that will be used to register the system. - -d Deregister the System after the salt-minion installation (only makes sense if -r flag is set) - -h Show this help. - -EOF -} - - - -function install_salt_minion(){ - - # If required, REGISTER - if [[ $REGISTER != "false" ]]; then - # Check SLE version - source /etc/os-release - # Register the system on SCC - SUSEConnect -r "$REGISTER" - - # Register the modules accordingly with the SLE version. - if [[ $VERSION_ID =~ ^12\.? ]]; then - SUSEConnect -p sle-module-adv-systems-management/12/x86_64 - elif [[ $VERSION_ID =~ ^15\.? ]]; then - SUSEConnect -p sle-module-basesystem/$VERSION_ID/x86_64 - else - echo "SLE Product version not supported by this script. Please, use version 12 or higher." 
- exit 1 - fi - fi - - # We have to force refresh the repos and the keys (keys may change during lifetime of this OS/image) - zypper --non-interactive --gpg-auto-import-keys refresh --force --services - zypper --non-interactive install salt-minion - - # If required, DEREGISTER - if [[ $REGISTER != "false" && $DEREGISTER != "false" ]]; then - SUSEConnect -d - fi -} - - -while getopts ":hdr:" opt; do - case $opt in - h) - print_help - exit 0 - ;; - d) - DEREGISTER="true" - ;; - r) - REGISTER="$OPTARG" - ;; - *) - echo "Invalid option -$OPTARG" >&2 - exit 1 - ;; - esac -done -install_salt_minion diff --git a/salt/monitoring/grafana.sls b/salt/monitoring/grafana.sls new file mode 100644 index 000000000..035644366 --- /dev/null +++ b/salt/monitoring/grafana.sls @@ -0,0 +1,52 @@ +grafana: + pkg.installed: + - name: grafana + - retry: + attempts: 3 + interval: 15 + +grafana_anonymous_login_configuration: + file.blockreplace: + - name: /etc/grafana/grafana.ini + - marker_start: '#################################### Anonymous Auth ######################' + - marker_end: '#################################### Github Auth ##########################' + - content: | + [auth.anonymous] + enabled = true + org_name = Main Org. 
+ org_role = Admin + - require: + - pkg: grafana + +grafana_provisioning: + file.recurse: + - name: /etc/grafana/provisioning + - source: salt://monitoring/grafana/provisioning + - clean: True + - user: grafana + - group: grafana + - require: + - pkg: grafana + +grafana_provisioning_datasources: + file.managed: + - name: /etc/grafana/provisioning/datasources/datasources.yml + - source: salt://monitoring/grafana/datasources.yml.j2 + - template: jinja + - makedirs: True + - user: grafana + - group: grafana + - require: + - pkg: grafana + - file: grafana_provisioning + +grafana_service: + service.running: + - name: grafana-server + - enable: True + - restart: True + - require: + - pkg: grafana + - file: grafana_anonymous_login_configuration + - file: grafana_provisioning + - file: grafana_provisioning_datasources diff --git a/salt/monitoring/provisioning/datasources/prometheus_localhost.yml b/salt/monitoring/grafana/datasources.yml.j2 similarity index 80% rename from salt/monitoring/provisioning/datasources/prometheus_localhost.yml rename to salt/monitoring/grafana/datasources.yml.j2 index 286601a63..0d47c4679 100644 --- a/salt/monitoring/provisioning/datasources/prometheus_localhost.yml +++ b/salt/monitoring/grafana/datasources.yml.j2 @@ -5,7 +5,7 @@ datasources: - name: Prometheus type: prometheus access: proxy - url: http://localhost:9090/ + url: http://{{ grains['public_ip'] }}:9090 basicAuth: False isDefault: True editable: true diff --git a/salt/monitoring/grafana/provisioning/dashboards/dashboards.yaml b/salt/monitoring/grafana/provisioning/dashboards/dashboards.yaml new file mode 100644 index 000000000..eed212bd6 --- /dev/null +++ b/salt/monitoring/grafana/provisioning/dashboards/dashboards.yaml @@ -0,0 +1,10 @@ +apiVersion: 1 + +providers: +- name: SUSE + folder: '' + type: file + allowUiUpdates: true + editable: true + options: + path: /etc/grafana/provisioning diff --git a/salt/monitoring/provisioning/dashboards/cluster-status-real-time-alerts.json 
b/salt/monitoring/grafana/provisioning/dashboards/ha-cluster-details.json similarity index 59% rename from salt/monitoring/provisioning/dashboards/cluster-status-real-time-alerts.json rename to salt/monitoring/grafana/provisioning/dashboards/ha-cluster-details.json index ad0455546..f840fd0af 100644 --- a/salt/monitoring/provisioning/dashboards/cluster-status-real-time-alerts.json +++ b/salt/monitoring/grafana/provisioning/dashboards/ha-cluster-details.json @@ -19,6 +19,12 @@ "name": "Grafana", "version": "6.3.5" }, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "" + }, { "type": "datasource", "id": "prometheus", @@ -56,44 +62,88 @@ "gnetId": null, "graphTooltip": 0, "id": null, - "iteration": 1580985999837, + "iteration": 1588173658321, "links": [], "panels": [ { - "collapsed": false, "gridPos": { - "h": 1, - "w": 24, + "h": 5, + "w": 3, "x": 0, "y": 0 }, - "id": 155, - "panels": [], - "title": "Alerts", - "type": "row" + "id": 92, + "options": { + "fieldOptions": { + "calcs": [ + "last" + ], + "defaults": { + "decimals": 0, + "mappings": [], + "max": 1, + "min": 0, + "thresholds": [ + { + "color": "dark-red", + "value": null + }, + { + "color": "#EAB839", + "value": 0.5 + }, + { + "color": "dark-green", + "value": 1 + } + ], + "unit": "percentunit" + }, + "override": {}, + "values": false + }, + "orientation": "auto", + "showThresholdLabels": false, + "showThresholdMarkers": true + }, + "pluginVersion": "6.3.5", + "targets": [ + { + "expr": "count(ha_cluster_pacemaker_nodes{instance=\"$dc_instance\", status=\"online\"} == 1) / count(count(ha_cluster_pacemaker_nodes{instance=\"$dc_instance\"}) by (node))", + "instant": true, + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Online nodes", + "type": "gauge" }, { + "cacheTimeout": null, "columns": [], "fontSize": "100%", "gridPos": { - "h": 3, - "w": 24, - "x": 0, - "y": 1 + "h": 5, + "w": 7, + "x": 3, + "y": 0 }, - "id": 151, + "id": 98, + "links": [], "options": 
{}, - "pageSize": 6, + "pageSize": null, + "pluginVersion": "6.3.5", "scroll": true, "showHeader": true, "sort": { - "col": 0, - "desc": true + "col": 2, + "desc": false }, "styles": [ { "alias": "Name", - "colorMode": "row", + "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", "rgba(237, 129, 40, 0.89)", @@ -102,13 +152,13 @@ "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 2, "mappingType": 1, - "pattern": "alertname", + "pattern": "node", "thresholds": [], "type": "number", "unit": "short" }, { - "alias": "State", + "alias": "Type", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -118,30 +168,44 @@ "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 2, "mappingType": 1, - "pattern": "alertstate", + "pattern": "type", "thresholds": [], "type": "string", - "unit": "short", - "valueMaps": [] + "unit": "short" }, { - "alias": "", - "colorMode": null, + "alias": "Online", + "colorMode": "value", "colors": [ "rgba(245, 54, 54, 0.9)", "rgba(237, 129, 40, 0.89)", "rgba(50, 172, 45, 0.97)" ], "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, + "decimals": 0, + "link": false, "mappingType": 1, - "pattern": "Time", - "thresholds": [], - "type": "date", - "unit": "short" + "pattern": "Value #A", + "sanitize": false, + "thresholds": [ + "0", + "1" + ], + "type": "string", + "unit": "short", + "valueMaps": [ + { + "text": "Yes", + "value": "1" + }, + { + "text": "No", + "value": "0" + } + ] }, { - "alias": "", + "alias": "Ip", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -151,168 +215,125 @@ "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 2, "mappingType": 1, - "pattern": "__name__", + "pattern": "ip", "thresholds": [], - "type": "hidden", + "type": "string", "unit": "short" }, + { + "alias": "Unclean", + "colorMode": "cell", + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value #C", + 
"thresholds": [ + "1", + "1" + ], + "type": "string", + "unit": "short", + "valueMaps": [ + { + "text": "Yes", + "value": "1" + }, + { + "text": "No", + "value": "0" + } + ] + }, { "alias": "", - "colorMode": "row", + "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", "rgba(237, 129, 40, 0.89)", "rgba(50, 172, 45, 0.97)" ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 2, - "mappingType": 1, "pattern": "/.*/", "thresholds": [], - "type": "number", + "type": "hidden", "unit": "short" } ], "targets": [ { - "expr": "ALERTS{job=\"$hacluster\"}", + "expr": "sum(label_replace(label_replace(node_uname_info{job=\"$cluster\"}, 'node', '$1', 'nodename', '(.*)'), 'ip', '$1', 'instance','(.*):.*')) by (node,ip)", "format": "table", + "hide": true, "instant": true, - "intervalFactor": 1, - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "", - "transform": "table", - "type": "table" - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 4 - }, - "id": 149, - "panels": [], - "title": "Pacemaker and Corosync", - "type": "row" - }, - { - "cacheTimeout": null, - "gridPos": { - "h": 5, - "w": 9, - "x": 0, - "y": 5 - }, - "id": 104, - "links": [], - "options": { - "displayMode": "gradient", - "fieldOptions": { - "calcs": [ - "last" - ], - "defaults": { - "mappings": [ - { - "from": "", - "id": 1, - "operator": "", - "text": "0", - "to": "", - "type": 1, - "value": "null" - } - ], - "max": 2, - "min": 0, - "thresholds": [ - { - "color": "dark-red", - "value": null - }, - { - "color": "dark-green", - "value": 0 - } - ], - "title": "" - }, - "override": {}, - "values": false - }, - "orientation": "horizontal" - }, - "pluginVersion": "6.3.5", - "targets": [ - { - "expr": "count(ha_cluster_pacemaker_nodes{instance=~\"$node_ip:\\\\d+\", status=\"expected_up\"})", - "instant": true, - "legendFormat": "Expected Up", - "refId": "A" + "refId": "B" }, { - "expr": "count(ha_cluster_pacemaker_nodes{instance=~\"$node_ip:\\\\d+\", 
status=\"dc\"})", + "expr": "sum(ha_cluster_pacemaker_nodes{status=\"unclean\", instance=\"$dc_instance\"}) by (node)", + "format": "table", + "hide": false, "instant": true, - "legendFormat": "DC", "refId": "C" }, { - "expr": "count(ha_cluster_pacemaker_nodes{instance=~\"$node_ip:\\\\d+\", status=\"online\", type=\"member\"})", - "instant": true, - "legendFormat": "Members", - "refId": "D" - }, - { - "expr": "count(ha_cluster_pacemaker_nodes{instance=~\"$node_ip:\\\\d+\", status=\"online\", type=\"remote\"})", + "expr": "sum(ha_cluster_pacemaker_nodes{status=\"online\", instance=\"$dc_instance\"}) by (node)", + "format": "table", + "hide": false, "instant": true, - "legendFormat": "Remote", - "refId": "E" + "legendFormat": "", + "refId": "A" }, { - "expr": "count(ha_cluster_pacemaker_nodes{instance=~\"$node_ip:\\\\d+\", status=\"online\", type=\"ping\"})", + "expr": "sum(ha_cluster_pacemaker_nodes{instance=\"$dc_instance\"}) by (type)", + "format": "table", + "hide": false, "instant": true, - "legendFormat": "Ping", - "refId": "F" + "refId": "D" } ], "timeFrom": null, "timeShift": null, "title": "Nodes", - "type": "bargauge" + "transform": "table", + "type": "table" }, { "gridPos": { - "h": 8, - "w": 4, - "x": 9, - "y": 5 + "h": 5, + "w": 3, + "x": 10, + "y": 0 }, - "id": 92, + "id": 168, "options": { "fieldOptions": { "calcs": [ - "mean" + "last" ], "defaults": { + "decimals": 0, "mappings": [], - "max": 2, + "max": 1, "min": 0, "thresholds": [ { "color": "dark-red", "value": null }, + { + "color": "#EAB839", + "value": 0.5 + }, { "color": "dark-green", "value": 1 } - ] + ], + "unit": "percentunit" }, "override": {}, "values": false @@ -324,22 +345,22 @@ "pluginVersion": "6.3.5", "targets": [ { - "expr": "count(ha_cluster_pacemaker_nodes{instance=~\"$node_ip:\\\\d+\", status=\"online\"})", + "expr": "count(ha_cluster_pacemaker_resources{instance=\"$dc_instance\", status=\"active\"} == 1) / count(count(ha_cluster_pacemaker_resources{instance=\"$dc_instance\"}) by 
(resource,node))", "instant": true, "refId": "A" } ], "timeFrom": null, "timeShift": null, - "title": "Online nodes", + "title": "Active Resources", "type": "gauge" }, { "gridPos": { - "h": 8, - "w": 6, + "h": 5, + "w": 7, "x": 13, - "y": 5 + "y": 0 }, "id": 140, "options": { @@ -371,25 +392,25 @@ "pluginVersion": "6.3.5", "targets": [ { - "expr": "ha_cluster_corosync_quorum_votes{instance=~\"$node_ip:\\\\d+\",type=\"expected_votes\"}", + "expr": "ha_cluster_corosync_quorum_votes{instance=\"$dc_instance\",type=\"expected_votes\"}", "instant": true, "legendFormat": "Expected votes", "refId": "A" }, { - "expr": "ha_cluster_corosync_quorum_votes{instance=~\"$node_ip:\\\\d+\",type=\"highest_expected\"}", + "expr": "ha_cluster_corosync_quorum_votes{instance=\"$dc_instance\",type=\"highest_expected\"}", "instant": true, "legendFormat": "Highest expected votes", "refId": "B" }, { - "expr": "ha_cluster_corosync_quorum_votes{instance=~\"$node_ip:\\\\d+\", type=\"total_votes\"}", + "expr": "ha_cluster_corosync_quorum_votes{instance=\"$dc_instance\", type=\"total_votes\"}", "instant": true, "legendFormat": "Total votes", "refId": "C" }, { - "expr": "ha_cluster_corosync_quorum_votes{instance=~\"$node_ip:\\\\d+\", type=\"quorum\"}", + "expr": "ha_cluster_corosync_quorum_votes{instance=\"$dc_instance\", type=\"quorum\"}", "instant": true, "legendFormat": "Quorum", "refId": "D" @@ -405,9 +426,9 @@ "colorBackground": true, "colorValue": false, "colors": [ - "#299c46", - "#37872D", - "#d44a3a" + "#F2495C", + "#F2495C", + "#37872D" ], "format": "none", "gauge": { @@ -418,10 +439,10 @@ "thresholdMarkers": true }, "gridPos": { - "h": 4, - "w": 3, - "x": 19, - "y": 5 + "h": 3, + "w": 2, + "x": 20, + "y": 0 }, "id": 138, "interval": null, @@ -463,12 +484,12 @@ "tableColumn": "", "targets": [ { - "expr": "ha_cluster_corosync_quorate{instance=~\"$node_ip:\\\\d+\"}", + "expr": "ha_cluster_corosync_quorate{instance=\"$dc_instance\"}", "instant": true, "refId": "A" } ], - "thresholds": 
"0.1", + "thresholds": "0,1", "timeFrom": null, "timeShift": null, "title": "Cluster quorate", @@ -482,28 +503,27 @@ }, { "op": "=", - "text": "YES", + "text": "OK", "value": "1" }, { "op": "=", - "text": "NO", + "text": "KO", "value": "0" } ], - "valueName": "avg" + "valueName": "current" }, { "cacheTimeout": null, - "colorBackground": false, + "colorBackground": true, "colorValue": false, "colors": [ "#299c46", - "rgba(237, 129, 40, 0.89)", + "#37872D", "#d44a3a" ], - "decimals": null, - "format": "dateTimeFromNow", + "format": "none", "gauge": { "maxValue": 100, "minValue": 0, @@ -515,9 +535,9 @@ "h": 3, "w": 2, "x": 22, - "y": 5 + "y": 0 }, - "id": 159, + "id": 136, "interval": null, "links": [], "mappingType": 1, @@ -557,15 +577,15 @@ "tableColumn": "", "targets": [ { - "expr": "ha_cluster_pacemaker_config_last_change{instance=~\"$node_ip:\\\\d+\"} * 1000 - 3600000", - "instant": false, + "expr": "ha_cluster_corosync_ring_errors{instance=\"$dc_instance\"}", + "instant": true, "refId": "A" } ], - "thresholds": "", + "thresholds": "0,1", "timeFrom": null, "timeShift": null, - "title": "Last CIB change", + "title": "Faulty Corosync Rings", "type": "singlestat", "valueFontSize": "80%", "valueMaps": [ @@ -575,7 +595,7 @@ "value": "null" } ], - "valueName": "current" + "valueName": "avg" }, { "cacheTimeout": null, @@ -586,7 +606,8 @@ "rgba(237, 129, 40, 0.89)", "#d44a3a" ], - "format": "none", + "decimals": null, + "format": "dateTimeFromNow", "gauge": { "maxValue": 100, "minValue": 0, @@ -595,12 +616,12 @@ "thresholdMarkers": true }, "gridPos": { - "h": 3, - "w": 2, - "x": 22, - "y": 8 + "h": 2, + "w": 4, + "x": 20, + "y": 3 }, - "id": 112, + "id": 159, "interval": null, "links": [], "mappingType": 1, @@ -640,7 +661,7 @@ "tableColumn": "", "targets": [ { - "expr": "count(ha_cluster_pacemaker_resources{instance=~\"$node_ip:\\\\d+\", status=\"active\"})", + "expr": "ha_cluster_pacemaker_config_last_change{instance=\"$dc_instance\"} * 1000", "instant": true, "refId": 
"A" } @@ -648,543 +669,237 @@ "thresholds": "", "timeFrom": null, "timeShift": null, - "title": "Resouces configured", + "title": "Last CIB change", "type": "singlestat", - "valueFontSize": "100%", + "valueFontSize": "50%", "valueMaps": [ { "op": "=", - "text": "0", + "text": "N/A", "value": "null" } ], - "valueName": "avg" - }, - { - "cacheTimeout": null, - "colorBackground": true, - "colorValue": false, - "colors": [ - "#299c46", - "#37872D", - "#d44a3a" - ], - "format": "none", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 4, - "w": 3, - "x": 19, - "y": 9 - }, - "id": 136, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "options": {}, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false, - "ymax": null, - "ymin": null - }, - "tableColumn": "", - "targets": [ - { - "expr": "ha_cluster_corosync_ring_errors{instance=~\"$node_ip:\\\\d+\"}", - "instant": true, - "refId": "A" - } - ], - "thresholds": "0,1", - "timeFrom": null, - "timeShift": null, - "title": "Corosync Ring errors total", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "avg" + "valueName": "current" }, { + "aliasColors": { + "dc": "blue", + "expected_up": "dark-green", + "maintenance": "yellow", + "online": "green", + "pending": "dark-yellow", + "shutdown": "orange", + "standby": "light-yellow", + "standby_onfail": "semi-dark-red", + "unclean": "dark-red" + }, + 
"bars": false, "cacheTimeout": null, + "dashLength": 10, + "dashes": false, + "fill": 4, + "fillGradient": 0, "gridPos": { - "h": 5, - "w": 9, + "h": 6, + "w": 10, "x": 0, - "y": 10 + "y": 5 }, - "id": 98, + "id": 104, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, "links": [], + "nullPointMode": "null as zero", "options": { - "displayMode": "gradient", - "fieldOptions": { - "calcs": [ - "last" - ], - "defaults": { - "mappings": [ - { - "from": "", - "id": 1, - "operator": "", - "text": "0", - "to": "", - "type": 1, - "value": "null" - } - ], - "max": 2, - "min": 0, - "thresholds": [ - { - "color": "green", - "value": null - }, - { - "color": "yellow", - "value": 1 - }, - { - "color": "red", - "value": 2 - } - ] - }, - "override": {}, - "values": false - }, - "orientation": "horizontal" + "dataLinks": [] }, + "percentage": false, "pluginVersion": "6.3.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, "targets": [ { - "expr": "sum(count(ha_cluster_pacemaker_nodes{instance=~\"$node_ip:\\\\d+\", status=\"pending\"}) or up{instance=~\"$node_ip:\\\\d+\"} * 0)", - "hide": false, - "instant": true, - "legendFormat": "Pending", - "refId": "A" - }, - { - "expr": "sum(count(ha_cluster_pacemaker_nodes{instance=~\"$node_ip:\\\\d+\", status=\"standby\"}) or up{instance=~\"$node_ip:\\\\d+\"} * 0)", - "hide": false, - "instant": true, - "legendFormat": "Stand-by", + "expr": "sum(ha_cluster_pacemaker_nodes{instance=\"$dc_instance\"}) by (status)", + "instant": false, + "legendFormat": "{{status}}", "refId": "B" - }, - { - "expr": "sum(count(ha_cluster_pacemaker_nodes{instance=~\"$node_ip:\\\\d+\", status=\"maintenance\"}) or up{instance=~\"$node_ip:\\\\d+\"} * 0)", - "format": "time_series", - "hide": false, - "instant": true, - "legendFormat": 
"Maintenance", - "refId": "C" - }, - { - "expr": "sum(count(ha_cluster_pacemaker_nodes{instance=~\"$node_ip:\\\\d+\", status=\"unclean\"}) or up{instance=~\"$node_ip:\\\\d+\"} * 0)", - "hide": false, - "instant": true, - "legendFormat": "Unclean", - "refId": "D" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "", - "type": "bargauge" - }, - { - "cacheTimeout": null, - "colorBackground": true, - "colorValue": false, - "colors": [ - "#d44a3a", - "rgba(237, 129, 40, 0.89)", - "#37872D" - ], - "format": "none", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 3, - "w": 2, - "x": 22, - "y": 11 - }, - "id": 118, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "options": {}, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false, - "ymax": null, - "ymin": null - }, - "tableColumn": "", - "targets": [ - { - "expr": "count(ha_cluster_pacemaker_resources{instance=~\"$node_ip:\\\\d+\", role=\"master\",status=\"active\"})", - "instant": true, - "refId": "A" } ], - "thresholds": "0,1", + "thresholds": [], "timeFrom": null, + "timeRegions": [], "timeShift": null, - "title": "Master resources", - "type": "singlestat", - "valueFontSize": "100%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "avg" - }, - { - "columns": [], - "fontSize": "100%", - "gridPos": { - "h": 6, - "w": 13, - "x": 9, - "y": 13 + "title": "Node statuses over time", + "tooltip": { + "shared": true, + "sort": 0, + 
"value_type": "individual" }, - "id": 108, - "options": {}, - "pageSize": null, - "pluginVersion": "6.3.5", - "repeat": null, - "repeatDirection": "h", - "scroll": true, - "showHeader": true, - "sort": { - "col": 2, - "desc": false + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] }, - "styles": [ - { - "alias": "Resource", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "mappingType": 1, - "pattern": "resource", - "thresholds": [], - "type": "string", - "unit": "short" - }, - { - "alias": "Status", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "mappingType": 1, - "pattern": "status", - "thresholds": [], - "type": "string", - "unit": "short", - "valueMaps": [] - }, - { - "alias": "Migration threshold", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 0, - "mappingType": 1, - "pattern": "Value #B", - "thresholds": [], - "type": "number", - "unit": "none" - }, - { - "alias": "Node", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "mappingType": 1, - "pattern": "node", - "thresholds": [], - "type": "string", - "unit": "short" - }, + "yaxes": [ { - "alias": "Fail count", - "colorMode": "value", - "colors": [ - "rgba(50, 172, 45, 0.97)", - "rgba(237, 129, 40, 0.89)", - "rgba(245, 54, 54, 0.9)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 0, - "mappingType": 1, - "pattern": "Value #C", - "thresholds": [ - "1", - "100" - ], - "type": "number", - "unit": "none" - }, - 
{ - "alias": "Role", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "mappingType": 1, - "pattern": "role", - "thresholds": [], - "type": "string", - "unit": "short" - }, - { - "alias": "", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "decimals": 2, - "pattern": "/.*/", - "thresholds": [], - "type": "hidden", - "unit": "short" - } - ], - "targets": [ - { - "expr": "sum(ha_cluster_pacemaker_resources{instance=~\"$node_ip:\\\\d+\"}) by(resource, status, node, role)", - "format": "table", - "instant": true, - "legendFormat": "", - "refId": "A" - }, - { - "expr": "sum(ha_cluster_pacemaker_migration_threshold{instance=~\"$node_ip:\\\\d+\"}) by(resource, node)", - "format": "table", - "hide": false, - "instant": true, - "refId": "B" - }, - { - "expr": "sum(ha_cluster_pacemaker_fail_count{instance=~\"$node_ip:\\\\d+\"}) by(resource, node)", - "format": "table", - "hide": false, - "instant": true, - "refId": "C" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Resources", - "transform": "table", - "type": "table" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "#299c46", - "rgba(237, 129, 40, 0.89)", - "#d44a3a" - ], - "format": "none", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 3, - "w": 2, - "x": 22, - "y": 14 - }, - "id": 119, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "options": {}, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - 
"rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "decimals": 0, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true } ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false, - "ymax": null, - "ymin": null + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "blocked": "orange", + "failed": "red", + "failure_ignored": "yellow" }, - "tableColumn": "", + "bars": false, + "dashLength": 10, + "dashes": false, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 14, + "x": 10, + "y": 5 + }, + "id": 167, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, "targets": [ { - "expr": "count(ha_cluster_pacemaker_resources{instance=~\"$node_ip:\\\\d+\", role=\"slave\", status=\"active\"})", - "instant": true, + "expr": "sum(ha_cluster_pacemaker_resources{instance=\"$dc_instance\"}) by (status)", + "legendFormat": "{{status}}", "refId": "A" } ], - "thresholds": "", + "thresholds": [], "timeFrom": null, + "timeRegions": [], "timeShift": null, - "title": "Slave resources", - "type": "singlestat", - "valueFontSize": "100%", - "valueMaps": [ + "title": "Resource statuses over time", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ { - "op": "=", - "text": "N/A", - "value": 
"null" + "decimals": 0, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "decimals": 0, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true } ], - "valueName": "avg" + "yaxis": { + "align": false, + "alignLevel": null + } }, { "columns": [], "fontSize": "100%", "gridPos": { - "h": 8, - "w": 5, + "h": 10, + "w": 10, "x": 0, - "y": 15 + "y": 11 }, "id": 157, "options": {}, "pageSize": 10, + "repeat": "node", + "repeatDirection": "v", "scroll": true, "showHeader": true, "sort": { - "col": 0, - "desc": true + "col": 3, + "desc": false }, "styles": [ { - "alias": "", + "alias": "Service", "dateFormat": "YYYY-MM-DD HH:mm:ss", "pattern": "name", + "preserveFormat": false, + "sanitize": false, "type": "string" }, { @@ -1217,6 +932,22 @@ } ] }, + { + "alias": "Node", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "nodename", + "thresholds": [], + "type": "number", + "unit": "short" + }, { "alias": "", "colorMode": null, @@ -1234,7 +965,7 @@ ], "targets": [ { - "expr": "node_systemd_unit_state{name=~\"(corosync|pacemaker|sbd|hanadb_exporter|prometheus-ha_cluster_exporter|hawk).*\", instance=~\"$node_ip:9100\", state=\"active\"}", + "expr": "node_systemd_unit_state{name=~\"((corosync|pacemaker|sbd|prometheus|hawk).*)\", state=\"active\"} + on(instance) group_left(nodename) 0 * node_uname_info{job=\"$cluster\"}", "format": "table", "instant": true, "refId": "A" @@ -1247,28 +978,29 @@ "type": "table" }, { - "cacheTimeout": null, "columns": [], "fontSize": "100%", "gridPos": { - "h": 4, - "w": 4, - "x": 5, - "y": 15 + "h": 10, + "w": 14, + "x": 10, + "y": 11 }, - "id": 142, - "links": [], + "id": 108, "options": {}, "pageSize": null, + "pluginVersion": "6.3.5", + "repeat": null, + "repeatDirection": "h", 
"scroll": true, "showHeader": true, "sort": { - "col": 0, - "desc": true + "col": 3, + "desc": false }, "styles": [ { - "alias": "Device", + "alias": "Resource", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -1278,41 +1010,30 @@ "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 2, "mappingType": 1, - "pattern": "device", + "pattern": "resource", "thresholds": [], "type": "string", "unit": "short" }, { "alias": "Status", - "colorMode": "cell", + "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", "rgba(237, 129, 40, 0.89)", "rgba(50, 172, 45, 0.97)" ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 2, "mappingType": 1, - "pattern": "Value #B", - "thresholds": [ - "1", - "1" - ], + "pattern": "status", + "thresholds": [], "type": "string", "unit": "short", - "valueMaps": [ - { - "text": "Healthy", - "value": "1" - }, - { - "text": "Unealthy", - "value": "0" - } - ] + "valueMaps": [] }, { - "alias": "", + "alias": "Migration threshold", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -1320,179 +1041,50 @@ "rgba(50, 172, 45, 0.97)" ], "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, + "decimals": 0, "mappingType": 1, - "pattern": "/.*/", + "pattern": "Value #B", "thresholds": [], - "type": "hidden", - "unit": "short" - } - ], - "targets": [ - { - "expr": "count(ha_cluster_sbd_devices{instance=~\"$node_ip:\\\\d+\"}) by (device,status)", - "format": "table", - "instant": true, - "refId": "A" - }, - { - "expr": "count(ha_cluster_sbd_devices{instance=~\"$node_ip:\\\\d+\",status=\"healthy\"}) by (device,status)", - "format": "table", - "instant": true, - "refId": "B" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "SBD Devices", - "transform": "table", - "type": "table" - }, - { - "cacheTimeout": null, - "colorBackground": true, - "colorPrefix": false, - "colorValue": false, - "colors": [ - "#299c46", - "#C4162A", - "#C4162A" - ], - "format": "none", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - 
"thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 3, - "w": 2, - "x": 22, - "y": 17 - }, - "id": 116, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 + "type": "number", + "unit": "none" }, { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "options": {}, - "pluginVersion": "6.3.3", - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false, - "ymax": null, - "ymin": null - }, - "tableColumn": "", - "targets": [ - { - "expr": "count(ha_cluster_pacemaker_resources{instance=~\"$node_ip:\\\\d+\", status=\"failed\"})", - "instant": true, - "refId": "A" - } - ], - "thresholds": "0,1", - "timeFrom": null, - "timeShift": null, - "title": "Resources failed", - "type": "singlestat", - "valueFontSize": "100%", - "valueMaps": [ - { - "op": "=", - "text": "0", - "value": "null" - } - ], - "valueName": "avg" - }, - { - "columns": [], - "fontSize": "100%", - "gridPos": { - "h": 4, - "w": 13, - "x": 9, - "y": 19 - }, - "id": 165, - "options": {}, - "pageSize": null, - "scroll": true, - "showHeader": true, - "sort": { - "col": 0, - "desc": true - }, - "styles": [ - { - "alias": "Score", - "colorMode": "cell", + "alias": "Node", + "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", "rgba(237, 129, 40, 0.89)", "rgba(50, 172, 45, 0.97)" ], "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 0, + "decimals": 2, "mappingType": 1, - "pattern": "Value", - "preserveFormat": true, - "rangeMaps": [], - "sanitize": false, - "thresholds": [ - "0", - "" - ], - "type": "number", - "unit": "none", - "valueMaps": [] + "pattern": "node", + "thresholds": [], + "type": "string", + 
"unit": "short" }, { - "alias": "Constraint ID", - "colorMode": null, + "alias": "Fail count", + "colorMode": "value", "colors": [ - "rgba(245, 54, 54, 0.9)", + "rgba(50, 172, 45, 0.97)", "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" + "rgba(245, 54, 54, 0.9)" ], "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, + "decimals": 0, "mappingType": 1, - "pattern": "constraint", - "thresholds": [], - "type": "string", - "unit": "short" + "pattern": "Value #C", + "thresholds": [ + "1", + "100" + ], + "type": "number", + "unit": "none" }, { - "alias": "Resource", + "alias": "Role", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -1502,13 +1094,13 @@ "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 2, "mappingType": 1, - "pattern": "resource", + "pattern": "role", "thresholds": [], "type": "string", "unit": "short" }, { - "alias": "Node", + "alias": "Type", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -1518,27 +1110,11 @@ "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 2, "mappingType": 1, - "pattern": "node", + "pattern": "agent", "thresholds": [], "type": "number", "unit": "short" }, - { - "alias": "Role", - "colorMode": null, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "mappingType": 1, - "pattern": "role", - "thresholds": [], - "type": "string", - "unit": "short" - }, { "alias": "", "colorMode": null, @@ -1548,135 +1124,153 @@ "rgba(50, 172, 45, 0.97)" ], "decimals": 2, - "mappingType": 1, "pattern": "/.*/", "thresholds": [], "type": "hidden", - "unit": "short", - "valueMaps": [] + "unit": "short" } ], "targets": [ { - "expr": "clamp_min(clamp_max(ha_cluster_pacemaker_location_constraints{instance=~\"$node_ip:\\\\d+\"}, 1000000), -1000000)", + "expr": "sum(ha_cluster_pacemaker_resources{instance=\"$dc_instance\"} == 1) by(resource, node, status, role, agent)", "format": "table", - "hide": false, "instant": true, 
+ "legendFormat": "", "refId": "A" + }, + { + "expr": "sum(ha_cluster_pacemaker_migration_threshold{instance=\"$dc_instance\"}) by(resource, node)", + "format": "table", + "hide": false, + "instant": true, + "refId": "B" + }, + { + "expr": "sum(ha_cluster_pacemaker_fail_count{instance=\"$dc_instance\"}) by(resource, node)", + "format": "table", + "hide": false, + "instant": true, + "refId": "C" } ], "timeFrom": null, "timeShift": null, - "title": "Resource location constraints", + "title": "Resources", "transform": "table", "type": "table" }, { "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "#299c46", - "rgba(237, 129, 40, 0.89)", - "#d44a3a" - ], - "format": "none", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, + "columns": [], + "fontSize": "100%", "gridPos": { "h": 3, - "w": 2, - "x": 22, - "y": 20 + "w": 10, + "x": 0, + "y": 21 }, - "id": 114, - "interval": null, + "id": 142, "links": [], - "mappingType": 1, - "mappingTypes": [ + "options": {}, + "pageSize": null, + "scroll": true, + "showHeader": true, + "sort": { + "col": 0, + "desc": true + }, + "styles": [ { - "name": "value to text", - "value": 1 + "alias": "Device", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "device", + "thresholds": [], + "type": "string", + "unit": "short" }, { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "options": {}, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ + "alias": "Status", + "colorMode": "cell", + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "decimals": 2, + "mappingType": 1, + "pattern": 
"Value #B", + "thresholds": [ + "1", + "1" + ], + "type": "string", + "unit": "short", + "valueMaps": [ + { + "text": "Healthy", + "value": "1" + }, + { + "text": "Unealthy", + "value": "0" + } + ] + }, { - "from": "null", - "text": "N/A", - "to": "null" + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "/.*/", + "thresholds": [], + "type": "hidden", + "unit": "short" } ], - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false, - "ymax": null, - "ymin": null - }, - "tableColumn": "", "targets": [ { - "expr": "count(ha_cluster_pacemaker_resources{instance=~\"$node_ip:\\\\d+\", status=\"disabled\"})", + "expr": "count(ha_cluster_sbd_devices{instance=\"$dc_instance\"}) by (device,status)", + "format": "table", "instant": true, "refId": "A" + }, + { + "expr": "count(ha_cluster_sbd_devices{instance=\"$dc_instance\",status=\"healthy\"}) by (device,status)", + "format": "table", + "instant": true, + "refId": "B" } ], - "thresholds": "", "timeFrom": null, "timeShift": null, - "title": "Resources disabled", - "type": "singlestat", - "valueFontSize": "100%", - "valueMaps": [ - { - "op": "=", - "text": "0", - "value": "null" - } - ], - "valueName": "avg" - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 23 - }, - "id": 163, - "panels": [], - "title": "DRBD", - "type": "row" + "title": "SBD Devices", + "transform": "table", + "type": "table" }, { "columns": [], "fontSize": "100%", "gridPos": { - "h": 6, - "w": 9, - "x": 0, - "y": 24 + "h": 3, + "w": 14, + "x": 10, + "y": 21 }, - "id": 161, + "id": 165, "options": {}, "pageSize": null, "scroll": true, @@ -1687,21 +1281,30 @@ }, "styles": [ { - "alias": "", - "colorMode": null, + "alias": "Score", + "colorMode": "cell", "colors": [ "rgba(245, 54, 
54, 0.9)", "rgba(237, 129, 40, 0.89)", "rgba(50, 172, 45, 0.97)" ], - "decimals": 2, - "pattern": "__name__", - "thresholds": [], - "type": "hidden", - "unit": "short" + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "Value", + "preserveFormat": true, + "rangeMaps": [], + "sanitize": false, + "thresholds": [ + "0", + "" + ], + "type": "number", + "unit": "none", + "valueMaps": [] }, { - "alias": "", + "alias": "Constraint ID", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -1711,13 +1314,13 @@ "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 2, "mappingType": 1, - "pattern": "job", + "pattern": "constraint", "thresholds": [], - "type": "hidden", + "type": "string", "unit": "short" }, { - "alias": "", + "alias": "Resource", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -1727,13 +1330,13 @@ "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 2, "mappingType": 1, - "pattern": "Time", + "pattern": "resource", "thresholds": [], - "type": "hidden", + "type": "string", "unit": "short" }, { - "alias": "", + "alias": "Node", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -1743,13 +1346,13 @@ "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 2, "mappingType": 1, - "pattern": "Value", + "pattern": "node", "thresholds": [], - "type": "hidden", + "type": "number", "unit": "short" }, { - "alias": "", + "alias": "Role", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -1759,47 +1362,45 @@ "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 2, "mappingType": 1, - "pattern": "instance", + "pattern": "role", "thresholds": [], - "type": "hidden", + "type": "string", "unit": "short" }, { - "alias": "drbd disk status", + "alias": "", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", - "#56A64B", + "rgba(237, 129, 40, 0.89)", "rgba(50, 172, 45, 0.97)" ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 2, "mappingType": 1, - "pattern": "disk_state", - "thresholds": [ - "" - ], - "type": 
"string", + "pattern": "/.*/", + "thresholds": [], + "type": "hidden", "unit": "short", "valueMaps": [] } ], "targets": [ { - "expr": "ha_cluster_drbd_resources{job=\"$hacluster\"}", + "expr": "clamp_min(clamp_max(ha_cluster_pacemaker_location_constraints{instance=\"$dc_instance\"}, 1000000), -1000000)", "format": "table", + "hide": false, "instant": true, "refId": "A" } ], "timeFrom": null, "timeShift": null, - "title": "Drbd disks resources ", + "title": "Resource location constraints", "transform": "table", "type": "table" } ], - "refresh": "5s", + "refresh": false, "schemaVersion": 19, "style": "dark", "tags": [], @@ -1829,9 +1430,9 @@ "definition": "Cluster name", "hide": 0, "includeAll": false, - "label": "HA Cluster", + "label": "Cluster", "multi": false, - "name": "hacluster", + "name": "cluster", "options": [], "query": "label_values(job)", "refresh": 2, @@ -1848,38 +1449,16 @@ "allValue": null, "current": {}, "datasource": "$data_source", - "definition": "Node name", - "hide": 0, - "includeAll": false, - "label": "Node", - "multi": false, - "name": "node_name", - "options": [], - "query": "query_result(label_replace(ha_cluster_pacemaker_nodes{job='$hacluster'}, 'ip', '$1', 'instance','(.*):.*') + on(ip) group_left(nodename) label_replace(node_uname_info, 'ip', '$1', 'instance','(.*):.*'))", - "refresh": 2, - "regex": ".*nodename=\"([^,]+)\".*", - "skipUrlSync": false, - "sort": 0, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": null, - "current": {}, - "datasource": "$data_source", - "definition": "Node IP", + "definition": "DC Exporter Instance", "hide": 2, "includeAll": false, "label": null, "multi": false, - "name": "node_ip", + "name": "dc_instance", "options": [], - "query": "label_values(node_uname_info{nodename=\"$node_name\", job=\"$hacluster\"}, instance)", + "query": "query_result(label_replace((ha_cluster_pacemaker_nodes{job='$cluster',status='dc'} == 1), 'ip', '$1', 
'instance', '(.*):.*') + on(node, ip) group_left label_replace(label_replace(0 * node_uname_info{job='$cluster'}, 'node', '$1', 'nodename', '(.*)'), 'ip', '$1', 'instance','(.*):.*'))", "refresh": 2, - "regex": "^(.*):\\d+$", + "regex": ".*instance=\"(.+?)\".*", "skipUrlSync": false, "sort": 0, "tagValuesQuery": "", @@ -1891,7 +1470,7 @@ ] }, "time": { - "from": "now-1h", + "from": "now-30m", "to": "now" }, "timepicker": { @@ -1920,7 +1499,7 @@ ] }, "timezone": "", - "title": "HA Cluster Status", + "title": "HA Cluster details", "uid": "Q5YJpwtZk1", - "version": 2 + "version": 3 } diff --git a/salt/monitoring/provisioning/dashboards/multi-cluster-overview.json b/salt/monitoring/grafana/provisioning/dashboards/multi-cluster-overview.json similarity index 98% rename from salt/monitoring/provisioning/dashboards/multi-cluster-overview.json rename to salt/monitoring/grafana/provisioning/dashboards/multi-cluster-overview.json index 4ef913d25..b645de409 100644 --- a/salt/monitoring/provisioning/dashboards/multi-cluster-overview.json +++ b/salt/monitoring/grafana/provisioning/dashboards/multi-cluster-overview.json @@ -731,7 +731,7 @@ "repeatDirection": "h", "targets": [ { - "expr": "count(ha_cluster_pacemaker_nodes{status=\"online\"}) by (job) / count(ha_cluster_pacemaker_nodes{status=\"expected_up\"}) by (job)", + "expr": "count(count(ha_cluster_pacemaker_nodes{status=\"online\"} == 1) by (job,node)) by (job) / count(count(ha_cluster_pacemaker_nodes) by (job,node)) by (job)", "hide": false, "instant": false, "legendFormat": "{{job}}", @@ -787,7 +787,7 @@ "pluginVersion": "6.3.5", "targets": [ { - "expr": "count(ha_cluster_pacemaker_resources{status=\"active\"}) by (job) / count(ha_cluster_pacemaker_resources) by (job)", + "expr": "count(count(ha_cluster_pacemaker_resources{status=\"active\"} == 1) by (resource,node,job)) by (job) / count(count(ha_cluster_pacemaker_resources) by (resource,job,node)) by (job)", "interval": "", "legendFormat": "{{job}}", "refId": "A" @@ 
-963,7 +963,7 @@ "steppedLine": true, "targets": [ { - "expr": "count(count(ha_cluster_pacemaker_nodes{status=\"online\"}) by (node,job)) by (job)", + "expr": "count(count(ha_cluster_pacemaker_nodes{status=\"online\"} == 1) by (node,job)) by (job)", "interval": "", "intervalFactor": 1, "legendFormat": "{{job}}", @@ -1053,7 +1053,7 @@ "steppedLine": true, "targets": [ { - "expr": "count(ha_cluster_pacemaker_resources{status=\"active\"}) by (node,job) / count(count(ha_cluster_pacemaker_resources) by (node,instance,job)) by (node,job)", + "expr": "count(count(ha_cluster_pacemaker_resources{status=\"active\"} == 1) by (resource,node,job)) by (job,node)", "interval": "", "legendFormat": "{{node}}", "refId": "A" @@ -1325,7 +1325,7 @@ "list": [] }, "time": { - "from": "now-1h", + "from": "now-30m", "to": "now" }, "timepicker": { @@ -1345,5 +1345,5 @@ "timezone": "", "title": "Multi-Cluster Overview", "uid": "5b0v3y-Wz1", - "version": 8 + "version": 9 } diff --git a/salt/monitoring/provisioning/dashboards/ha-sap-hana-detailed-timestamp.json b/salt/monitoring/grafana/provisioning/dashboards/sap-hana-details.json similarity index 100% rename from salt/monitoring/provisioning/dashboards/ha-sap-hana-detailed-timestamp.json rename to salt/monitoring/grafana/provisioning/dashboards/sap-hana-details.json diff --git a/salt/monitoring/grafana/provisioning/dashboards/sap-netweaver-details.json b/salt/monitoring/grafana/provisioning/dashboards/sap-netweaver-details.json new file mode 100644 index 000000000..cff09978b --- /dev/null +++ b/salt/monitoring/grafana/provisioning/dashboards/sap-netweaver-details.json @@ -0,0 +1,1339 @@ +{ + "__inputs": [], + "__requires": [ + { + "type": "panel", + "id": "bargauge", + "name": "Bar Gauge", + "version": "" + }, + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "6.3.5" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "" + }, + { + "type": "panel", + "id": "table", + "name": "Table", + 
"version": "" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": null, + "links": [], + "panels": [ + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 10, + "panels": [], + "title": "Start Service", + "type": "row" + }, + { + "columns": [], + "fontSize": "100%", + "gridPos": { + "h": 6, + "w": 12, + "x": 0, + "y": 1 + }, + "id": 28, + "options": {}, + "pageSize": null, + "scroll": true, + "showHeader": true, + "sort": { + "col": 0, + "desc": true + }, + "styles": [ + { + "alias": "Features", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "features", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Number", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "mappingType": 1, + "pattern": "instance_number", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Start priority", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 1, + "mappingType": 1, + "pattern": "start_priority", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Status", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value", + "thresholds": 
[], + "type": "string", + "unit": "short", + "valueMaps": [ + { + "text": "GRAY", + "value": "1" + }, + { + "text": "GREEN", + "value": "2" + }, + { + "text": "YELLOW", + "value": "3" + }, + { + "text": "RED", + "value": "4" + } + ] + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "SID", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Hostname", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "instance_hostname", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Name", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "instance_name", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "hidden", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sap_start_service_instances", + "format": "table", + "instant": true, + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Instances", + "transform": "table", + "type": "table" + }, + { + "columns": [], + "fontSize": "100%", + "gridPos": { + "h": 6, + "w": 12, + "x": 12, + "y": 1 + }, + "id": 2, + "options": {}, + "pageSize": null, + "pluginVersion": "6.3.5", + "scroll": true, + "showHeader": true, + "sort": { + "col": 3, + "desc": false + }, + "styles": [ + { + "alias": "Code", + "colorMode": null, + 
"colors": [ + "rgba(50, 172, 45, 0.97)", + "#FF9830", + "rgba(245, 54, 54, 0.9)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "mappingType": 1, + "pattern": "Value", + "thresholds": [ + "3", + "4" + ], + "type": "string", + "unit": "short", + "valueMaps": [ + { + "text": "GRAY", + "value": "1" + }, + { + "text": "GREEN", + "value": "2" + }, + { + "text": "YELLOW", + "value": "3" + }, + { + "text": "RED", + "value": "4" + } + ] + }, + { + "alias": "PID", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "pid", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Status", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "status", + "thresholds": [], + "type": "number", + "unit": "short" + }, + { + "alias": "Process name", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "name", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Instance name", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "instance_name", + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Instance nr.", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "instance_number", 
+ "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "/.*/", + "thresholds": [], + "type": "hidden", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sap_start_service_processes", + "format": "table", + "hide": false, + "instant": true, + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Processes", + "transform": "table", + "type": "table" + }, + { + "aliasColors": { + "GRAY": "rgb(128, 128, 128)", + "GREEN": "green", + "RED": "red", + "YELLOW": "yellow" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 7 + }, + "id": 30, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "count(sap_start_service_instances == 1)", + "legendFormat": "GRAY", + "refId": "A" + }, + { + "expr": "count(sap_start_service_instances == 2)", + "legendFormat": "GREEN", + "refId": "B" + }, + { + "expr": "count(sap_start_service_instances == 3)", + "legendFormat": "YELLOW", + "refId": "C" + }, + { + "expr": "count(sap_start_service_instances == 4)", + "legendFormat": "RED", + "refId": "D" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Instance Status", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + 
"mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "GRAY": "rgb(128, 128, 128)", + "GREEN": "green", + "RED": "red", + "YELLOW": "yellow" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 7 + }, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "count(sap_start_service_processes == 1)", + "interval": "", + "legendFormat": "GRAY", + "refId": "A" + }, + { + "expr": "count(sap_start_service_processes == 2)", + "legendFormat": "GREEN", + "refId": "B" + }, + { + "expr": "count(sap_start_service_processes == 3)", + "legendFormat": "YELLOW", + "refId": "C" + }, + { + "expr": "count(sap_start_service_processes == 4)", + "legendFormat": "RED", + "refId": "D" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Process Status", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "decimals": 0, + 
"format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 15 + }, + "id": 22, + "panels": [], + "title": "Enqueue Server", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 16 + }, + "id": 20, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sap_enqueue_server_locks_now", + "legendFormat": "Locks", + "refId": "A" + }, + { + "expr": "sap_enqueue_server_owner_now", + "legendFormat": "Lock owners", + "refId": "C" + }, + { + "expr": "sap_enqueue_server_arguments_now", + "legendFormat": "Lock arguments", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Lock Table Entries", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "decimals": 0, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "fill": 1, + 
"fillGradient": 0, + "gridPos": { + "h": 8, + "w": 8, + "x": 8, + "y": 16 + }, + "id": 24, + "interval": "", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(sap_enqueue_server_enqueue_requests[5m])", + "legendFormat": "Lock", + "refId": "A" + }, + { + "expr": "rate(sap_enqueue_server_enqueue_rejects[5m])", + "legendFormat": "Rejected", + "refId": "B" + }, + { + "expr": "rate(sap_enqueue_server_dequeue_requests[5m])", + "legendFormat": "Release", + "refId": "C" + }, + { + "expr": "rate(sap_enqueue_server_dequeue_all_requests[5m])", + "legendFormat": "LUW release", + "refId": "D" + }, + { + "expr": "rate(sap_enqueue_server_cleanup_requests[5m])", + "legendFormat": "Server release", + "refId": "E" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Requests / sec", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "decimals": null, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "Lock": "dark-red", + "Release": "semi-dark-red" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 8, + "x": 16, + "y": 16 + }, + "id": 26, + 
"legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(sap_enqueue_server_enqueue_errors[5m])", + "legendFormat": "Lock", + "refId": "A" + }, + { + "expr": "rate(sap_enqueue_server_dequeue_errors[5m])", + "legendFormat": "Release", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Errors / sec", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "decimals": null, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 24 + }, + "id": 12, + "panels": [], + "title": "Work Process Dispatcher", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 5, + "w": 8, + "x": 0, + "y": 25 + }, + "id": 14, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + 
"spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sap_dispatcher_queue_now", + "legendFormat": "{{type}} {{instance_name}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Dispatcher Queue Depth", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "decimals": 0, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "gridPos": { + "h": 15, + "w": 8, + "x": 8, + "y": 25 + }, + "id": 18, + "options": { + "displayMode": "basic", + "fieldOptions": { + "calcs": [ + "last" + ], + "defaults": { + "mappings": [], + "min": 0, + "thresholds": [ + { + "color": "green", + "value": null + }, + { + "color": "yellow", + "value": 40 + }, + { + "color": "red", + "value": 100 + } + ] + }, + "override": {}, + "values": true + }, + "orientation": "horizontal" + }, + "pluginVersion": "6.3.5", + "targets": [ + { + "expr": "sap_dispatcher_queue_high", + "instant": true, + "legendFormat": "{{type}} {{instance_name}}", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Dispatcher Queue Depth High", + "type": "bargauge" + }, + { + "cacheTimeout": null, + "gridPos": { + "h": 15, + "w": 8, + "x": 16, + "y": 25 + }, + "id": 16, + "links": [], + "options": { + "displayMode": "basic", + "fieldOptions": { + "calcs": [ + "lastNotNull" + ], + "defaults": { + "decimals": 0, + "mappings": [], + "max": 15000, + "min": 0, + "thresholds": [ + { + "color": "green", + "value": null + }, + { + "color": "green", + "value": 5000 + } + ], + "unit": "short" + }, + 
"override": {}, + "values": true + }, + "orientation": "horizontal" + }, + "pluginVersion": "6.3.5", + "targets": [ + { + "expr": "sap_dispatcher_queue_max", + "instant": true, + "legendFormat": "{{type}} {{instance_name}}", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Dispatcher Queue Depth Threshold", + "type": "bargauge" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 5, + "w": 8, + "x": 0, + "y": 30 + }, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(sap_dispatcher_queue_reads[5m])", + "legendFormat": "{{type}} {{instance_name}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Dispatcher Queues Reads / sec", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 5, + "w": 8, + "x": 0, + "y": 35 + }, + "id": 8, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + 
"show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(sap_dispatcher_queue_writes[5m])", + "legendFormat": "{{type}} {{instance_name}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Dispatcher Queue Writes / sec", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": false, + "schemaVersion": 19, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-30m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "", + "title": "SAP NetWeaver Details", + "uid": "lLLUAU_Wk", + "version": 8 +} diff --git a/salt/monitoring/init.sls b/salt/monitoring/init.sls index 4e38c5277..7fa97e626 100644 --- a/salt/monitoring/init.sls +++ b/salt/monitoring/init.sls @@ -2,7 +2,7 @@ {% set repository = repository~'_SP'~grains['osrelease_info'][1] if grains['osrelease_info']|length > 1 else repository %} server_monitoring_repo: - pkgrepo.managed: + pkgrepo.managed: - humanname: Server:Monitoring - baseurl: https://download.opensuse.org/repositories/server:/monitoring/{{ repository }}/ - refresh: True @@ -10,107 
+10,10 @@ server_monitoring_repo: - retry: attempts: 3 interval: 15 + - require_in: + - pkg: prometheus + - pkg: grafana -prometheus: - pkg.installed: - - name: golang-github-prometheus-prometheus - - require: - - pkgrepo: server_monitoring_repo - - retry: - attempts: 3 - interval: 15 - -prometheus_shap_configuration: - file.recurse: - - name: /etc/prometheus/ - - makedirs: True - - source: salt://monitoring/prometheus - - template: jinja - - include_empty: True - -prometheus_service: - service.running: - - name: prometheus - - enable: True - - require: - - file: prometheus_shap_configuration - - watch: - - file: prometheus_shap_configuration - -grafana: - pkg.installed: - - name: grafana - - require: - - pkgrepo: server_monitoring_repo - - retry: - attempts: 3 - interval: 15 - -grafana_anonymous_login_configuration: - file.blockreplace: - - name: /etc/grafana/grafana.ini - - marker_start: '#################################### Anonymous Auth ######################' - - marker_end: '#################################### Github Auth ##########################' - - content: | - [auth.anonymous] - enabled = true - org_name = Main Org. 
- org_role = Admin - - require: - - pkg: grafana - -grafana_port_configuration: - file.replace: - - name: /etc/grafana/grafana.ini - - pattern: ;http_port = 3000 - - repl: http_port = 80 - - require: - - pkg: grafana - -grafana_provisioning_directory: - file.recurse: - - name: /etc/grafana/provisioning - - source: salt://monitoring/provisioning - - clean: True - - user: grafana - - group: grafana - - require: - - pkg: grafana - -grafana_service_configuration: - file.replace: - - name: /usr/lib/systemd/system/grafana-server.service - - pattern: (User|Group)=grafana - - repl: '#\1' - - require: - - pkg: grafana - -grafana_service: - service.running: - - name: grafana-server - - enable: True - - restart: True - - require: - - pkg: grafana - - file: grafana_port_configuration - - file: grafana_provisioning_directory - - file: grafana_service_configuration - - watch: - - file: grafana_port_configuration - - file: grafana_provisioning_directory - - file: grafana_service_configuration - -prometheus-alertmanager: - pkg.installed: - - names: - - golang-github-prometheus-alertmanager - - enable: True - - reload: True - - require: - - service: prometheus_service - - file: prometheus_shap_configuration - - watch: - - file: prometheus_shap_configuration - - retry: - attempts: 3 - interval: 15 +include: + - .prometheus + - .grafana diff --git a/salt/monitoring/prometheus.sls b/salt/monitoring/prometheus.sls new file mode 100644 index 000000000..7e56c7841 --- /dev/null +++ b/salt/monitoring/prometheus.sls @@ -0,0 +1,49 @@ +prometheus: + pkg.installed: + - name: golang-github-prometheus-prometheus + - retry: + attempts: 3 + interval: 15 + +prometheus_alerts: + file.managed: + - name: /etc/prometheus/rules.yml + - source: salt://monitoring/prometheus/rules.yml + - require: + - pkg: prometheus + +prometheus_configuration: + file.managed: + - name: /etc/prometheus/prometheus.yml + - source: salt://monitoring/prometheus/prometheus.yml.j2 + - template: jinja + - require: + - pkg: 
prometheus + +prometheus_service: + service.running: + - name: prometheus + - enable: True + - require: + - file: prometheus_configuration + - file: prometheus_alerts + - watch: + - file: prometheus_configuration + - file: prometheus_alerts + +prometheus-alertmanager: + pkg.installed: + - names: + - golang-github-prometheus-alertmanager + - enable: True + - reload: True + - require: + - service: prometheus_service + - file: prometheus_configuration + - file: prometheus_alerts + - watch: + - file: prometheus_configuration + - file: prometheus_alerts + - retry: + attempts: 3 + interval: 15 diff --git a/salt/monitoring/prometheus/prometheus.yml b/salt/monitoring/prometheus/prometheus.yml.j2 similarity index 50% rename from salt/monitoring/prometheus/prometheus.yml rename to salt/monitoring/prometheus/prometheus.yml.j2 index 077de854c..435aba09a 100644 --- a/salt/monitoring/prometheus/prometheus.yml +++ b/salt/monitoring/prometheus/prometheus.yml.j2 @@ -14,45 +14,42 @@ rule_files: - /etc/prometheus/rules.yml scrape_configs: - # in a multi-cluster scenario, add another job name called - # hacluster-02 - - job_name: hana-cluster + # we use job_name to group exporters for each cluster + + {% if grains.get('hana_targets', [])|length > 0 %} + - job_name: hana # The HANA scrapping follows a different scrapping time to reduce the execution load into the database # This time was based on users feedback, but should be set accordingly with your environment needs. 
scrape_interval: 30s scrape_timeout: 30s static_configs: - targets: - {% for ip in grains['monitored_hosts'] %} - - "{{ ip }}:9668" # hanadb_exporter - {% endfor %} - {% for ip in grains['monitored_hosts'] %} + {%- for ip in grains['hana_targets'][0:2] %} - "{{ ip }}:9100" # node_exporter - {% endfor %} - {% for ip in grains['monitored_hosts'] %} - "{{ ip }}:9664" # ha_cluster_exporter - {% endfor %} + {%- endfor %} + - "{{ grains['hana_targets'][2] }}:9668" # hanadb_exporter + {%- endif %} - {% if grains.get('drbd_monitored_hosts', [])|length > 0 %} - - job_name: drbd-cluster + {%- if grains.get('drbd_targets', [])|length > 0 %} + - job_name: drbd static_configs: - targets: - {% for ip in grains['drbd_monitored_hosts'] %} + {%- for ip in grains['drbd_targets'] %} - "{{ ip }}:9100" # node_exporter - {% endfor %} - {% for ip in grains['drbd_monitored_hosts'] %} - "{{ ip }}:9664" # ha_cluster_exporter - {% endfor %} - {% endif %} + {%- endfor %} + {%- endif %} - {% if grains.get('nw_monitored_hosts', [])|length > 0 %} - - job_name: netweaver-cluster + {%- if grains.get('netweaver_targets', [])|length > 0 %} + - job_name: netweaver static_configs: - targets: - {% for ip in grains['nw_monitored_hosts'] %} + {%- for ip in grains['netweaver_targets'] %} - "{{ ip }}:9100" # node_exporter - {% endfor %} - {% for ip in grains['nw_monitored_hosts'][0:2] %} + - "{{ ip }}:9680" # sap_host_exporter + {%- endfor %} + {%- for ip in grains['netweaver_targets'][0:2] %} - "{{ ip }}:9664" # ha_cluster_exporter - {% endfor %} - {% endif %} + {%- endfor %} + {%- endif %} diff --git a/salt/monitoring/prometheus/rules.yml b/salt/monitoring/prometheus/rules.yml index 5c6eacb94..98d7153bd 100644 --- a/salt/monitoring/prometheus/rules.yml +++ b/salt/monitoring/prometheus/rules.yml @@ -21,10 +21,8 @@ groups: labels: severity: page annotations: - {%- raw %} summary: "HANA Internal alert raised for SID {{ $labels.sid }} InsNr {{ $labels.insnr }} DBName {{ $labels.database_name }}" description: 
"Alert Details: {{ $labels.alert_details }} User Action: {{ $labels.alert_useraction }}" - {%- endraw %} # ha cluster alerts - name: cluster-resources-monitoring diff --git a/salt/monitoring/provisioning/dashboards/shap-dashboard.yaml b/salt/monitoring/provisioning/dashboards/shap-dashboard.yaml deleted file mode 100644 index 5f9489f2f..000000000 --- a/salt/monitoring/provisioning/dashboards/shap-dashboard.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: 1 - -providers: -- name: default - orgId: 1 - folder: '' - type: file - disableDeletion: false - updateIntervalSeconds: 10 # how often Grafana will scan for changed dashboards - options: - path: /etc/grafana/provisioning diff --git a/salt/netweaver_node/installation_files.sls b/salt/netweaver_node/installation_files.sls index 60f4d090c..8ae433aac 100644 --- a/salt/netweaver_node/installation_files.sls +++ b/salt/netweaver_node/installation_files.sls @@ -1,4 +1,4 @@ -{% set sapcd = '/netweaver_inst_media' %} +{% set sapcd = '/sapmedia/NW' %} {% if grains['provider'] == 'libvirt' %} mount_swpm: @@ -22,68 +22,20 @@ mount_swpm: - opts: - vers=3.0,username={{ grains['storage_account_name'] }},password={{ grains['storage_account_key'] }},dir_mode=0777,file_mode=0777,sec=ntlmssp -{% elif grains['provider'] == 'aws' %} - -# In AWS the NW installation software goes in the NFS share -mount_sapcd: - mount.mounted: - - name: {{ sapcd }} - - device: {{ grains['netweaver_nfs_share'] }}/sapcd - - fstype: nfs4 - - mkmnt: True - - persist: True - - opts: - - rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2 - - retry: - attempts: 5 - interval: 60 - -# Download only if it's the first node -{% if grains['host_ip'] == grains['host_ips'][0] %} -download_nw_files_from_s3: - cmd.run: - - name: "aws s3 sync {{ grains['s3_bucket'] }} {{ sapcd }} --region {{ grains['region'] }} --only-show-errors" - - onlyif: "aws s3 sync --dryrun {{ grains['s3_bucket'] }} {{ sapcd }} --region {{ grains['region'] }} | grep download > /dev/null 2>&1" - - 
output_loglevel: quiet - - hide_output: True -{% endif %} - -wait_until_sw_downloaded: - cmd.run: - - name: | - until ! aws s3 sync --dryrun {{ grains['s3_bucket'] }} {{ sapcd }} \ - --region {{ grains['region'] }} | grep download > /dev/null 2>&1;do sleep 30;done - - output_loglevel: quiet - - hide_output: True - - timeout: 600 - -sapcd_folder: - file.directory: - - name: {{ sapcd }} - - user: root - - group: root - - dir_mode: "0755" - - file_mode: "0755" - - recurse: - - user - - group - - mode - - require: - - wait_until_sw_downloaded - -{% elif grains['provider'] == 'gcp' %} +{% elif grains['provider'] in ['gcp', 'aws'] %} +{% set netweaver_inst_disk_device = salt['cmd.run']('realpath '~grains['netweaver_inst_disk_device']) %} nw_inst_partition: cmd.run: - name: | - /usr/sbin/parted -s {{ grains['nw_inst_disk_device'] }} mklabel msdos && \ - /usr/sbin/parted -s {{ grains['nw_inst_disk_device'] }} mkpart primary ext2 1M 100% && sleep 1 && \ - /sbin/mkfs -t xfs {{ grains['nw_inst_disk_device'] }}1 - - unless: ls {{ grains['nw_inst_disk_device'] }}1 + /usr/sbin/parted -s {{ netweaver_inst_disk_device }} mklabel msdos && \ + /usr/sbin/parted -s {{ netweaver_inst_disk_device }} mkpart primary ext2 1M 100% && sleep 1 && \ + /sbin/mkfs -t xfs {{ netweaver_inst_disk_device }}1 + - unless: ls {{ netweaver_inst_disk_device }}1 mount_swpm: mount.mounted: - name: {{ sapcd }} - - device: {{ grains['nw_inst_disk_device'] }}1 + - device: {{ netweaver_inst_disk_device }}1 - fstype: xfs - mkmnt: True - persist: True @@ -92,6 +44,7 @@ mount_swpm: - require: - cmd: nw_inst_partition +{% if grains['provider'] == 'gcp' %} install_rclone: cmd.run: - name: "curl https://rclone.org/install.sh | sudo bash" @@ -105,6 +58,17 @@ download_files_from_gcp: cmd.run: - name: rclone copy remote:{{ grains['netweaver_software_bucket'] }} {{ sapcd }} +{% elif grains['provider'] == 'aws' %} + +download_files_from_s3: + cmd.run: + - name: "aws s3 sync {{ grains['s3_bucket'] }} {{ sapcd }} --region 
{{ grains['region'] }} --only-show-errors" + - onlyif: "aws s3 sync --dryrun {{ grains['s3_bucket'] }} {{ sapcd }} --region {{ grains['region'] }} | grep download > /dev/null 2>&1" + - output_loglevel: quiet + - hide_output: True + +{% endif %} + swpm_folder: file.directory: - name: {{ sapcd }} diff --git a/salt/netweaver_node/nfs.sls b/salt/netweaver_node/nfs.sls index 6b6480817..d4c3b4278 100644 --- a/salt/netweaver_node/nfs.sls +++ b/salt/netweaver_node/nfs.sls @@ -9,6 +9,25 @@ install_nfs_client: attempts: 3 interval: 15 +# We cannot use showmount as some of the required ports are not always available +# (aws efs storage or azure load balancers don't serve portmapper 111 and mountd 20048 ports) +netcat-openbsd: + pkg.installed: + - retry: + attempts: 3 + interval: 15 + +{% set nfs_server_ip = grains['netweaver_nfs_share'].split(':')[0] %} +wait_until_nfs_is_ready: + cmd.run: + - name: until nc -zvw5 {{ nfs_server_ip }} 2049;do sleep 30;done + - timeout: 1200 + - require: + - pkg: netcat-openbsd + +# Initialized NFS share folders, only with the first node +# Executing these states in all the nodes might cause errors during deletion, as they try to delete the same files +{% if grains['host_ip'] == grains['host_ips'][0] %} mount_sapmnt_temporary: mount.mounted: - name: /tmp/sapmnt @@ -18,13 +37,9 @@ mount_sapmnt_temporary: - persist: False - opts: - defaults - - retry: - attempts: 30 - interval: 60 + - require: + - wait_until_nfs_is_ready -# Initialized NFS share folders, only with the first node -# Executing these states in all the nodes might cause errors during deletion, as they try to delete the same files -{% if grains['host_ip'] == grains['host_ips'][0] %} /tmp/sapmnt/sapmnt: file.directory: - user: root @@ -71,8 +86,6 @@ mount_sapmnt_temporary: - require: - mount_sapmnt_temporary -{% endif %} - unmount_sapmnt: mount.unmounted: - name: /tmp/sapmnt @@ -83,3 +96,4 @@ unmount_sapmnt: remove_tmp_folder: file.absent: - name: /tmp/sapmnt +{% endif %} diff --git 
a/salt/pre_installation/ha_repos.sls b/salt/os_setup/ha_repos.sls similarity index 54% rename from salt/pre_installation/ha_repos.sls rename to salt/os_setup/ha_repos.sls index ca9f97dd7..8a2370c06 100644 --- a/salt/pre_installation/ha_repos.sls +++ b/salt/os_setup/ha_repos.sls @@ -5,10 +5,18 @@ allow_all_vendor_changes: - text: solver.allowVendorChange = true {% endif %} +{% if 'SLE_' in grains['ha_sap_deployment_repo'] %} +{% set repository = grains['ha_sap_deployment_repo'] %} +{% else %} +{% set sle_version = 'SLE_'~grains['osrelease_info'][0] %} +{% set sle_version = sle_version~'_SP'~grains['osrelease_info'][1] if grains['osrelease_info']|length > 1 else sle_version %} +{% set repository = grains['ha_sap_deployment_repo']~"/"~sle_version %} +{% endif %} + ha-factory-repo: pkgrepo.managed: - name: ha-factory - - baseurl: {{ grains['ha_sap_deployment_repo'] }} + - baseurl: {{ repository }} - gpgautoimport: True # Reduce the ha-factory priority in order to install HA packages from there {% if grains.get('devel_mode') %} diff --git a/salt/os_setup/init.sls b/salt/os_setup/init.sls new file mode 100644 index 000000000..33cce3864 --- /dev/null +++ b/salt/os_setup/init.sls @@ -0,0 +1,10 @@ +include: + - os_setup.registration + - os_setup.repos + - os_setup.update + - os_setup.ha_repos + - os_setup.minion_configuration + - os_setup.packages + {% if grains['provider'] == 'libvirt' %} + - os_setup.ip_workaround + {% endif %} diff --git a/salt/pre_installation/ip_workaround.sls b/salt/os_setup/ip_workaround.sls similarity index 100% rename from salt/pre_installation/ip_workaround.sls rename to salt/os_setup/ip_workaround.sls diff --git a/salt/pre_installation/minion_configuration.sls b/salt/os_setup/minion_configuration.sls similarity index 100% rename from salt/pre_installation/minion_configuration.sls rename to salt/os_setup/minion_configuration.sls diff --git a/salt/pre_installation/packages.sls b/salt/os_setup/packages.sls similarity index 100% rename from 
salt/pre_installation/packages.sls rename to salt/os_setup/packages.sls diff --git a/salt/os_setup/registration.sls b/salt/os_setup/registration.sls new file mode 100644 index 000000000..d6a948dff --- /dev/null +++ b/salt/os_setup/registration.sls @@ -0,0 +1,65 @@ +{% if grains['os_family'] == 'Suse' %} +{% if not grains.get('qa_mode') or '_node' not in grains.get('role') %} +{% if grains['reg_code'] %} +{% set reg_code = grains['reg_code'] %} +{% set arch = grains['osarch'] %} +register_system: + cmd.run: + - name: /usr/bin/SUSEConnect -r $reg_code {{ ("-e " ~ grains['reg_email']) if grains['reg_email'] else "" }} + - env: + - reg_code: {{ reg_code }} + - retry: + attempts: 3 + interval: 15 +{% endif %} + + +{% if grains['osmajorrelease'] == 12 %} +# hardcode the 12 version number for the 2 following modules, since they don't offer a sp version only 1. +default_sle_module_adv_systems_management_registration: + cmd.run: + - name: /usr/bin/SUSEConnect -p sle-module-adv-systems-management/12/{{ arch }} -r $reg_code + - env: + - reg_code: {{ reg_code }} + - retry: + attempts: 3 + interval: 15 + +{%- if grains['provider'] in ['gcp', 'aws', 'azure'] %} +default_sle_module_public_cloud_registration: + cmd.run: + - name: /usr/bin/SUSEConnect -p sle-module-public-cloud/12/{{ arch }} -r $reg_code + - env: + - reg_code: {{ reg_code }} + - retry: + attempts: 3 + interval: 15 + +{% endif %} + +{% elif grains['osmajorrelease'] == 15 and grains['provider'] in ['gcp', 'aws', 'azure'] %} +default_sle_module_public_cloud_registration: + cmd.run: + - name: /usr/bin/SUSEConnect -p sle-module-public-cloud/{{ grains['osrelease'] }}/{{ arch }} -r $reg_code + - env: + - reg_code: {{ reg_code }} + - retry: + attempts: 3 + interval: 15 + +{% endif %} + +{% if grains['reg_additional_modules'] %} +{% for module, mod_reg_code in grains['reg_additional_modules'].items() %} +{{ module }}_registration: + cmd.run: + - name: /usr/bin/SUSEConnect -p {{ module }} {{ "-r $mod_reg_code" if 
mod_reg_code else "" }} + - env: + - mod_reg_code: {{ mod_reg_code }} + - retry: + attempts: 3 + interval: 15 +{% endfor %} +{% endif %} +{% endif %} +{% endif %} diff --git a/salt/pre_installation/repos.sls b/salt/os_setup/repos.sls similarity index 100% rename from salt/pre_installation/repos.sls rename to salt/os_setup/repos.sls diff --git a/salt/pre_installation/update.sls b/salt/os_setup/update.sls similarity index 100% rename from salt/pre_installation/update.sls rename to salt/os_setup/update.sls diff --git a/salt/pillar/iscsi_srv.sls b/salt/pillar/iscsi_srv.sls index 396c0ff77..6ffdcdc4c 100644 --- a/salt/pillar/iscsi_srv.sls +++ b/salt/pillar/iscsi_srv.sls @@ -1,6 +1,7 @@ {% set devicenum = 'abcdefghijklmnopqrstuvwxyz' %} {% set partitions = grains['partitions'] %} {% set num = grains['iscsi_disks'] %} +{% set real_iscsidev = salt['cmd.run']('realpath '~grains['iscsidev']) %} {% if num > 0 and num < partitions|length %} {% set partitions = (partitions|list)[:num] %} @@ -27,9 +28,8 @@ iscsi: attributes: block_size: 512 emulate_write_cache: 0 - queue_depth: 64 unmap_granularity: 0 - dev: {{ grains['iscsidev'] }}{{ loop.index }} + dev: {{ real_iscsidev }}{{ loop.index }} name: sd{{ devicenum[loop.index0] }} plugin: "block" {%- endfor %} diff --git a/salt/pre_installation/init.sls b/salt/pre_installation/init.sls deleted file mode 100644 index df031cc8a..000000000 --- a/salt/pre_installation/init.sls +++ /dev/null @@ -1,10 +0,0 @@ -include: - - pre_installation.registration - - pre_installation.repos - - pre_installation.update - - pre_installation.ha_repos - - pre_installation.minion_configuration - - pre_installation.packages - {% if grains['provider'] == 'libvirt' %} - - pre_installation.ip_workaround - {% endif %} diff --git a/salt/pre_installation/registration.sls b/salt/pre_installation/registration.sls deleted file mode 100644 index 3d5fc7811..000000000 --- a/salt/pre_installation/registration.sls +++ /dev/null @@ -1,27 +0,0 @@ -{% if 
grains['os_family'] == 'Suse' %} -{% if not grains.get('qa_mode') or '_node' not in grains.get('role') %} -{% if grains['reg_code'] %} -register_system: - cmd.run: - - name: /usr/bin/SUSEConnect -r $reg_code {{ ("-e " ~ grains['reg_email']) if grains['reg_email'] else "" }} - - env: - - reg_code: {{ grains['reg_code'] }} - - retry: - attempts: 3 - interval: 15 -{% endif %} - -{% if grains['reg_additional_modules'] %} -{% for module, mod_reg_code in grains['reg_additional_modules'].items() %} -{{ module }}_registration: - cmd.run: - - name: /usr/bin/SUSEConnect -p {{ module }} {{ "-r $mod_reg_code" if mod_reg_code else "" }} - - env: - - mod_reg_code: {{ mod_reg_code }} - - retry: - attempts: 3 - interval: 15 -{% endfor %} -{% endif %} -{% endif %} -{% endif %} diff --git a/salt/provision.sh b/salt/provision.sh new file mode 100644 index 000000000..f83acfa60 --- /dev/null +++ b/salt/provision.sh @@ -0,0 +1,210 @@ +#!/bin/bash -xe +# Script to provision the machines using salt. It provides different stages to install and +# configure salt and run different salt executions. Find more information in print_help method +# or running `sh provision.sh -h` + +get_grain () { + re="$1:\s*(.*)" + grains_file=$2 + grains_file=${grains_file:="/etc/salt/grains"} + grains_content=$(grep -E $re $grains_file) + if [[ $grains_content =~ $re ]]; then + echo ${BASH_REMATCH[1]}; + return 0 + else + return 1 + fi +} + +salt_output_colored () { + if [[ "$(get_grain qa_mode)" == "true" ]]; then + echo "--no-color" + else + echo "--force-color" + fi +} + +install_salt_minion () { + reg_code=$1 + # If required, register + if [[ $reg_code != "" ]]; then + # Check SLE version + source /etc/os-release + # Register the system on SCC + SUSEConnect -r "$reg_code" + + # Register the modules accordingly with the SLE version. + if [[ $VERSION_ID =~ ^12\.? ]]; then + SUSEConnect -p sle-module-adv-systems-management/12/x86_64 + elif [[ $VERSION_ID =~ ^15\.? 
]]; then + SUSEConnect -p sle-module-basesystem/$VERSION_ID/x86_64 + else + echo "SLE Product version not supported by this script. Please, use version 12 or higher." + exit 1 + fi + fi + + # We have to force refresh the repos and the keys (keys may change during lifetime of this OS/image) + zypper --non-interactive --gpg-auto-import-keys refresh --force --services + zypper --non-interactive install salt-minion + + # deregister + if [[ $reg_code != "" ]]; then + SUSEConnect -d + fi +} + +bootstrap_salt () { + mv /tmp/salt /root || true + + # Check if qa_mode is enabled + [[ "$(get_grain qa_mode /tmp/grains)" == "true" ]] && qa_mode=1 + # Get registration code + reg_code=$(get_grain reg_code /tmp/grains) + # Check if salt-call is installed + which salt-call > /dev/null 2>&1 && salt_installed=1 + + # Workaround for the cases where the cloud providers are coming without repositories + # https://www.suse.com/support/kb/doc/?id=7022311 + # Check if the deployment is executed in a cloud provider + [[ "$(get_grain provider /tmp/grains)" =~ aws|azure|gcp ]] && cloud=1 + if [[ ${qa_mode} != 1 && ${cloud} == 1 && "${reg_code}" == "" ]]; then + zypper lr || sudo /usr/sbin/registercloudguest --force-new + fi + + # Install salt if qa_mode is False and salt is not already installed + if [[ ${qa_mode} != 1 && ${salt_installed} != 1 ]]; then + install_salt_minion ${reg_code} + fi + + # Recheck if salt-call is installed. 
If it's not available stop execution + which salt-call || exit 1 + # Move salt grains to salt folder + mkdir -p /etc/salt;mv /tmp/grains /etc/salt || true +} + +os_setup () { + # Execute the states within /root/salt/os_setup + # This first execution is done to configure the salt minion and install the iscsi formula + salt-call --local --file-root=/root/salt \ + --log-level=info \ + --log-file=/var/log/salt-os-setup.log \ + --log-file-level=debug \ + --retcode-passthrough \ + $(salt_output_colored) \ + state.apply os_setup || exit 1 +} + +predeploy () { + # Execute the states defined in /root/salt/top.sls + # This execution is done to pre configure the cluster nodes, the support machines and install the formulas + salt-call --local \ + --pillar-root=/root/salt/pillar/ \ + --log-level=info \ + --log-file=/var/log/salt-predeployment.log \ + --log-file-level=debug \ + --retcode-passthrough \ + $(salt_output_colored) \ + state.highstate saltenv=predeployment || exit 1 +} + +deploy () { + # Execute SAP and HA installation with the salt formulas + if [[ $(get_grain role) =~ .*_node ]]; then + salt-call --local \ + --log-level=info \ + --log-file=/var/log/salt-deployment.log \ + --log-file-level=debug \ + --retcode-passthrough \ + $(salt_output_colored) \ + state.highstate saltenv=base || exit 1 + fi +} + +run_tests () { + [[ "$(get_grain qa_mode)" == "true" ]] && qa_mode=1 + if [[ ${qa_mode} && $(get_grain role) == hana_node ]]; then + # We need to export HOST with the new hostname set by Salt + # Otherwise, hwcct will error out. + export HOST=$(hostname) + # Execute qa state file + salt-call --local --file-root=/root/salt/ \ + --log-level=info \ + --log-file=/var/log/salt-qa.log \ + --log-file-level=info \ + --retcode-passthrough \ + $(salt_output_colored) \ + state.apply qa_mode || exit 1 + fi +} + +print_help () { + cat <<-EOF +Provision the machines. The provisioning has different steps, so they can be executed depending on +the selected flags. 
The actions are always executed in the same order (if multiple are selected), +from top to bottom in this help text. + +Supported Options (if no options are provided (excluding -l) all the steps will be executed): + -s Bootstrap salt installation and configuration. It will register to SCC channels if needed + -o Execute OS setup operations. Register to SCC, updated the packages, etc + -p Execute predeployment operations (update hosts and hostnames, install support packages, etc) + -d Execute deployment operations (install sap, ha, drbd, etc) + -q Execute qa tests + -l [LOG_FILE] Append the log output to the provided file + -h Show this help. +EOF +} + +argument_number=0 +while getopts ":hsopdql:" opt; do + argument_number=$((argument_number + 1)) + case $opt in + h) + print_help + exit 0 + ;; + s) + excute_bootstrap_salt=1 + ;; + o) + excute_os_setup=1 + ;; + p) + excute_predeploy=1 + ;; + d) + excute_deploy=1 + ;; + q) + excute_run_tests=1 + ;; + l) + log_to_file=$OPTARG + ;; + *) + echo "Invalid option -$OPTARG" >&2 + print_help + exit 1 + ;; + esac +done + +if [[ -n $log_to_file ]]; then + argument_number=$((argument_number - 1)) + exec 1>> $log_to_file +fi + +if [ $argument_number -eq 0 ]; then + bootstrap_salt + os_setup + predeploy + deploy + run_tests +else + [[ -n $excute_bootstrap_salt ]] && bootstrap_salt + [[ -n $excute_os_setup ]] && os_setup + [[ -n $excute_predeploy ]] && predeploy + [[ -n $excute_deploy ]] && deploy + [[ -n $excute_run_tests ]] && run_tests +fi +exit 0 diff --git a/salt/qa_mode/run_qa_mode.sh b/salt/qa_mode/run_qa_mode.sh deleted file mode 100644 index 26ed1cf4e..000000000 --- a/salt/qa_mode/run_qa_mode.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash -xe - -# We need to export HOST with the new hostname set by Salt -# Otherwise, hwcct will error out. 
-export HOST=$(hostname) - -# Execute qa state file -salt-call --local --file-root=/root/salt/ \ - --log-level=info \ - --log-file=/tmp/salt-qa.log \ - --log-file-level=info \ - --retcode-passthrough \ - --force-color state.apply qa_mode || exit 1 diff --git a/salt/salt_provisioner_script.tpl b/salt/salt_provisioner_script.tpl deleted file mode 100644 index 97753756c..000000000 --- a/salt/salt_provisioner_script.tpl +++ /dev/null @@ -1,52 +0,0 @@ -#!/bin/bash -xe - -mv /tmp/salt /root || true - -# Check if qa_mode is enabled -grep -q 'qa_mode: true' /tmp/grains && QA_MODE=1 - -# Check if salt-call is installed -which salt-call > /dev/null 2>&1 && SALT=1 - -# Disable colors for QA_MODE -if [[ $${QA_MODE} ]]; then - SALT_DIR='/root/salt/' - SALT_CALL_FILES="$${SALT_DIR}deployment.sh $${SALT_DIR}formula.sh $${SALT_DIR}qa_mode/run_qa_mode.sh" - for file in $${SALT_CALL_FILES}; do - sed -i 's/force-color/no-color/g' $${file} - done -fi - -# Workaround for the cases where the cloud providers are coming without repositories -# https://www.suse.com/support/kb/doc/?id=7022311 -# Check if the deployment is executed in a cloud provider -grep -q 'provider: azure\|aws' /tmp/grains && CLOUD=1 -if [[ $${QA_MODE} != 1 && $${CLOUD} == 1 && "${regcode}" == "" ]]; then - zypper lr || sudo /usr/sbin/registercloudguest --force-new -fi - -# Install salt if QA_MODE is False and salt is not already installed -# It will register in SCC to install salt if registration code is provided -[[ "${regcode}" != "" ]] && REGISTER="-d -r ${regcode}" -if [[ $${QA_MODE} != 1 && $${SALT} != 1 ]]; then - sh /root/salt/install-salt-minion.sh $${REGISTER} -fi - -# Recheck if salt-call is installed. 
If it's not available stop execution -which salt-call || exit 1 - -# Move salt grains to salt folder -mkdir -p /etc/salt;mv /tmp/grains /etc/salt || true - -# Server configuration -sh /root/salt/deployment.sh || exit 1 - -# Salt formulas execution -if grep -q 'role:.*_node' /etc/salt/grains; then - sh /root/salt/formula.sh || exit 1 -fi - -# QA additional tasks -if [[ $${QA_MODE} ]] && grep -q 'role: hana_node' /etc/salt/grains; then - sh /root/salt/qa_mode/run_qa_mode.sh || exit 1 -fi