Merge pull request openshift#2404 from sdodson/logging
Logging fixes
sdodson authored Sep 29, 2016
2 parents eff53a3 + 12c41e1 commit 5d52061
Showing 10 changed files with 201 additions and 52 deletions.
53 changes: 49 additions & 4 deletions inventory/byo/hosts.origin.example
@@ -65,10 +65,6 @@ openshift_release=v1.2
# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
#openshift_master_oauth_template=/path/to/login-template.html

-# Configure loggingPublicURL in the master config for aggregate logging
-# See: https://docs.openshift.org/latest/install_config/aggregate_logging.html
-#openshift_master_logging_public_url=https://kibana.example.com
-
# Configure imagePolicyConfig in the master config
# See: https://godoc.org/github.com/openshift/origin/pkg/cmd/server/api#ImagePolicyConfig
#openshift_master_image_policy_config={"maxImagesBulkImportedPerRepository": 3, "disableScheduledImport": true}
@@ -373,6 +369,55 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# `/hawkular/metrics` path will break installation of metrics.
#openshift_hosted_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics

# Logging deployment
#
# Logging deployment is currently disabled by default; enable it by setting this:
#openshift_hosted_logging_deploy=true
#
# Logging storage config
# Option A - NFS Host Group
# An NFS volume will be created with path "nfs_directory/volume_name"
# on the host within the [nfs] host group. For example, the volume
# path using these options would be "/exports/logging"
#openshift_hosted_logging_storage_kind=nfs
#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce']
#openshift_hosted_logging_storage_nfs_directory=/exports
#openshift_hosted_logging_storage_nfs_options='*(rw,root_squash)'
#openshift_hosted_logging_storage_volume_name=logging
#openshift_hosted_logging_storage_volume_size=10Gi
#
# Option B - External NFS Host
# NFS volume must already exist with path "nfs_directory/volume_name" on
# the storage_host. For example, the remote volume path using these
# options would be "nfs.example.com:/exports/logging"
#openshift_hosted_logging_storage_kind=nfs
#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce']
#openshift_hosted_logging_storage_host=nfs.example.com
#openshift_hosted_logging_storage_nfs_directory=/exports
#openshift_hosted_logging_storage_volume_name=logging
#openshift_hosted_logging_storage_volume_size=10Gi
#
# Option C - Dynamic -- Use this if OpenShift supports dynamic volume
# provisioning for your cloud platform.
#openshift_hosted_logging_storage_kind=dynamic
#
# Option D - none -- Logging will use emptyDir volumes, which are destroyed
# when pods are deleted.
#
# Other Logging Options -- Common items you may wish to reconfigure. For the
# complete list of options, see roles/openshift_hosted_logging/README.md
#
# Configure loggingPublicURL in the master config for aggregate logging;
# defaults to https://kibana.{{ openshift_master_default_subdomain }}
#openshift_master_logging_public_url=https://kibana.example.com
# Configure the number of Elasticsearch nodes. Unless you're using dynamic
# provisioning, this value must be 1.
#openshift_hosted_logging_elasticsearch_cluster_size=1
#openshift_hosted_logging_hostname=logging.apps.example.com
# Configure the prefix and version for the deployer image
#openshift_hosted_logging_deployer_prefix=registry.example.com:8888/openshift3/
#openshift_hosted_logging_deployer_version=3.3.0

# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'

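Taken together, a minimal inventory fragment for enabling logging with Option A might look like the sketch below (values are illustrative, and an `[nfs]` host group is assumed to exist):

```ini
# Sketch: hosted logging backed by an NFS volume created on the host in
# the [nfs] group; with these values the volume path is /exports/logging.
openshift_hosted_logging_deploy=true
openshift_hosted_logging_storage_kind=nfs
openshift_hosted_logging_storage_nfs_directory=/exports
openshift_hosted_logging_storage_volume_name=logging
openshift_hosted_logging_storage_volume_size=10Gi
openshift_hosted_logging_hostname=logging.apps.example.com
```
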
52 changes: 48 additions & 4 deletions inventory/byo/hosts.ose.example
@@ -65,10 +65,6 @@ openshift_release=v3.2
# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
#openshift_master_oauth_template=/path/to/login-template.html

-# Configure loggingPublicURL in the master config for aggregate logging
-# See: https://docs.openshift.com/enterprise/latest/install_config/aggregate_logging.html
-#openshift_master_logging_public_url=https://kibana.example.com
-#
# Configure imagePolicyConfig in the master config
# See: https://godoc.org/github.com/openshift/origin/pkg/cmd/server/api#ImagePolicyConfig
#openshift_master_image_policy_config={"maxImagesBulkImportedPerRepository": 3, "disableScheduledImport": true}
@@ -372,6 +368,54 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# `/hawkular/metrics` path will break installation of metrics.
#openshift_hosted_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics

# Logging deployment
#
# Logging deployment is currently disabled by default; enable it by setting this:
#openshift_hosted_logging_deploy=true
#
# Logging storage config
# Option A - NFS Host Group
# An NFS volume will be created with path "nfs_directory/volume_name"
# on the host within the [nfs] host group. For example, the volume
# path using these options would be "/exports/logging"
#openshift_hosted_logging_storage_kind=nfs
#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce']
#openshift_hosted_logging_storage_nfs_directory=/exports
#openshift_hosted_logging_storage_nfs_options='*(rw,root_squash)'
#openshift_hosted_logging_storage_volume_name=logging
#openshift_hosted_logging_storage_volume_size=10Gi
#
# Option B - External NFS Host
# NFS volume must already exist with path "nfs_directory/volume_name" on
# the storage_host. For example, the remote volume path using these
# options would be "nfs.example.com:/exports/logging"
#openshift_hosted_logging_storage_kind=nfs
#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce']
#openshift_hosted_logging_storage_host=nfs.example.com
#openshift_hosted_logging_storage_nfs_directory=/exports
#openshift_hosted_logging_storage_volume_name=logging
#openshift_hosted_logging_storage_volume_size=10Gi
#
# Option C - Dynamic -- Use this if OpenShift supports dynamic volume
# provisioning for your cloud platform.
#openshift_hosted_logging_storage_kind=dynamic
#
# Option D - none -- Logging will use emptyDir volumes, which are destroyed
# when pods are deleted.
#
# Other Logging Options -- Common items you may wish to reconfigure. For the
# complete list of options, see roles/openshift_hosted_logging/README.md
#
# Configure loggingPublicURL in the master config for aggregate logging;
# defaults to https://kibana.{{ openshift_master_default_subdomain }}
#openshift_master_logging_public_url=https://kibana.example.com
# Configure the number of Elasticsearch nodes. Unless you're using dynamic
# provisioning, this value must be 1.
#openshift_hosted_logging_elasticsearch_cluster_size=1
#openshift_hosted_logging_hostname=logging.apps.example.com
# Configure the prefix and version for the deployer image
#openshift_hosted_logging_deployer_prefix=registry.example.com:8888/openshift3/
#openshift_hosted_logging_deployer_version=3.3.0

# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
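
For contrast, a sketch of the same settings using Option C, assuming the cloud platform supports dynamic volume provisioning (with dynamic storage the Elasticsearch cluster size may exceed 1):

```ini
# Sketch: dynamically provisioned logging storage (illustrative values).
openshift_hosted_logging_deploy=true
openshift_hosted_logging_storage_kind=dynamic
openshift_hosted_logging_elasticsearch_cluster_size=3
openshift_hosted_logging_hostname=logging.apps.example.com
```
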
41 changes: 40 additions & 1 deletion playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -19,6 +19,12 @@
openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"
- set_fact:
logging_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift.master.default_subdomain | default('router.default.svc.cluster.local', true))) }}"
logging_ops_hostname: "{{ openshift_hosted_logging_ops_hostname | default('kibana-ops.' ~ (openshift.master.default_subdomain | default('router.default.svc.cluster.local', true))) }}"
logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default(openshift.master.public_api_url) }}"
logging_elasticsearch_cluster_size: "{{ openshift_hosted_logging_elasticsearch_cluster_size | default(1) }}"
logging_elasticsearch_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}"
roles:
- role: openshift_cli
- role: openshift_hosted_facts
@@ -44,9 +50,42 @@
- role: openshift_hosted
- role: openshift_metrics
when: openshift.hosted.metrics.deploy | bool
- role: openshift_hosted_logging
when: openshift.hosted.logging.deploy | bool
openshift_hosted_logging_hostname: "{{ logging_hostname }}"
openshift_hosted_logging_ops_hostname: "{{ logging_ops_hostname }}"
openshift_hosted_logging_master_public_url: "{{ logging_master_public_url }}"
openshift_hosted_logging_elasticsearch_cluster_size: "{{ logging_elasticsearch_cluster_size }}"
openshift_hosted_logging_elasticsearch_pvc_dynamic: "{{ 'true' if openshift.hosted.logging.storage_kind | default(none) == 'dynamic' else 'false' }}"
openshift_hosted_logging_elasticsearch_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift.hosted.logging.storage_kind | default(none) == 'dynamic' else '' }}"
openshift_hosted_logging_elasticsearch_pvc_prefix: "{{ 'logging-es' if openshift.hosted.logging.storage_kind | default(none) is not none else '' }}"
openshift_hosted_logging_elasticsearch_ops_cluster_size: "{{ logging_elasticsearch_ops_cluster_size }}"
openshift_hosted_logging_elasticsearch_ops_pvc_dynamic: "{{ 'true' if openshift.hosted.logging.storage_kind | default(none) == 'dynamic' else 'false' }}"
openshift_hosted_logging_elasticsearch_ops_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift.hosted.logging.storage_kind | default(none) == 'dynamic' else '' }}"
openshift_hosted_logging_elasticsearch_ops_pvc_prefix: "{{ 'logging-es' if openshift.hosted.logging.storage_kind | default(none) is not none else '' }}"

- role: cockpit-ui
when: openshift.common.deployment_subtype == 'registry'
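
To preview how the default chain in the `set_fact` block above resolves against a given inventory, a throwaway playbook along these lines could print the computed values (a sketch; `check_logging_defaults.yml` is hypothetical and not part of this repository):

```yaml
# check_logging_defaults.yml -- illustrative sketch only.
- hosts: oo_first_master
  tasks:
    - debug:
        msg: "Kibana hostname: {{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift.master.default_subdomain | default('router.default.svc.cluster.local', true))) }}"
    - debug:
        msg: "Elasticsearch cluster size: {{ openshift_hosted_logging_elasticsearch_cluster_size | default(1) }}"
```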

- name: Configure all masters for logging
serial: 1
handlers:
- include: ../../../roles/openshift_master/handlers/main.yml
static: yes
hosts: oo_masters
tasks:
- openshift_facts:
role: master
local_facts:
logging_public_url: "https://{{ openshift_hosted_logging_hostname | default('kibana.' ~ openshift_master_default_subdomain) }}"
when: openshift.hosted.logging.deploy | default(openshift.common.version_gte_3_3_or_1_3)
- modify_yaml:
dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
yaml_key: assetConfig.loggingPublicURL
yaml_value: "{{ openshift.master.logging_public_url }}"
notify: restart master
when: openshift.hosted.logging.deploy | default(openshift.common.version_gte_3_3_or_1_3)
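
The net effect of the `modify_yaml` task on each master is an `assetConfig` entry along these lines (hostname illustrative):

```yaml
# master-config.yaml (excerpt) -- the key and value written by modify_yaml.
assetConfig:
  loggingPublicURL: https://kibana.apps.example.com
```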

- name: Configure CA certificate for secure registry
hosts: oo_nodes_to_config
tags:
@@ -87,7 +126,7 @@
{{ openshift.common.client_binary }} get service docker-registry
--template='{{ '{{' }} .spec.clusterIP {{ '}}' }}'
--config={{ openshift_hosted_kubeconfig }}
-n default
register: docker_registry_service_ip
when: openshift.common.deployment_subtype == 'registry'
changed_when: false
23 changes: 19 additions & 4 deletions roles/openshift_facts/library/openshift_facts.py
@@ -1821,10 +1821,25 @@ def get_defaults(self, roles, deployment_type, deployment_subtype):
),
nfs=dict(
directory='/exports',
-options='*(rw,root_squash)'),
-openstack=dict(
-filesystem='ext4',
-volumeID='123'),
+options='*(rw,root_squash)'
+),
host=None,
access_modes=['ReadWriteOnce'],
create_pv=True,
create_pvc=False
)
),
logging=dict(
storage=dict(
kind=None,
volume=dict(
name='logging-es',
size='10Gi'
),
nfs=dict(
directory='/exports',
options='*(rw,root_squash)'
),
host=None,
access_modes=['ReadWriteOnce'],
create_pv=True,
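
As a sketch, the logging defaults added above have the following shape, reconstructed here as a plain dict (the real code builds this inside `get_defaults()`; the truncated tail of the dict is omitted):

```python
# Illustrative only: the structure of the new logging storage defaults.
logging_defaults = dict(
    storage=dict(
        kind=None,
        volume=dict(name='logging-es', size='10Gi'),
        nfs=dict(directory='/exports', options='*(rw,root_squash)'),
        host=None,
        access_modes=['ReadWriteOnce'],
        create_pv=True,
    )
)

print(logging_defaults['storage']['volume']['size'])  # -> 10Gi
```
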
3 changes: 2 additions & 1 deletion roles/openshift_hosted_logging/README.md
@@ -17,7 +17,8 @@
- openshift_hosted_logging_elasticsearch_pvc_dynamic: Set to `true` to annotate the created PersistentVolumeClaims so that their backing storage can be dynamically provisioned (if that is available for your cluster).
- openshift_hosted_logging_elasticsearch_storage_group: Supplemental group ID for access to Elasticsearch storage volumes; backing volumes should allow access by this group ID (defaults to 65534).
- openshift_hosted_logging_elasticsearch_nodeselector: Specify the nodeSelector that Elasticsearch should use (label=value).
-- openshift_hosted_logging_fluentd_nodeselector: The nodeSelector to use for the Fluentd DaemonSet. Defaults to "logging-infra-fluentd=true".
+- openshift_hosted_logging_fluentd_nodeselector: The nodeSelector used to determine which nodes to apply the `openshift_hosted_logging_fluentd_nodeselector_label` label to.
+- openshift_hosted_logging_fluentd_nodeselector_label: The label applied to nodes included in the Fluentd DaemonSet. Defaults to "logging-infra-fluentd=true".
- openshift_hosted_logging_kibana_nodeselector: Specify the nodeSelector that Kibana should use (label=value).
- openshift_hosted_logging_curator_nodeselector: Specify the nodeSelector that Curator should use (label=value).
- openshift_hosted_logging_enable_ops_cluster: If "true", configure a second ES cluster and Kibana for ops logs.
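
For example (hypothetical label values), restricting label application to nodes in a given region while keeping the default DaemonSet label:

```
openshift_hosted_logging_fluentd_nodeselector=region=infra
openshift_hosted_logging_fluentd_nodeselector_label=logging-infra-fluentd=true
```
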
2 changes: 2 additions & 0 deletions roles/openshift_hosted_logging/defaults/main.yml
@@ -0,0 +1,2 @@
---
examples_base: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/examples"
18 changes: 9 additions & 9 deletions roles/openshift_hosted_logging/tasks/deploy_logging.yaml
@@ -40,7 +40,7 @@

- name: "Create templates for logging accounts and the deployer"
command: >
-{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f /usr/share/openshift/examples/infrastructure-templates/enterprise/logging-deployer.yaml
+{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ examples_base }}/infrastructure-templates/{{ 'enterprise' if openshift_deployment_type == 'openshift-enterprise' else 'origin' }}/logging-deployer.yaml
register: template_output
failed_when: "template_output.rc == 1 and 'exists' not in template_output.stderr"

@@ -82,8 +82,8 @@
shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods | grep logging-deployer.*Completed"
register: result
until: result.rc == 0
-retries: 15
-delay: 10
+retries: 20
+delay: 15

- name: "Process imagestream template"
shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-imagestream-template {{ oc_new_app_values }}"
@@ -102,7 +102,7 @@
until: result.rc == 0
failed_when: result.rc == 1 and 'not found' not in result.stderr
retries: 20
-delay: 10
+delay: 5

- name: "Wait for component pods to be running"
shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component={{ item }} | grep Running"
@@ -114,7 +114,7 @@
until: result.rc == 0
failed_when: result.rc == 1 or 'Error' in result.stderr
retries: 20
-delay: 10
+delay: 15

- name: "Wait for ops component pods to be running"
shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component={{ item }} | grep Running"
@@ -127,26 +127,26 @@
until: result.rc == 0
failed_when: result.rc == 1 or 'Error' in result.stderr
retries: 20
-delay: 10
+delay: 15

- name: "Wait for fluentd DaemonSet to exist"
shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get daemonset logging-fluentd"
register: result
until: result.rc == 0
failed_when: result.rc == 1 or 'Error' in result.stderr
retries: 20
-delay: 10
+delay: 5

- name: "Deploy fluentd by labeling the node"
shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node {{ openshift_hostname }} {{ openshift_hosted_logging_fluentd_nodeselector if openshift_hosted_logging_fluentd_nodeselector is defined else 'logging-infra-fluentd=true' }}"
shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node --overwrite=true {{ '-l openshift_hosted_logging_fluentd_nodeselector' if openshift_hosted_logging_fluentd_nodeselector is defined else '--all' }} {{ openshift_hosted_logging_fluentd_nodeselector_label if openshift_hosted_logging_fluentd_nodeselector_label is defined else 'logging-infra-fluentd=true' }}"

- name: "Wait for fluentd to be running"
shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component=fluentd | grep Running"
register: result
until: result.rc == 0
failed_when: result.rc == 1 or 'Error' in result.stderr
retries: 20
-delay: 10
+delay: 15

- debug:
msg: "Logging components deployed. Note persistent volume for elasticsearch must be setup manually"