__main__.py
import os
import pulumi
import pulumi_kubernetes as k8s
from pulumi import Output
from pulumi_kubernetes.helm.v3 import Release, ReleaseArgs, RepositoryOptsArgs
from kic_util import pulumi_config
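
# All settings are read from the 'logstore' Pulumi config namespace,
# e.g. `pulumi config set logstore:chart_version 19.4.4`; each falls
# back to the default below when unset.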
config = pulumi.Config('logstore')

chart_name = config.get('chart_name')
if not chart_name:
    chart_name = 'elasticsearch'

chart_version = config.get('chart_version')
if not chart_version:
    chart_version = '19.4.4'

helm_repo_name = config.get('helm_repo_name')
if not helm_repo_name:
    helm_repo_name = 'bitnami'

helm_repo_url = config.get('helm_repo_url')
if not helm_repo_url:
    helm_repo_url = 'https://charts.bitnami.com/bitnami'
#
# Allow the user to set the timeout for this Helm chart; otherwise
# we default to 5 minutes.
#
helm_timeout = config.get_int('helm_timeout')
if not helm_timeout:
    helm_timeout = 300
#
# Define the default replica counts for the Elastic components. If not set, we default to one copy each of the
# master, ingest, data, and coordinating roles. This is suitable for smaller installations - K3s, MicroK8s,
# minikube, etc. - but may fall over under a high volume of logs.
#
master_replicas = config.get_int('master_replicas')
if not master_replicas:
    master_replicas = 1

ingest_replicas = config.get_int('ingest_replicas')
if not ingest_replicas:
    ingest_replicas = 1

data_replicas = config.get_int('data_replicas')
if not data_replicas:
    data_replicas = 1

coordinating_replicas = config.get_int('coordinating_replicas')
if not coordinating_replicas:
    coordinating_replicas = 1
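
# Given a directory name under python/infrastructure, resolve that
# project's Pulumi project name via the kic_util helper.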
def project_name_from_project_dir(dirname: str):
    script_dir = os.path.dirname(os.path.abspath(__file__))
    project_path = os.path.join(script_dir, '..', '..', '..', 'python', 'infrastructure', dirname)
    return pulumi_config.get_pulumi_project_name(project_path)
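
# Look up the 'kubeconfig' project's stack for the current Pulumi user and
# stack name, and read the cluster's kubeconfig from its outputs.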
stack_name = pulumi.get_stack()
project_name = pulumi.get_project()
pulumi_user = pulumi_config.get_pulumi_user()
k8_project_name = project_name_from_project_dir('kubeconfig')
k8_stack_ref_id = f"{pulumi_user}/{k8_project_name}/{stack_name}"
k8_stack_ref = pulumi.StackReference(k8_stack_ref_id)
kubeconfig = k8_stack_ref.require_output('kubeconfig').apply(lambda c: str(c))
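
# Build an explicit Kubernetes provider from that kubeconfig so resources
# are created on the referenced cluster rather than the ambient context.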
k8s_provider = k8s.Provider(resource_name='ingress-controller',
                            kubeconfig=kubeconfig)
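
# Create a dedicated namespace to hold the log store components.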
ns = k8s.core.v1.Namespace(resource_name='logstore',
                           metadata={'name': 'logstore'},
                           opts=pulumi.ResourceOptions(provider=k8s_provider))
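
# Assemble the Helm release arguments for the Elasticsearch chart (the
# Bitnami chart, unless overridden above).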
elastic_release_args = ReleaseArgs(
    chart=chart_name,
    repository_opts=RepositoryOptsArgs(
        repo=helm_repo_url
    ),
    version=chart_version,
    namespace=ns.metadata.name,
    # Values for the chart's parameters, specified hierarchically
    values={
        "master": {
            "replicas": master_replicas,
            "resources": {
                "requests": {},
                "limits": {}
            },
        },
        "coordinating": {
            "replicas": coordinating_replicas
        },
        "data": {
            "replicas": data_replicas,
            "resources": {
                "requests": {},
                "limits": {}
            },
        },
        "global": {
            "kibanaEnabled": True
        },
        "ingest": {
            "enabled": True,
            "replicas": ingest_replicas,
            "resources": {
                "requests": {},
                "limits": {}
            },
        }
    },
    # User-configurable timeout
    timeout=helm_timeout,
    # By default, the Release resource waits until all created resources
    # are available; set this to True to skip that wait.
    skip_await=False,
    # If the install fails, clean up
    cleanup_on_fail=True,
    # Provide a name for our release
    name="elastic",
    # Lint the chart before installing
    lint=True,
    # Force update if required
    force_update=True)
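
# Deploy the release through the explicit provider so it targets the
# referenced cluster.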
elastic_release = Release("elastic", args=elastic_release_args,
                          opts=pulumi.ResourceOptions(provider=k8s_provider))
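
# The chart exposes services named '<release>-elasticsearch' and
# '<release>-kibana' in the 'logstore' namespace; derive their
# cluster-internal DNS names for downstream stacks.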
elastic_rname = elastic_release.status.name
elastic_fqdn = Output.concat(elastic_rname, "-elasticsearch.logstore.svc.cluster.local")
kibana_fqdn = Output.concat(elastic_rname, "-kibana.logstore.svc.cluster.local")
pulumi.export('elastic_hostname', pulumi.Output.unsecret(elastic_fqdn))
pulumi.export('kibana_hostname', pulumi.Output.unsecret(kibana_fqdn))
# Export the release status so callers can inspect the deployment
estatus = elastic_release.status
pulumi.export("logstat_status", estatus)