Commit b33ecc4
Add service mesh YAML that includes SA
Signed-off-by: Nathan Fisher <[email protected]>
1 parent ad14a4f commit b33ecc4

1 file changed: 366 additions & 0 deletions
################################################################################
# Linkerd Service Mesh
#
# This is a basic Kubernetes config file to deploy a service mesh of Linkerd
# instances onto your Kubernetes cluster that is capable of handling HTTP,
# HTTP/2 and gRPC calls with some reasonable defaults.
#
# To configure your applications to use Linkerd for HTTP traffic, you can set
# the `http_proxy` environment variable to `$(NODE_NAME):4140`, where
# `NODE_NAME` is the name of the node on which the application instance is
# running. The `NODE_NAME` environment variable can be set with the downward
# API.
#
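# For example, a minimal sketch of a container spec (app name and image are
# hypothetical) that wires this up via the downward API; note that `NODE_NAME`
# must be defined before `http_proxy` so the `$(NODE_NAME)` reference expands:
#
#   containers:
#   - name: my-app
#     image: example/my-app:latest
#     env:
#     - name: NODE_NAME
#       valueFrom:
#         fieldRef:
#           fieldPath: spec.nodeName
#     - name: http_proxy
#       value: $(NODE_NAME):4140
#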
# If your application does not support the `http_proxy` environment variable or
# if you want to configure your application to use Linkerd for HTTP/2 or gRPC
# traffic, you must configure your application to send traffic directly to
# Linkerd:
#
# * $(NODE_NAME):4140 for HTTP
# * $(NODE_NAME):4240 for HTTP/2
# * $(NODE_NAME):4340 for gRPC
#
# If you are sending HTTP or HTTP/2 traffic directly to Linkerd, you must set
# the Host/Authority header to `<service>` or `<service>.<namespace>` where
# `<service>` and `<namespace>` are the names of the service and namespace
# that you want to proxy to. If unspecified, `<namespace>` defaults to
# `default`.
#
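# For example, assuming a service named `hello` in the `default` namespace and
# `NODE_NAME` set in your shell, a request sent directly to the HTTP outgoing
# router might look like:
#
#   curl -H "Host: hello.default" http://$NODE_NAME:4140/
#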
# If your application receives HTTP, HTTP/2, and/or gRPC traffic, it must have
# a Kubernetes Service object with ports named `http`, `h2`, and/or `grpc`,
# respectively.
#
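# For example, a sketch of such a Service (name, selector, and port numbers
# are hypothetical):
#
#   apiVersion: v1
#   kind: Service
#   metadata:
#     name: hello
#   spec:
#     selector:
#       app: hello
#     ports:
#     - name: http
#       port: 80
#     - name: grpc
#       port: 8888
#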
# You can deploy this to your Kubernetes cluster by running:
#   kubectl create ns linkerd
#   kubectl apply -n linkerd -f servicemesh.yml
#
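# Once applied, you can verify that the Linkerd pods are running with:
#   kubectl get pods -n linkerd
#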
# There are sections of this config that can be uncommented to enable:
# * CNI compatibility
# * Automatic retries
# * Zipkin tracing
################################################################################
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: l5d-config
  namespace: linkerd
data:
  config.yaml: |-
    admin:
      ip: 0.0.0.0
      port: 9990

    # Namers provide Linkerd with service discovery information. To use a
    # namer, you reference it in the dtab by its prefix. We define 4 namers:
    # * /io.l5d.k8s gets the address of the target app
    # * /io.l5d.k8s.http gets the address of the http-incoming Linkerd router on the target app's node
    # * /io.l5d.k8s.h2 gets the address of the h2-incoming Linkerd router on the target app's node
    # * /io.l5d.k8s.grpc gets the address of the grpc-incoming Linkerd router on the target app's node
    namers:
    - kind: io.l5d.k8s
    - kind: io.l5d.k8s
      prefix: /io.l5d.k8s.http
      transformers:
      # The daemonset transformer replaces the address of the target app with
      # the address of the http-incoming router of the Linkerd daemonset pod
      # on the target app's node.
      - kind: io.l5d.k8s.daemonset
        namespace: linkerd
        port: http-incoming
        service: l5d
        # hostNetwork: true # Uncomment if using host networking (eg for CNI)
    - kind: io.l5d.k8s
      prefix: /io.l5d.k8s.h2
      transformers:
      # The daemonset transformer replaces the address of the target app with
      # the address of the h2-incoming router of the Linkerd daemonset pod
      # on the target app's node.
      - kind: io.l5d.k8s.daemonset
        namespace: linkerd
        port: h2-incoming
        service: l5d
        # hostNetwork: true # Uncomment if using host networking (eg for CNI)
    - kind: io.l5d.k8s
      prefix: /io.l5d.k8s.grpc
      transformers:
      # The daemonset transformer replaces the address of the target app with
      # the address of the grpc-incoming router of the Linkerd daemonset pod
      # on the target app's node.
      - kind: io.l5d.k8s.daemonset
        namespace: linkerd
        port: grpc-incoming
        service: l5d
        # hostNetwork: true # Uncomment if using host networking (eg for CNI)
    - kind: io.l5d.rewrite
      prefix: /portNsSvcToK8s
      pattern: "/{port}/{ns}/{svc}"
      name: "/k8s/{ns}/{port}/{svc}"

    # Telemeters export metrics and tracing data about Linkerd, the services it
    # connects to, and the requests it processes.
    telemetry:
    - kind: io.l5d.prometheus # Expose Prometheus style metrics on :9990/admin/metrics/prometheus
    - kind: io.l5d.recentRequests
      sampleRate: 0.25 # Tune this sample rate before going to production
    # - kind: io.l5d.zipkin # Uncomment to enable exporting of zipkin traces
    #   host: zipkin-collector.default.svc.cluster.local # Zipkin collector address
    #   port: 9410
    #   sampleRate: 1.0 # Set to a lower sample rate depending on your traffic volume

    # The usage section configures anonymized usage reporting. You can set the
    # orgId to identify your organization, or set `enabled: false` to disable
    # reporting entirely.
    usage:
      orgId: linkerd-examples-servicemesh

    # Routers define how Linkerd actually handles traffic. Each router listens
    # for requests, applies routing rules to those requests, and proxies them
    # to the appropriate destinations. Each router is protocol specific.
    # For each protocol (HTTP, HTTP/2, gRPC) we define an outgoing router and
    # an incoming router. The application is expected to send traffic to the
    # outgoing router which proxies it to the incoming router of the Linkerd
    # running on the target service's node. The incoming router then proxies
    # the request to the target application itself. We also define HTTP and
    # HTTP/2 ingress routers which act as Ingress Controllers and route based
    # on the Ingress resource.
    routers:
    - label: http-outgoing
      protocol: http
      servers:
      - port: 4140
        ip: 0.0.0.0
      # This dtab looks up service names in k8s and falls back to DNS if they're
      # not found (e.g. for external services). It accepts names of the form
      # "service" and "service.namespace", defaulting the namespace to
      # "default". For DNS lookups, it uses port 80 if unspecified. Note that
      # dtab rules are read bottom to top. To see this in action, on the Linkerd
      # administrative dashboard, click on the "dtab" tab, select "http-outgoing"
      # from the dropdown, and enter a service name like "a.b". (Or click on the
      # "requests" tab to see recent traffic through the system and how it was
      # resolved.)
      dtab: |
        /ph  => /$/io.buoyant.rinet ;                     # /ph/80/google.com -> /$/io.buoyant.rinet/80/google.com
        /svc => /ph/80 ;                                  # /svc/google.com -> /ph/80/google.com
        /svc => /$/io.buoyant.porthostPfx/ph ;            # /svc/google.com:80 -> /ph/80/google.com
        /k8s => /#/io.l5d.k8s.http ;                      # /k8s/default/http/foo -> /#/io.l5d.k8s.http/default/http/foo
        /portNsSvc => /#/portNsSvcToK8s ;                 # /portNsSvc/http/default/foo -> /k8s/default/http/foo
        /host => /portNsSvc/http/default ;                # /host/foo -> /portNsSvc/http/default/foo
        /host => /portNsSvc/http ;                        # /host/default/foo -> /portNsSvc/http/default/foo
        /svc => /$/io.buoyant.http.domainToPathPfx/host ; # /svc/foo.default -> /host/default/foo
      client:
        kind: io.l5d.static
        configs:
        # Use HTTPS if sending to port 443
        - prefix: "/$/io.buoyant.rinet/443/{service}"
          tls:
            commonName: "{service}"

    - label: http-incoming
      protocol: http
      servers:
      - port: 4141
        ip: 0.0.0.0
      interpreter:
        kind: default
        transformers:
        - kind: io.l5d.k8s.localnode
          # hostNetwork: true # Uncomment if using host networking (eg for CNI)
      dtab: |
        /k8s => /#/io.l5d.k8s ;                           # /k8s/default/http/foo -> /#/io.l5d.k8s/default/http/foo
        /portNsSvc => /#/portNsSvcToK8s ;                 # /portNsSvc/http/default/foo -> /k8s/default/http/foo
        /host => /portNsSvc/http/default ;                # /host/foo -> /portNsSvc/http/default/foo
        /host => /portNsSvc/http ;                        # /host/default/foo -> /portNsSvc/http/default/foo
        /svc => /$/io.buoyant.http.domainToPathPfx/host ; # /svc/foo.default -> /host/default/foo

    - label: h2-outgoing
      protocol: h2
      servers:
      - port: 4240
        ip: 0.0.0.0
      dtab: |
        /ph  => /$/io.buoyant.rinet ;                     # /ph/80/google.com -> /$/io.buoyant.rinet/80/google.com
        /svc => /ph/80 ;                                  # /svc/google.com -> /ph/80/google.com
        /svc => /$/io.buoyant.porthostPfx/ph ;            # /svc/google.com:80 -> /ph/80/google.com
        /k8s => /#/io.l5d.k8s.h2 ;                        # /k8s/default/h2/foo -> /#/io.l5d.k8s.h2/default/h2/foo
        /portNsSvc => /#/portNsSvcToK8s ;                 # /portNsSvc/h2/default/foo -> /k8s/default/h2/foo
        /host => /portNsSvc/h2/default ;                  # /host/foo -> /portNsSvc/h2/default/foo
        /host => /portNsSvc/h2 ;                          # /host/default/foo -> /portNsSvc/h2/default/foo
        /svc => /$/io.buoyant.http.domainToPathPfx/host ; # /svc/foo.default -> /host/default/foo
      client:
        kind: io.l5d.static
        configs:
        # Use HTTPS if sending to port 443
        - prefix: "/$/io.buoyant.rinet/443/{service}"
          tls:
            commonName: "{service}"

    - label: h2-incoming
      protocol: h2
      servers:
      - port: 4241
        ip: 0.0.0.0
      interpreter:
        kind: default
        transformers:
        - kind: io.l5d.k8s.localnode
          # hostNetwork: true # Uncomment if using host networking (eg for CNI)
      dtab: |
        /k8s => /#/io.l5d.k8s ;                           # /k8s/default/h2/foo -> /#/io.l5d.k8s/default/h2/foo
        /portNsSvc => /#/portNsSvcToK8s ;                 # /portNsSvc/h2/default/foo -> /k8s/default/h2/foo
        /host => /portNsSvc/h2/default ;                  # /host/foo -> /portNsSvc/h2/default/foo
        /host => /portNsSvc/h2 ;                          # /host/default/foo -> /portNsSvc/h2/default/foo
        /svc => /$/io.buoyant.http.domainToPathPfx/host ; # /svc/foo.default -> /host/default/foo

    - label: grpc-outgoing
      protocol: h2
      servers:
      - port: 4340
        ip: 0.0.0.0
      identifier:
        kind: io.l5d.header.path
        segments: 1
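        # With one path segment, a gRPC request to e.g.
        # /helloworld.Greeter/SayHello is identified by its first path segment,
        # yielding the name /svc/helloworld.Greeter (the package and service
        # names here are illustrative).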
      dtab: |
        /hp  => /$/inet ;                                 # /hp/linkerd.io/8888 -> /$/inet/linkerd.io/8888
        /svc => /$/io.buoyant.hostportPfx/hp ;            # /svc/linkerd.io:8888 -> /hp/linkerd.io/8888
        /srv => /#/io.l5d.k8s.grpc/default/grpc ;         # /srv/service/package -> /#/io.l5d.k8s.grpc/default/grpc/service/package
        /svc => /$/io.buoyant.http.domainToPathPfx/srv ;  # /svc/package.service -> /srv/service/package
      client:
        kind: io.l5d.static
        configs:
        # Always use TLS when sending to external grpc servers
        - prefix: "/$/inet/{service}"
          tls:
            commonName: "{service}"

    - label: grpc-incoming
      protocol: h2
      servers:
      - port: 4341
        ip: 0.0.0.0
      identifier:
        kind: io.l5d.header.path
        segments: 1
      interpreter:
        kind: default
        transformers:
        - kind: io.l5d.k8s.localnode
          # hostNetwork: true # Uncomment if using host networking (eg for CNI)
      dtab: |
        /srv => /#/io.l5d.k8s/default/grpc ;              # /srv/service/package -> /#/io.l5d.k8s/default/grpc/service/package
        /svc => /$/io.buoyant.http.domainToPathPfx/srv ;  # /svc/package.service -> /srv/service/package

    # HTTP Ingress Controller listening on port 80
    - protocol: http
      label: http-ingress
      servers:
      - port: 80
        ip: 0.0.0.0
        clearContext: true
      identifier:
        kind: io.l5d.ingress
      dtab: /svc => /#/io.l5d.k8s
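      # For example, a sketch of an Ingress resource (names hypothetical) that
      # this router would route on, sending requests for hello.example.com to
      # the `hello` service's `http` port:
      #
      #   apiVersion: extensions/v1beta1
      #   kind: Ingress
      #   metadata:
      #     name: hello-ingress
      #   spec:
      #     rules:
      #     - host: hello.example.com
      #       http:
      #         paths:
      #         - backend:
      #             serviceName: hello
      #             servicePort: http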

    # HTTP/2 Ingress Controller listening on port 8080
    - protocol: h2
      label: h2-ingress
      servers:
      - port: 8080
        ip: 0.0.0.0
        clearContext: true
      identifier:
        kind: io.l5d.ingress
      dtab: /svc => /#/io.l5d.k8s

---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  labels:
    app: l5d
  name: l5d
  namespace: linkerd
spec:
  template:
    metadata:
      labels:
        app: l5d
    spec:
      # hostNetwork: true # Uncomment to use host networking (eg for CNI)
      serviceAccountName: linkerd-endpoints-reader
      volumes:
      - name: l5d-config
        configMap:
          name: "l5d-config"
      containers:
      - name: l5d
        image: buoyantio/linkerd:1.7.0
        env:
        - name: POD_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        args:
        - /io.buoyant/linkerd/config/config.yaml
        ports:
        - name: http-outgoing
          containerPort: 4140
          hostPort: 4140
        - name: http-incoming
          containerPort: 4141
        - name: h2-outgoing
          containerPort: 4240
          hostPort: 4240
        - name: h2-incoming
          containerPort: 4241
        - name: grpc-outgoing
          containerPort: 4340
          hostPort: 4340
        - name: grpc-incoming
          containerPort: 4341
        - name: http-ingress
          containerPort: 80
        - name: h2-ingress
          containerPort: 8080
        volumeMounts:
        - name: "l5d-config"
          mountPath: "/io.buoyant/linkerd/config"
          readOnly: true

      # Run `kubectl proxy` as a sidecar to give us authenticated access to the
      # Kubernetes API.
      - name: kubectl
        image: buoyantio/kubectl:v1.14.3
        args:
        - "proxy"
        - "-p"
        - "8001"
---
apiVersion: v1
kind: Service
metadata:
  name: l5d
  namespace: linkerd
spec:
  selector:
    app: l5d
  type: LoadBalancer
  ports:
  - name: http-outgoing
    port: 4140
  - name: http-incoming
    port: 4141
  - name: h2-outgoing
    port: 4240
  - name: h2-incoming
    port: 4241
  - name: grpc-outgoing
    port: 4340
  - name: grpc-incoming
    port: 4341
  - name: http-ingress
    port: 80
  - name: h2-ingress
    port: 8080
