-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathdocker-compose.yaml
388 lines (356 loc) · 10.8 KB
/
docker-compose.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
version: "3.9" # NOTE: top-level `version` is obsolete in Compose V2 (ignored), kept for backward compatibility
# First network configuration. Change this to the network interface you want to use primarily. (Better configuration for multiple Network interfaces needed).
networks:
  # rmp:
  #   driver: macvlan
  #   driver_opts:
  #     parent: eno1 # robot network interface
  #   ipam:
  #     config:
  #       - subnet: 192.168.0.0/24
  #         gateway: 192.168.0.1
  #         ip_range: 192.168.0.200/25
  #         aux_addresses:
  #           net-address: 192.168.0.100 # excludes addresses from being used by the pool
  # make new bridge network for ros
  rmp:
    driver: bridge
    # ipam:
    #   config:
    #     - subnet:

# add this mountpoint to all services: - ./customize/entrypoint.sh:/entrypoint.sh
# Attention: child services will inherit settings from their parents. So if you set a network_mode: host in the base service, all child services will also use host networking. This is not always what you want. So be careful with this.
services:
  # Base image containing dependencies.
  base:
    image: ghcr.io/bjoernellens1/ros2_rmp/rmp:base
    build:
      context: .
      dockerfile: docker/Dockerfile
      tags:
        - ghcr.io/bjoernellens1/ros2_rmp/rmp:base
      args:
        ROS_DISTRO: humble
      target: base
      x-bake: # buildx bake extension: multi-platform build targets
        platforms:
          - linux/arm64
          - linux/amd64
    # Interactive shell
    stdin_open: true
    tty: true
    # Networking and IPC for ROS 2 (host mode needed for DDS multicast discovery)
    network_mode: host
    ipc: host
    # Needed to display graphical applications
    environment:
      # Allows graphical programs in the container.
      - DISPLAY=${DISPLAY}
      - QT_X11_NO_MITSHM=1
      - NVIDIA_DRIVER_CAPABILITIES=all
      # set correct ros2 parameters: domain id and rmw implementation
      - ROS_DOMAIN_ID=5
      - RMW_IMPLEMENTATION=rmw_cyclonedds_cpp
      - CYCLONEDDS_URI=file:///cyclonedds.xml
    volumes:
      # Allows graphical programs in the container.
      - /tmp/.X11-unix:/tmp/.X11-unix:rw
      - ${XAUTHORITY:-$HOME/.Xauthority}:/root/.Xauthority
      - ./config/entrypoint.sh:/entrypoint.sh
      - ./config/cyclonedds.xml:/cyclonedds.xml
    restart: unless-stopped
    # networks: # not using bridging anymore, instead try using all services on macvlan? Let's see... should work for now. Bridging does definitely not work because multicast is not supported there.
    #   rmp:
    #     # ipv4_address: 192.168.0.101 # actually don't need to set IPs; they are assigned automatically by docker-compose and should be inherited by all child services.

  # Overlay image containing the project specific source code.
  overlay:
    extends: base
    image: ghcr.io/bjoernellens1/ros2_rmp/rmp:overlay
    build:
      context: .
      dockerfile: docker/Dockerfile
      tags:
        - ghcr.io/bjoernellens1/ros2_rmp/rmp:overlay
      target: overlay
      x-bake:
        platforms:
          - linux/arm64
          - linux/amd64
    volumes:
      - .:/repo
    command: >
      /bin/bash

  # Additional dependencies for GUI applications
  guis:
    extends: overlay
    image: ghcr.io/bjoernellens1/ros2_rmp/rmp:guis
    build:
      context: .
      dockerfile: docker/Dockerfile
      tags:
        - ghcr.io/bjoernellens1/ros2_rmp/rmp:guis
      target: guis
      x-bake:
        platforms:
          - linux/arm64
          - linux/amd64
    command: >
      /bin/bash
    devices:
      - /dev/dri:/dev/dri

  # Robot State Publisher
  rsp:
    extends: overlay
    command: >
      ros2 launch cps_rmp220_support rsp.launch.py

  # Controller
  controller:
    extends: base
    command: >
      ros2 run segwayrmp SmartCar --ros-args -r cmd_vel:=cmd_vel_out -p serial_full_name:=/dev/segway
    devices:
      - /dev/segway:/dev/ttyUSB0
      #- /dev/ttyUSB0:/dev/ttyUSB0
    privileged: true

  # teleop
  teleop:
    extends: base
    depends_on:
      - controller
    command: >
      ros2 launch rmp220_teleop robot_joystick.launch.py
    devices:
      - /dev/input/js0:/dev/input/js0
      #- /dev/input/by-id/usb-Logitech_Wireless_Gamepad_F710_56679674-joystick:/dev/input/by-id/usb-Logitech_Wireless_Gamepad_F710_56679674-joystick
    privileged: true

  # lidar
  lidar:
    extends: overlay
    depends_on:
      - lidar_filter
    command: >
      ros2 launch cps_rmp220_support robot_lidar.launch.py serial_port:=/dev/rplidarA1
    devices:
      - /dev/rplidarA1:/dev/rplidarA1 # udev rules needed for this to work:
      # SUBSYSTEM=="tty", ATTRS{serial}=="0001", SYMLINK+="segway"
      # SUBSYSTEM=="tty", ATTRS{serial}=="3453995662b3af4f81f4a69eba5f3f29", SYMLINK+="rplidarA1"

  # Lidar filtering node.
  lidar_filter:
    extends: overlay
    command: >
      ros2 launch cps_rmp220_support robot_scan_filter.launch.py

  # localization by ekf node
  ekf:
    extends: overlay
    depends_on:
      - controller
      - rsp
    command: >
      ros2 launch cps_rmp220_support robot_localization.launch.py

  # mapping
  mapping:
    extends: overlay
    depends_on:
      - ekf
      - rsp
      - lidar
    command: >
      ros2 launch cps_rmp220_support robot_mapping.launch.py

  # slam-toolbox-localization
  localization:
    extends: overlay
    depends_on:
      - ekf
      - rsp
      - lidar
    command: >
      ros2 launch cps_rmp220_support robot_mapping_localization.launch.py map_file_name:=/repo/maps/map.yaml

  # amcl_localization
  amcl:
    extends: overlay
    depends_on:
      - ekf
      - rsp
      - lidar
    command: >
      ros2 launch cps_rmp220_support robot_amcl.launch.py map:=/repo/maps/map.yaml

  # navigation
  navigation:
    extends: overlay
    depends_on:
      - controller
      - teleop
      - rsp
      - lidar
      - ekf
      - oakd
    command: >
      ros2 launch nav2_bringup bringup_launch.py slam:=False map:=/repo/maps/map_01-26-24.yaml use_sim_time:=False use_composition:=True params_file:=/repo/config/nav2_params.yaml
    # maps/map_openlabday.yaml

  # bash
  bash:
    extends: overlay
    command: >
      /bin/bash

  # rviz2
  rviz2:
    #extends: guis
    image: ghcr.io/bjoernellens1/ros2_rmp/rmp:guis
    command: >
      ros2 launch cps_rmp220_support rviz.launch.py
    # Needed to display graphical applications
    privileged: true # really necessary?
    environment:
      # Allows graphical programs in the container.
      - DISPLAY=${DISPLAY}
      - QT_X11_NO_MITSHM=1
      - NVIDIA_DRIVER_CAPABILITIES=all
      - ROS_DOMAIN_ID=5
      - RMW_IMPLEMENTATION=rmw_cyclonedds_cpp
    volumes:
      # Allows graphical programs in the container.
      - /tmp/.X11-unix:/tmp/.X11-unix:rw
      - ${XAUTHORITY:-$HOME/.Xauthority}:/root/.Xauthority

  # Foxglove Studio Bridge
  foxglove_bridge:
    extends: overlay
    command: >
      ros2 launch foxglove_bridge foxglove_bridge_launch.xml port:=8765
    # tls:=true certfile:=/certs/foxgloveCert.crt keyfile:=/certs/foxgloveKey.key
    volumes:
      - /opt/cps/certs:/certs

  # Foxglove Studio Webserver
  foxglove:
    image: ghcr.io/foxglove/studio:latest
    stdin_open: true
    tty: true
    # Networking
    network_mode: bridge
    ports:
      - "8080:8080" # quoted: port mappings should always be strings to avoid YAML number parsing
    depends_on:
      - foxglove_bridge
    volumes:
      - ./foxglove/default.json:/foxglove/default-layout.json

  # USB Camera Stream
  cam:
    extends: overlay
    command: >
      ros2 run ros2_cam_openCV cam_node
    devices:
      - /dev/video0:/dev/video0

  # ROS2 Frontier exploration
  explorer:
    extends: overlay
    depends_on:
      - controller
      - teleop
      - rsp
      - lidar
      - ekf
      - navigation
    command: >
      ros2 launch cps_rmp220_support robot_exploration.launch.py

  ### Images for ROS1 Interactions
  # ROS1 Bridge
  ros1bridge:
    image: ghcr.io/bjoernellens1/ros2_rmp/ros1bridge
    command: >
      ros2 run ros1_bridge dynamic_bridge --bridge-all-2to1-topics
    build:
      context: .
      dockerfile: docker/Dockerfile
      tags:
        - ghcr.io/bjoernellens1/ros2_rmp/ros1bridge
      args:
        ROS_DISTRO: humble
      target: bridge
      x-bake:
        platforms:
          #- linux/arm64
          - linux/amd64
    # Networking and IPC for ROS 2
    network_mode: host
    ipc: host
    environment:
      - ROS_DOMAIN_ID=5
      - RMW_IMPLEMENTATION=rmw_cyclonedds_cpp
      - ROS_MASTER_URI=http://localhost:11311 # is configured to run roscore on the robot but could change to local ros1 machine here

  # ROS1 roscore
  roscore:
    command: >
      roscore
    extends: ros1bridge
    network_mode: host
    ipc: host
    environment:
      - ROS_DOMAIN_ID=5
      - RMW_IMPLEMENTATION=rmw_cyclonedds_cpp
      - ROS_MASTER_URI=http://localhost:11311 # is configured to run roscore on the robot but could change to local ros1 machine here

  ## Configure on ROS1 Hosts
  # seggy 192.168.0.100
  #   locally running ros-package: control1
  #   subscribing topic2
  #   publishing topic1
  # robot2 192.168.x.x
  #   locally running ros-package: control2
  #   subscribing topic1
  #   publishing topic2
  # As we need one ros-master to control the communication, we choose 192.168.1.1 as master. Therefore we execute locally on robot 1:
  #   export ROS_MASTER_URI=http://192.168.0.100:11311 # or localhost?
  #   export ROS_HOSTNAME=192.168.0.100
  #   export ROS_IP=192.168.0.100
  #   roscore
  # In order to connect to the ROS-master, we execute locally on robot2:
  #   export ROS_MASTER_URI=http://192.168.0.100:11311
  #   export ROS_IP=192.168.0.?
  #   export ROS_HOSTNAME=192.168.1.?

  # ROS2 oak-d-lite camera
  oakd:
    extends: overlay
    command: >
      ros2 launch depthai_examples stereo.launch.py
    #devices:
    #  - /dev/oakd-lite:/dev/oakd-lite # needs corresponding udev rules for this to work:
    #    SUBSYSTEM=="usb", ATTRS{idVendor}=="03e7", MODE="0666", SYMLINK+="oakd-lite"
    #  - /dev/:/dev/
    device_cgroup_rules:
      - 'c 189:* rmw' # grant read/mknod/write on USB character devices (major 189)
    volumes:
      - /dev/bus/usb:/dev/bus/usb

  # for testing the oak-d-lite camera -> works now with cgroup rules
  depthai:
    image: luxonis/depthai:latest
    command: >
      python3 /depthai/depthai_demo.py
    stdin_open: true
    tty: true
    device_cgroup_rules:
      - 'c 189:* rmw'
    volumes:
      - /dev/bus/usb:/dev/bus/usb
    environment:
      - DISPLAY=${DISPLAY}
      - QT_X11_NO_MITSHM=1
      - NVIDIA_DRIVER_CAPABILITIES=all

  # ZeroTier VPN router for remote access
  zerotier:
    image: "zyclonite/zerotier:router"
    container_name: zerotier-one
    devices:
      - /dev/net/tun
    network_mode: host
    volumes:
      - '/var/lib/zerotier-one:/var/lib/zerotier-one'
    cap_add:
      - NET_ADMIN
      - SYS_ADMIN
      - NET_RAW
    restart: unless-stopped
    environment:
      - TZ=Europe/Amsterdam
      - PUID=1000
      - PGID=1000
      - ZEROTIER_ONE_LOCAL_PHYS=enp5s0 wlx00e04c5513fc # change for actual interfaces
      - ZEROTIER_ONE_USE_IPTABLES_NFT=false
      - ZEROTIER_ONE_GATEWAY_MODE=inbound # might change to both ways
      #- ZEROTIER_ONE_NETWORK_IDS= # does not really do much?