diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index a2e0796b..00000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,20 +0,0 @@ -version: 2 -jobs: - build: - docker: - - image: quay.io/dhtech/puppet-ci:latest - - working_directory: ~/repo - - steps: - - checkout - - - run: - name: puppet lint - command: | - /root/.rbenv/shims/puppet-lint --no-puppet_url_without_modules-check --fail-on-warnings . - - - run: - name: erb check - command: | - /root/.rbenv/shims/rails-erb-lint check diff --git a/.github/workflows/puppet-lint.yaml b/.github/workflows/puppet-lint.yaml new file mode 100644 index 00000000..2c479037 --- /dev/null +++ b/.github/workflows/puppet-lint.yaml @@ -0,0 +1,24 @@ +name: puppet linting +on: + pull_request: + branches: + - master + - main + - production + paths-ignore: + - "**.md" + +jobs: + puppet-lint: + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v4 + + + - name: puppet-lint + uses: scottbrenner/puppet-lint-action@v1.0.4 + with: + args: ./ + diff --git a/.puppet-lint.rc b/.puppet-lint.rc new file mode 100644 index 00000000..60a11cfc --- /dev/null +++ b/.puppet-lint.rc @@ -0,0 +1,6 @@ +--fail-on-warnings +--relative +--no-class_inherits_from_params_class-check +--no-documentation-check +--no-puppet_url_without_modules-check +--no-legacy_facts diff --git a/modules/akvorado.py b/modules/akvorado.py new file mode 100644 index 00000000..c0b89096 --- /dev/null +++ b/modules/akvorado.py @@ -0,0 +1,138 @@ +# vim: ts=4: sts=4: sw=4: expandtab +# Copyright 2024 dhtech +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file +import lib +import os +import sqlite3 +import yaml + +DB_FILE = '/etc/ipplan.db' + +def get_sflow_clients(): + if os.path.isfile(DB_FILE): + try: + conn = sqlite3.connect(DB_FILE) + db = conn.cursor() + except sqlite3.Error as e: + print("An error occurred: {}".format(e.args[0])) + exit(2) + else: + print("No database file found: {}".format(DB_FILE)) + exit(3) + db.execute( + "SELECT h.name AS hostname, h.ipv4_addr_txt AS ipv4_addr ,h.ipv6_addr_txt AS ipv6_addr, o2.value AS layer " + "FROM host h " + "INNER JOIN option o1 ON h.node_id = o1.node_id " + "INNER JOIN option o2 ON h.node_id = o2.node_id " + "WHERE o1.name='pkg' AND o1.value='sflowclient' " + "AND o2.name='layer';" + ) + res = db.fetchall() + if not res: + return None + + column_names = [description[0] for description in db.description] + conn.close() + rows_dict = [dict(zip(column_names, row)) for row in res] + + return rows_dict + +def get_snmpv2_providers(): + providers = [] + clients = get_sflow_clients() + if not clients: + return providers + current_event = lib.get_current_event() + for client in clients: + key = current_event+'-mgmt/snmpv2:'+client['layer'] + secrets = lib.read_secret(key) + if not secrets: + return providers + if "community" in secrets: + provider = { + "ipv4": client["ipv4_addr"], + "community": secrets["community"], + } + providers.append(provider) + return providers + +def get_snmpv3_providers(): + providers = [] + clients = get_sflow_clients() + if not clients: + return providers + current_event = lib.get_current_event() + for client in clients: + key = current_event+'-mgmt/snmpv3:'+client['layer'] + secrets = lib.read_secret(key) + if not secrets: + return providers + if "user" in secrets: + provider = { + "ipv4": client["ipv4_addr"], + "authentication-passphrase": secrets["auth"], + "authentication-protocol": secrets["authtype"].replace(" ","").upper(), + 
"privacy-passphrase": secrets["priv"], + "privacy-protocol": secrets["privtype"].replace(" ","").replace("128","").upper(), + "user": secrets["user"], + } + providers.append(provider) + return providers + +def get_prefixes(ipversion): + if os.path.isfile(DB_FILE): + try: + conn = sqlite3.connect(DB_FILE) + db = conn.cursor() + except sqlite3.Error as e: + print("An error occurred: {}".format(e.args[0])) + exit(2) + else: + print("No database file found: {}".format(DB_FILE)) + exit(3) + + if ipversion == "4": + db.execute( + 'SELECT SUBSTR(name,1, INSTR(name, "@")-1) AS location, name, short_name, ipv4_txt' + ' FROM network' + ' WHERE node_id NOT IN (SELECT option.node_id FROM option WHERE name = "no-akv")' + ' AND name LIKE "%@%" AND ipv4_txt IS NOT NULL' + ) + + elif ipversion == "6": + db.execute( + 'SELECT SUBSTR(name,1, INSTR(name, "@")-1) AS location, name, short_name, ipv6_txt' + ' FROM network' + ' WHERE node_id NOT IN (SELECT option.node_id FROM option WHERE name = "no-akv")' + ' AND name LIKE "%@%" AND ipv6_txt IS NOT NULL' + ' AND NOT (name = "BOGAL@DREAMHACK" AND ipv6_txt = "2a05:2240:5000::/48")' + ) + else: + raise NetworkTypeNotFoundError('network type must be 4 or 6') + + res = db.fetchall() + if not res: + raise NetworkNotFoundError('network not found') + + column_names = [description[0] for description in db.description] + conn.close() + rows_dict = [dict(zip(column_names, row)) for row in res] + + return rows_dict + + +def requires(host, *args): + return ['apache(ldap)'] + + +def generate(host, *args): + + info = {} + info['snmpv3_providers'] = get_snmpv3_providers() + info['snmpv2_providers'] = get_snmpv2_providers() + info['current_event'] = lib.get_current_event() + info['ipv6_prefixes'] = get_prefixes('6') + info['ipv4_prefixes'] = get_prefixes('4') + return {'akvorado': info} diff --git a/modules/akvorado/files/akvorado-console.service b/modules/akvorado/files/akvorado-console.service new file mode 100644 index 00000000..23e2f587 --- /dev/null +++ b/modules/akvorado/files/akvorado-console.service @@ -0,0 +1,15 @@ +[Unit] +Description=Akvorado Console +After=akvorado-orch.service +Requires=akvorado-orch.service + +[Service] +Type=simple +Restart=on-failure +RestartSec=15 +User=akvorado +ExecStart=/usr/local/bin/akvorado console http://127.0.0.1:8080 + +[Install] +WantedBy=multi-user.target + diff --git a/modules/akvorado/files/akvorado-inlet.service b/modules/akvorado/files/akvorado-inlet.service new file mode 100644 index 00000000..1930cb0c --- /dev/null +++ b/modules/akvorado/files/akvorado-inlet.service @@ -0,0 +1,15 @@ +[Unit] +Description=Akvorado Inlet +After=akvorado-orch.service +Requires=akvorado-orch.service + +[Service] +Type=simple +Restart=on-failure +RestartSec=15 +User=akvorado +ExecStart=/usr/local/bin/akvorado inlet http://127.0.0.1:8080 + +[Install] +WantedBy=multi-user.target + diff --git a/modules/akvorado/files/akvorado-orch.service b/modules/akvorado/files/akvorado-orch.service new file mode 100644 index 00000000..23e0f153 --- /dev/null +++ b/modules/akvorado/files/akvorado-orch.service @@ -0,0 +1,13 @@ +[Unit] +Description=Akvorado Orchestrator +After=network.target +[Service] +Type=simple +Restart=on-failure +RestartSec=15 +User=akvorado +ExecStart=/usr/local/bin/akvorado orchestrator /etc/akvorado/akvorado.yaml + +[Install] +WantedBy=multi-user.target + diff --git a/modules/akvorado/files/kafka.service b/modules/akvorado/files/kafka.service new file mode 100644 index 00000000..b1b0ebbd --- /dev/null +++ b/modules/akvorado/files/kafka.service 
@@ -0,0 +1,13 @@ +[Unit] +Requires=zookeeper.service +After=zookeeper.service + +[Service] +Type=simple +User=kafka +ExecStart=/bin/sh -c ' /var/lib/kafka/bin/kafka-server-start.sh /var/lib/kafka/config/server.properties > /var/log/kafka/kafka.log 2>&1' +ExecStop=/var/lib/kafka/bin/kafka-server-stop.sh +Restart=on-abnormal + +[Install] +WantedBy=multi-user.target diff --git a/modules/akvorado/files/zookeeper.service b/modules/akvorado/files/zookeeper.service new file mode 100644 index 00000000..62fcd238 --- /dev/null +++ b/modules/akvorado/files/zookeeper.service @@ -0,0 +1,13 @@ +[Unit] +Requires=network.target remote-fs.target +After=network.target remote-fs.target + +[Service] +Type=simple +User=kafka +ExecStart=/var/lib/kafka/bin/zookeeper-server-start.sh /var/lib/kafka/config/zookeeper.properties +ExecStop=/var/lib/kafka/bin/zookeeper-server-stop.sh +Restart=on-abnormal + +[Install] +WantedBy=multi-user.target diff --git a/modules/akvorado/manifests/init.pp b/modules/akvorado/manifests/init.pp new file mode 100644 index 00000000..cdfae488 --- /dev/null +++ b/modules/akvorado/manifests/init.pp @@ -0,0 +1,271 @@ +# Copyright 2018 dhtech +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file +# +# == Class: akvorado +# +# Alert manager for prometheus to handle sending alerts +# +# === Parameters +# + +class akvorado ($current_event, $ipv4_prefixes, $ipv6_prefixes, $snmpv3_providers, $snmpv2_providers) { + + + ##Kafka installation + ensure_packages([ + 'openjdk-17-jre', + ]) + group { 'kafka': + ensure => 'present', + } + -> user { 'kafka': + ensure => 'present', + system => true, + home => '/var/lib/kafka', + managehome => true, + } + -> file { '/var/lib/kafka/kafka.tgz': + ensure => file, + links => follow, + source => 'puppet:///data/kafka-latest.tgz', + notify => Exec['untar-kafka'] + } + -> file { '/var/log/kafka': + ensure => 'directory', + owner => 'kafka', + group => 'kafka', + mode => '0700', + } + -> file { '/var/lib/zookeeper-data': + ensure => 'directory', + owner => 'kafka', + group => 'kafka', + mode => '0700', + } + exec { 'untar-kafka': + command => '/bin/tar -xvf /var/lib/kafka/kafka.tgz -C /var/lib/kafka --strip 1', + refreshonly => true, + user => 'kafka', + } + file { '/etc/systemd/system/kafka.service': + ensure => present, + source => 'puppet:///modules/akvorado/kafka.service', + mode => '0644', + owner => 'root', + group => 'root', + notify => [ Exec['systemctl-daemon-reload'], Service['kafka'] ], + } + -> file { '/etc/systemd/system/zookeeper.service': + ensure => present, + source => 'puppet:///modules/akvorado/zookeeper.service', + mode => '0644', + owner => 'root', + group => 'root', + notify => [ Exec['systemctl-daemon-reload'], Service['zookeeper'] ], + } + -> file_line { 'kafka-enabledeletetopics': + ensure => 'present', + path => '/var/lib/kafka/config/server.properties', + line => 'delete.topic.enable = true', + notify => Service['kafka'], + } + -> file_line { 'kafka-listenlocalhost': + ensure => 'present', + path => '/var/lib/kafka/config/server.properties', + line => 'listeners=PLAINTEXT://localhost:9092', + match => '#listeners=PLAINTEXT', + notify => Service['kafka'], + } + -> file_line { 'kafka-logdir': + ensure => 'present', + path => '/var/lib/kafka/config/server.properties', + line => 'log.dirs=/var/log/kafka', + match => 'log.dirs=/tmp/kafka-logs', + notify => Service['kafka'], + } + -> file_line { 'zookeeper-datadir': + ensure => 'present', + path => '/var/lib/kafka/config/zookeeper.properties', + 
line => 'dataDir=/var/lib/zookeeper-data', + match => 'dataDir=/tmp/zookeeper', + notify => Service['zookeeper'], + } + -> file_line { 'zookeeper-listen': + ensure => 'present', + path => '/var/lib/kafka/config/zookeeper.properties', + line => 'clientPortAddress=127.0.0.1', + notify => Service['zookeeper'], + } + service { 'kafka': + ensure => running, + enable => true, + } + service { 'zookeeper': + ensure => running, + enable => true, + } + + ##Clickhouse installation + ensure_packages([ + 'apt-transport-https', + 'ca-certificates', + 'curl', + 'gnupg', + ]) + file { 'clickhouse-source-add': + ensure => file, + path => '/etc/apt/sources.list.d/clickhouse.list', + content => 'deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb stable main', + notify => Exec['clickhouse-source-key'], + } + exec { 'clickhouse-source-key': + command => '/usr/bin/curl -fsSL https://packages.clickhouse.com/rpm/lts/repodata/repomd.xml.key | gpg --dearmor > /usr/share/keyrings/clickhouse-keyring.gpg', + logoutput => 'on_failure', + try_sleep => 1, + refreshonly => true, + notify => Exec['apt-update'], + } + exec { 'apt-update': + command => '/usr/bin/apt-get update', + logoutput => 'on_failure', + try_sleep => 1, + refreshonly => true, + require => Package['apt-transport-https'], + } + + package { 'clickhouse-server': + ensure => installed, + require => [File['clickhouse-source-add'], Exec['clickhouse-source-key'], Exec['apt-update']], + } + -> package { 'clickhouse-client': + ensure => installed, + } + -> service { 'clickhouse-server': + ensure => running, + enable => true, + } + + #Create user/group for Akvorodo + ensure_packages([ + 'redis', + ],{ + ensure => 'present', + notify => Service['redis'], + }) + group { 'akvorado': + ensure => 'present', + } + -> user { 'akvorado': + ensure => 'present', + system => true, + home => '/var/lib/akvorado', + managehome => true, + } + #Create directories for akvorado + -> file { '/etc/akvorado': + ensure => 'directory', + owner => 'root', + group => 'akvorado', + mode => '0750', + } + #Copy akvorado to the server + -> file { '/usr/local/bin/akvorado': + ensure => file, + owner => 'root', + group => 'akvorado', + mode => '0550', + links => follow, + source => 'puppet:///data/akvorado-latest', + notify => [Service['akvorado-orch'],Exec['protobuf-schema']] + } + file { '/etc/akvorado/akvorado.yaml': + ensure => file, + content => template('akvorado/akvorado.yaml.erb'), + notify => Service['akvorado-orch'], + } + #Systemctl config + file { '/etc/systemd/system/akvorado-orch.service': + ensure => present, + source => 'puppet:///modules/akvorado/akvorado-orch.service', + mode => '0644', + owner => 'root', + group => 'root', + notify => [Exec['systemctl-daemon-reload'],Service['akvorado-orch']], + } + file { '/etc/systemd/system/akvorado-inlet.service': + ensure => present, + source => 'puppet:///modules/akvorado/akvorado-inlet.service', + mode => '0644', + owner => 'root', + group => 'root', + notify => [Exec['systemctl-daemon-reload'],Service['akvorado-inlet']], + } + file { '/etc/systemd/system/akvorado-console.service': + ensure => present, + source => 'puppet:///modules/akvorado/akvorado-console.service', + mode => '0644', + owner => 'root', + group => 'root', + notify => [Exec['systemctl-daemon-reload'],Service['akvorado-console']], + } + file { '/usr/share/GeoIP': + ensure => 'directory', + owner => 'root', + group => 'root', + mode => '0755', + } + file { '/usr/share/GeoIP/asn.mmdb': + ensure => present, + source => 
'puppet:///data/asn.mmdb', + mode => '0644', + owner => 'root', + group => 'root', + } + file { '/usr/share/GeoIP/country.mmdb': + ensure => present, + source => 'puppet:///data/country.mmdb', + mode => '0644', + owner => 'root', + group => 'root', + } + apache::proxy { '1_akvorado-orch-api': + url => '/api/v0/orchestrator/', + backend => 'http://localhost:8080/api/v0/orchestrator/', + } + apache::proxy { '2_akvorado-inlet-api': + url => '/api/v0/inlet/', + backend => 'http://localhost:8081/api/v0/inlet/', + } + apache::proxy { '3_akvorado-console': + url => '/', + backend => 'http://localhost:8082/', + allow_encoded_slashes => true, + } + service { 'akvorado-orch': + ensure => running, + enable => true, + } + service { 'akvorado-inlet': + ensure => running, + enable => true, + } + service { 'akvorado-console': + ensure => running, + enable => true, + } + service { 'redis': + ensure => running, + enable => true, + } + exec { 'systemctl-daemon-reload': + command => '/bin/systemctl daemon-reload', + refreshonly => true, + } + exec { 'protobuf-schema': + command => '/usr/bin/curl http://127.0.0.1:8080/api/v0/orchestrator/clickhouse/init.sh | sh', + refreshonly => true, + require => Service['akvorado-orch'] + } +} diff --git a/modules/akvorado/templates/akvorado.yaml.erb b/modules/akvorado/templates/akvorado.yaml.erb new file mode 100644 index 00000000..26efb3a6 --- /dev/null +++ b/modules/akvorado/templates/akvorado.yaml.erb @@ -0,0 +1,327 @@ +--- +# AUTOGENERATED BY PUPPET +# All manual changes will be overwritten +reporting: + logging: {} + metrics: {} +http: + listen: :8080 + profiler: true + cache: + type: memory +clickhouse: + servers: + - 127.0.0.1:9000 + cluster: "" + database: default + username: default + password: "" + maxopenconns: 10 + dialtimeout: 5s + tls: + enable: false + verify: true + cafile: "" + certfile: "" + keyfile: "" + skipmigrations: false + kafka: + topic: flows + brokers: + - 127.0.0.1:9092 + version: 3.7.0 + tls: + enable: false + verify: true + cafile: "" + certfile: "" + keyfile: "" + saslusername: "" + saslpassword: "" + saslmechanism: none + consumers: 1 + groupname: clickhouse + enginesettings: [] + resolutions: + - interval: 0s + ttl: 360h0m0s + - interval: 1m0s + ttl: 168h0m0s + - interval: 5m0s + ttl: 2160h0m0s + - interval: 1h0m0s + ttl: 8640h0m0s + maxpartitions: 50 + systemlogttl: 720h0m0s + prometheusendpoint: "/metrics" + asns: + 25037: Dreamhack Events + networks: + # 2a01:db8:cafe:1::/64: + # name: ipv6-customers + # role: customers + # site: "" + # region: "" + # city: "" + # state: "" + # country: "" + # tenant: "" + # asn: 0 +<% @ipv4_prefixes.each do |ipv4| -%> + <%=ipv4['ipv4_txt']%>: + name: "<%= ipv4['short_name'] %>" + role: "<%= ipv4['location'] %>" +<% end -%> +<% @ipv6_prefixes.each do |ipv6| -%> + <%=ipv6['ipv6_txt']%>: + name: "<%= ipv6['short_name'] %>" + role: "<%= ipv6['location'] %>" +<% end -%> + networksources: {} + networksourcestimeout: 10s + orchestratorurl: "http://localhost:8080" +kafka: + topic: flows + brokers: + - 127.0.0.1:9092 + version: 3.7.0 + tls: + enable: false + verify: true + cafile: "" + certfile: "" + keyfile: "" + saslusername: "" + saslpassword: "" + saslmechanism: none + topicconfiguration: + numpartitions: 8 + replicationfactor: 1 + configentries: + cleanup.policy: delete + compression.type: producer + retention.ms: "86400000" + segment.bytes: "1073741824" + configentriesstrictsync: true +geoip: + asndatabase: + - /usr/share/GeoIP/asn.mmdb + geodatabase: + - /usr/share/GeoIP/country.mmdb + optional: false 
+schema: + disabled: [] + enabled: [] + maintableonly: [] + notmaintableonly: [] + materialize: [] + customdictionaries: {} +inlet: + - reporting: + logging: {} + metrics: {} + http: + listen: :8081 + profiler: true + cache: + type: memory + flow: + inputs: + - decoder: netflow + listen: :2055 + queuesize: 100000 + receivebuffer: 10485760 + timestampsource: udp + type: udp + usesrcaddrforexporteraddr: false + workers: 6 + - decoder: sflow + listen: :6343 + queuesize: 100000 + receivebuffer: 10485760 + timestampsource: udp + type: udp + usesrcaddrforexporteraddr: false + workers: 6 + ratelimit: 0 + metadata: + cacheduration: 30m0s + cacherefresh: 1h0m0s + cachecheckinterval: 2m0s + cachepersistfile: "" + providers: +<% unless @snmpv2_providers.empty? -%> + - agents: {} + communities: +<% @snmpv2_providers.each do |provider| -%> + <%=provider['ipv4']%>: <%=provider['community']%> +<% end -%> + ports: + ::/0: 161 + securityparameters: {} + type: snmp +<% end -%> +<% unless @snmpv3_providers.empty? -%> + - agents: {} + ports: + ::/0: 161 + securityparameters: +<% @snmpv3_providers.each do |provider| -%> + <%=provider['ipv4']%>: + user-name: <%=provider['user']%> + authentication-protocol: <%=provider['authentication-protocol']%> + authentication-passphrase: <%=provider['authentication-passphrase']%> + privacy-protocol: <%=provider['privacy-protocol']%> + privacy-passphrase: <%=provider['privacy-passphrase']%> +<% end -%> + type: snmp +<% end -%> + - agents: {} + communities: + ::/0: + - public + pollerretries: 1 + pollertimeout: 1s + ports: + ::/0: 161 + securityparameters: {} + type: snmp + workers: 1 + maxbatchrequests: 10 + routing: + provider: + collectasns: true + collectaspaths: true + collectcommunities: true + keep: 5m0s + listen: :10179 + rds: [] + ribpeerremovalbatchroutes: 5000 + ribpeerremovalmaxqueue: 10000 + ribpeerremovalmaxtime: 100ms + ribpeerremovalsleepinterval: 500ms + type: bmp + kafka: + topic: flows + brokers: + - 127.0.0.1:9092 + version: 3.7.0 + tls: + enable: false + verify: true + cafile: "" + certfile: "" + keyfile: "" + saslusername: "" + saslpassword: "" + saslmechanism: none + flushinterval: 10s + flushbytes: 104857599 + maxmessagebytes: 1000000 + compressioncodec: zstd + queuesize: 32 + core: + workers: 6 + exporterclassifiers: + - ClassifySiteRegex(Exporter.Name, "^([^-]+)-", "$1") + - ClassifyRegion("europe") + - ClassifyRole("edge") + interfaceclassifiers: + - | + ClassifyConnectivityRegex(Interface.Description, "^(?i)(transit|pni|ppni|ix):? 
", "$1") && + ClassifyProviderRegex(Interface.Description, "^\\S+?\\s(\\S+)", "$1") && + ClassifyExternal() + - ClassifyInternal() + classifiercacheduration: 5m0s + defaultsamplingrate: 1 + overridesamplingrate: {} + asnproviders: + - flow + - routing + netproviders: + - flow + - routing + schema: + disabled: [] + enabled: [] + maintableonly: [] + notmaintableonly: [] + materialize: [] + customdictionaries: {} +console: + - reporting: + logging: {} + metrics: {} + http: + listen: :8082 + profiler: true + cache: + db: 0 + password: "" + protocol: tcp + server: localhost:6379 + type: redis + username: "" + defaultvisualizeoptions: + graphtype: stacked + start: 6 hours ago + end: now + filter: "" + dimensions: + - SrcAS + limit: 10 + homepagetopwidgets: + - src-as + - src-port + - protocol + - src-country + - etype + homepagegraphfilter: "" + dimensionslimit: 50 + cachettl: 0h10m0s + clickhouse: + servers: + - 127.0.0.1:9000 + cluster: "" + database: default + username: default + password: "" + maxopenconns: 10 + dialtimeout: 5s + tls: + enable: false + verify: true + cafile: "" + certfile: "" + keyfile: "" + auth: + headers: + login: X-Proxy-REMOTE-USER + name: "" + email: "" + logouturl: "" + defaultuser: + login: "" + name: "" + email: "" + logouturl: "" + database: + driver: sqlite + dsn: /var/lib/akvorado/console.sqlite + savedfilters: + - description: From Netflix + content: InIfBoundary = external AND SrcAS = AS2906 + - description: From GAFAM + content: InIfBoundary = external AND SrcAS IN (AS15169, AS16509, AS32934, AS6185, AS8075) + - description: From Swedish Armed Forces + content: InIfBoundary = external AND SrcAS = AS9201 + - description: Valve Corporation + content: InIfBoundary = external AND SrcAS = AS32590 + schema: + disabled: [] + enabled: [] + maintableonly: [] + notmaintableonly: [] + materialize: [] + customdictionaries: {} +demoexporter: [] diff --git a/modules/apache/manifests/init.pp b/modules/apache/manifests/init.pp index 97c90a8b..f9058da5 100644 --- a/modules/apache/manifests/init.pp +++ b/modules/apache/manifests/init.pp @@ -47,7 +47,7 @@ notify => Service['apache2'], } - if $::fqdn == 'status.event.dreamhack.se' or $::fqdn == 'grafana.event.dreamhack.se' { + if $::fqdn == 'status.event.dreamhack.se' or $::fqdn == 'grafana.event.dreamhack.se' or $::fqdn == 'sflow1.event.dreamhack.se' { file { 'apache-security.conf': ensure => present, path => '/etc/apache2/conf-available/security.conf', diff --git a/modules/apache/manifests/proxy.pp b/modules/apache/manifests/proxy.pp index bf5fa09f..16f832dc 100644 --- a/modules/apache/manifests/proxy.pp +++ b/modules/apache/manifests/proxy.pp @@ -3,7 +3,7 @@ # Use of this source code is governed by a BSD-style # license that can be found in the LICENSE file # -define apache::proxy($url, $backend) { +define apache::proxy($url, $backend, $allow_encoded_slashes = false) { exec { "apache_proxy_reload_${name}": command => '/usr/sbin/apachectl graceful', refreshonly => true, diff --git a/modules/apache/templates/proxy.conf.erb b/modules/apache/templates/proxy.conf.erb index c1b9f6de..763f23e7 100644 --- a/modules/apache/templates/proxy.conf.erb +++ b/modules/apache/templates/proxy.conf.erb @@ -4,3 +4,8 @@ ProxyPass <%= @url %> <%= @backend %> RequestHeader set X-Proxy-REMOTE-USER %{REMOTE_USER}s ProxyPreserveHost on + +<% if @allow_encoded_slashes == true -%> +AllowEncodedSlashes On +<% end -%> + diff --git a/modules/bind/manifests/init.pp b/modules/bind/manifests/init.pp index 2e459907..9f9208b4 100644 --- 
a/modules/bind/manifests/init.pp +++ b/modules/bind/manifests/init.pp @@ -34,6 +34,7 @@ if $::operatingsystem == 'OpenBSD' { $named_user = '_bind' $conf_dir = '/var/named/etc' + $log_dir = '/var/log/named' $conf_cfg = 'etc' $package_name = 'isc-bind' $rc_name = 'isc_named' @@ -53,6 +54,7 @@ else { $named_user = 'bind' $conf_dir = '/etc/bind' + $log_dir = '/var/log/bind' $conf_cfg = '/etc/bind' $package_name = 'bind9' $rc_name = 'bind9' @@ -124,6 +126,16 @@ require => Package[$package_name], } +# Make sure the log directory exists + file { 'logdir': + ensure => 'directory', + owner => $named_user, + group => $named_user, + mode => '0770', + path => $log_dir, + require => Package[$package_name], + } + # Make sure the stats directory exists file { 'statsdir': ensure => 'directory', diff --git a/modules/bind/templates/named.conf.erb b/modules/bind/templates/named.conf.erb index ca7c44b0..63d48a6d 100644 --- a/modules/bind/templates/named.conf.erb +++ b/modules/bind/templates/named.conf.erb @@ -88,9 +88,15 @@ options { <% if @role == 'resolver' -%> logging { - category lame-servers {null;}; - category client {null;}; + // Set up a channel for logging DNS queries to a file + channel query_log { + file "/var/log/bind/query.log" versions 3 size 10m; + severity info; + print-time yes; + }; + + // Configure syslog for general logging channel syslog { syslog daemon; print-time yes; @@ -98,7 +104,15 @@ logging { print-severity yes; severity warning; }; -category default { syslog; }; + + + // Disable logging for specific categories to reduce log clutter + category lame-servers { null; }; + category client { null; }; + // Log DNS queries to the query_log channel + category queries { query_log; }; + // Use syslog for default logging category + category default { syslog; }; }; # root hints provided by dns-root-data package diff --git a/modules/dnsstatd/manifests/init.pp b/modules/dnsstatd/manifests/init.pp index 18daee91..b7742c42 100644 --- a/modules/dnsstatd/manifests/init.pp +++ b/modules/dnsstatd/manifests/init.pp @@ -15,7 +15,7 @@ class dnsstatd($current_event) { - $secret_db_dnsstatd = vault('postgresql:dnsstatd', {}) + $secret_db_dnsstatd = vault('postgresql:dnsstatd', {}) ensure_packages([ 'python3-netifaces', @@ -36,8 +36,23 @@ provider => 'pip', } + file { '/opt/dnsstatd': + ensure => directory, + mode => '0750', + owner => 'root', + group => 'root', + } + + file { '/opt/dnsstatd/dnsstatd.py': + ensure => present, + source => 'puppet:///repos/dnsstatd/dnsstatd.py', + mode => '0750', + owner => 'root', + group => 'root', + } + if $secret_db_dnsstatd != {} { - file { '/scripts/dnsstatd/config': + file { '/opt/dnsstatd/config': ensure => file, content => template('dnsstatd/config.erb'), mode => '0600', @@ -46,7 +61,6 @@ } supervisor::register { 'dnsstatd': - command => '/scripts/dnsstatd/dnsstatd.py', + command => '/opt/dnsstatd/dnsstatd.py', } - } diff --git a/modules/hardware/manifests/init.pp b/modules/hardware/manifests/init.pp index 77651df3..a0014e36 100644 --- a/modules/hardware/manifests/init.pp +++ b/modules/hardware/manifests/init.pp @@ -18,13 +18,13 @@ if defined('$::productname') and $::productname =~ /VMware/ { # OpenBSD does not use open-vm-tools, see the vmt(4) driver. 
- if $::operatingsystem != 'OpenBSD' { + if $facts['operatingsystem'] != 'OpenBSD' { package { 'open-vm-tools': ensure => installed } } } else { - if $::manufacturer == 'HP' { + if $facts['manufacturer'] == 'HP' { package { 'gnupg': ensure => installed } @@ -63,8 +63,8 @@ ensure => installed } - } elsif $::productname == 'Wedge-DC-F 20-001331' { - if $::kernelrelease =~ /OpenNetworkLinux/ { + } elsif $facts['productname'] == 'Wedge-DC-F 20-001331' { + if $facts['kernelrelease'] =~ /OpenNetworkLinux/ { service { 'onlpd': ensure => 'stopped', enable => false, diff --git a/modules/rancid/templates/prometheus-exporter-distconfcheck.erb b/modules/rancid/templates/prometheus-exporter-distconfcheck.erb index b1e2a238..155f8cae 100755 --- a/modules/rancid/templates/prometheus-exporter-distconfcheck.erb +++ b/modules/rancid/templates/prometheus-exporter-distconfcheck.erb @@ -24,10 +24,10 @@ def open_db(): conn = sqlite3.connect(db_file) db = conn.cursor() except sqlite3.Error as e: - print >>sys-stderr, "An error accurd:", e.args[0] + print("An error occurred:", e.args[0], file=sys.stderr) sys.exit(1) else: - print >>sys-stderr, "No database file found: %s" % db_file + print("No database file found: %s" % db_file, file=sys.stderr) sys.exit(2) return db @@ -87,7 +87,7 @@ access_switches = get_access_switches(db) try: output_file = open('%s/%s' % (export_dir, export_file), 'w') except: - print >>sys.stderr, "Unable to open export file for writing" + print("Unable to open export file for writing", file=sys.stderr) sys.exit(1) @@ -107,8 +107,8 @@ for filename in rancid_conf_files: table_interfaces = cfg.find_objects_w_child(parentspec='^interface GigabitEthernet', childspec='^ description BORD;') for intf in table_interfaces: if get_circuit_id_access_switch(intf) != get_description_access_switch(intf): - print get_circuit_id_access_switch(intf) - print get_description_access_switch(intf) + print(get_circuit_id_access_switch(intf)) + print(get_description_access_switch(intf)) checks = { "IpDhcpSnooping": bool(cfg.find_objects('^ip dhcp snooping$') and cfg.find_objects('ip dhcp snooping vlan 601')), @@ -130,5 +130,5 @@ for filename in rancid_conf_files: "SpanningTreeInstance": cfg.find_objects('^spanning-tree mst configuration$')[0].has_child_with('^ instance 1 vlan 2-4094$'), } - for k, v in checks.items(): + for k, v in list(checks.items()): output_file.write('dist_check{{device="{}",check="{}"}} {} {}\n'.format(filename, k, int(v), int(time.time()*1000))) diff --git a/modules/system/manifests/init.pp b/modules/system/manifests/init.pp index b616eeb6..8fb8268c 100644 --- a/modules/system/manifests/init.pp +++ b/modules/system/manifests/init.pp @@ -100,7 +100,7 @@ creates => '/scripts/.git/modules', } - if $::kernelrelease =~ /OpenNetworkLinux/ { + if $facts['kernelrelease'] =~ /OpenNetworkLinux/ { package { 'snmpd': ensure => 'purged', } diff --git a/modules/vault/manifests/init.pp b/modules/vault/manifests/init.pp index 8bd74ab9..e21c11e6 100644 --- a/modules/vault/manifests/init.pp +++ b/modules/vault/manifests/init.pp @@ -20,7 +20,7 @@ provider => 'pip', } - if $::kernel == 'Linux' { + if $facts['kernel'] == 'Linux' { file { 'vault': ensure => file, path => '/usr/local/bin/vault', @@ -35,7 +35,7 @@ } } - if $::operatingsystem == 'Debian' and $::operatingsystemmajrelease == '11' { + if $facts['operatingsystem'] == 'Debian' and $facts['operatingsystemmajrelease'] == '11' { file { 'vault-input': ensure => file, path => '/usr/local/bin/vault-input', @@ -65,7 +65,7 @@ path => '/usr/local/bin/vault-auth', }
- if $::operatingsystem == 'Debian' and $::operatingsystemmajrelease == '11' { + if $facts['operatingsystem'] == 'Debian' and $facts['operatingsystemmajrelease'] == '11' { file { 'vault-machine': ensure => file, path => '/usr/local/bin/vault-machine', @@ -82,7 +82,7 @@ } } - if $::operatingsystem == 'Debian' and $::operatingsystemmajrelease == '11' { + if $facts['operatingsystem'] == 'Debian' and $facts['operatingsystemmajrelease'] == '11' { file { 'dh-create-service-account': ensure => file, path => '/usr/local/bin/dh-create-service-account', diff --git a/modules/wireguard.py b/modules/wireguard.py new file mode 100644 index 00000000..81a06a82 --- /dev/null +++ b/modules/wireguard.py @@ -0,0 +1,16 @@ +# Copyright 2024 dhtech +# +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file +import lib + +def generate(host, *args): + + # Get current event, used to get up-to-date switch conf + current_event = lib.get_current_event() + + info = {} + info['current_event'] = current_event + return {'wireguard': info} + +# vim: ts=4: sts=4: sw=4: expandtab diff --git a/modules/wireguard/manifests/init.pp b/modules/wireguard/manifests/init.pp new file mode 100644 index 00000000..7a7b6304 --- /dev/null +++ b/modules/wireguard/manifests/init.pp @@ -0,0 +1,68 @@ +class wireguard($current_event) { + # Execute 'apt-get update' + exec { 'apt-update': # exec resource named 'apt-update' + command => '/usr/bin/apt-get update' # command this resource will run + } + + # Install wireguard package + package { 'wireguard': + ensure => installed, + require => Exec['apt-update'], # require 'apt-update' before installing + } + + # Create wireguard interface + exec { 'create': + require => Package['wireguard'], + command => '/usr/bin/ip link add dev wg0 type wireguard', + unless => '/usr/bin/ip link show wg0' + } + + # Generate the private key once + exec { 'create-privkey': + command => '/usr/bin/wg genkey > /etc/wireguard/privkey', + unless => '/usr/bin/ls /etc/wireguard/privkey', + require => Exec['create'], + } + + # Derive the public key from the private key + exec { 'create-pubkey': + command => '/usr/bin/wg pubkey < /etc/wireguard/privkey > /etc/wireguard/pubkey', + unless => '/usr/bin/ls /etc/wireguard/pubkey', + require => Exec['create-privkey'], + } + + + exec { 'add-key': + command => '/usr/bin/wg set wg0 listen-port 51820 private-key /etc/wireguard/privkey', + require => Exec['create-pubkey'], + } + + +# Set wireguard interface IP + exec { 'set-IP': + require => Exec['add-key'], + command => '/usr/bin/ip address add dev wg0 77.80.229.133/25', + unless => '/usr/bin/ip addr show wg0 | grep 77.80.229.133/25' + } + + file { '/etc/wireguard/yaml': + ensure => directory, + require => Exec['set-IP'], + recurse => remote, + source => "puppet:///svn/${current_event}/services/wireguard", +} + + +# Build the wg0 config file with all clients from the previous step + file { 'setConf': + ensure => file, + path => '/etc/wireguard/wg0.conf', + notify => Exec['syncConf'], + content => template('wireguard/wg0.conf.erb'), + require => File['/etc/wireguard/yaml'], # require that yaml file exists before trying to use it....
+ } + +# Sync changes to the wg0 interface + exec { 'syncConf': + require => File['setConf'], + command => '/usr/bin/wg syncconf wg0 /etc/wireguard/wg0.conf', + } +} diff --git a/modules/wireguard/templates/wg0.conf.erb b/modules/wireguard/templates/wg0.conf.erb new file mode 100644 index 00000000..c18d6fad --- /dev/null +++ b/modules/wireguard/templates/wg0.conf.erb @@ -0,0 +1,10 @@ +<% require 'yaml' %> +<% clients = YAML.load_file('/etc/wireguard/yaml/wireguard-clients.yaml')['clients'] %> + +<% clients.each do |nick, client| -%> +# <%= nick %> +[Peer] +PublicKey = <%= client['publickey'] %> +AllowedIPs = <%= client['ip'] %> + +<% end -%> \ No newline at end of file
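
For reference, wg0.conf.erb above expects /etc/wireguard/yaml/wireguard-clients.yaml to carry a top-level 'clients' mapping of peer nickname to 'publickey' and 'ip'. The standalone Python sketch below (not part of the change itself) mirrors the template's loop to illustrate that layout; the peer name, key, and address are hypothetical placeholders.

# Minimal sketch mirroring the loop in wg0.conf.erb; the sample data is hypothetical.
import yaml

SAMPLE = """
clients:
  examplepeer:
    publickey: PLACEHOLDERPUBLICKEYBASE64=
    ip: 192.0.2.10/32
"""

def render_peers(doc):
    # Same shape the ERB template reads: clients -> nickname -> {publickey, ip}
    clients = yaml.safe_load(doc)['clients']
    stanzas = []
    for nick, client in clients.items():
        stanzas.append('# {}\n[Peer]\nPublicKey = {}\nAllowedIPs = {}\n'.format(
            nick, client['publickey'], client['ip']))
    return '\n'.join(stanzas)

if __name__ == '__main__':
    print(render_peers(SAMPLE))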