diff --git a/.dockerignore b/.dockerignore index 8ef5a542c841..61335e9e5961 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,6 +1,5 @@ * !.cache/test_certs -!fb/config/ !.devcontainer/ !third_party/ @@ -29,12 +28,6 @@ !lte/gateway/deploy !lte/gateway/docker/deploy -!devmand/cloud/ -!devmand/gateway/ -!devmand/protos/ - -!fb/src/dpi/ - !orc8r/cloud/configs/ !orc8r/lib/ !orc8r/cloud/go/ diff --git a/.github/workflows/agw-workflow.yml b/.github/workflows/agw-workflow.yml index 0ff10128d974..5c126e8a3443 100644 --- a/.github/workflows/agw-workflow.yml +++ b/.github/workflows/agw-workflow.yml @@ -28,8 +28,9 @@ concurrency: env: DEVCONTAINER_IMAGE: "ghcr.io/magma/magma/devcontainer:latest" - BAZEL_BASE_IMAGE: "ghcr.io/magma/magma/bazel-base:sha-4a878d8" - CACHE_KEY: bazel-base-image + BAZEL_BASE_IMAGE: "ghcr.io/magma/magma/bazel-base:latest" + # see GH14041 + CACHE_KEY: bazel-base-image-sha-c4de1e5 REMOTE_DOWNLOAD_OPTIMIZATION: true jobs: diff --git a/.github/workflows/bazel.yml b/.github/workflows/bazel.yml index dd92cde0bf9d..0370c939154e 100644 --- a/.github/workflows/bazel.yml +++ b/.github/workflows/bazel.yml @@ -23,8 +23,9 @@ on: - master env: - BAZEL_BASE_IMAGE: "ghcr.io/magma/magma/bazel-base:sha-4a878d8" - CACHE_KEY: bazel-base-image + BAZEL_BASE_IMAGE: "ghcr.io/magma/magma/bazel-base:latest" + # see GH14041 + CACHE_KEY: bazel-base-image-sha-c4de1e5 REMOTE_DOWNLOAD_OPTIMIZATION: true concurrency: diff --git a/.github/workflows/build_all.yml b/.github/workflows/build_all.yml index a01d1fbe441c..9c089efe5cb9 100644 --- a/.github/workflows/build_all.yml +++ b/.github/workflows/build_all.yml @@ -115,6 +115,7 @@ jobs: runs-on: macos-12 outputs: artifacts: ${{ steps.publish_packages.outputs.artifacts }} + magma_package: ${{ steps.publish_packages.outputs.magma_package }} steps: - uses: actions/checkout@7884fcad6b5d53d10323aee724dc68d8b9096a2e # pin@v2 with: @@ -179,6 +180,14 @@ jobs: if [[ "$HTTP_STATUS" != "2"* ]]; then PUBLISH_ERROR="true" fi + # extract magma debian package version + match="magma_[0-9]+\.[0-9]+\.[0-9]+-[0-9]+-[a-z0-9]+_[a-z0-9]+.deb" + if [[ $i =~ $match ]]; then + magma_package=${i#magma_} + magma_package=${magma_package%_[a-z0-9]*.deb} + magma_package='magma='${magma_package} + echo "::set-output name=magma_package::${magma_package}" + fi done # set output if [[ "$PUBLISH_ERROR" != "true" ]]; then @@ -991,3 +1000,15 @@ jobs: SLACK_ICON_EMOJI: ":heavy_check_mark:" SLACK_COLOR: "#00FF00" SLACK_FOOTER: ' ' + trigger-debian-integ-test: + if: always() && github.event_name == 'push' && github.repository_owner == 'magma' && github.ref == 'refs/heads/master' + runs-on: ubuntu-latest + needs: agw-build + steps: + - name: Trigger debian integ test workflow + uses: peter-evans/repository-dispatch@v2 + with: + token: ${{ secrets.GITHUB_TOKEN }} + repository: magma/magma + event-type: build-all-artifact + client-payload: '{ "artifact": "${{ needs.agw-build.outputs.magma_package }}" }' diff --git a/.github/workflows/docker-promote.yml b/.github/workflows/docker-promote.yml new file mode 100644 index 000000000000..33474ef20c57 --- /dev/null +++ b/.github/workflows/docker-promote.yml @@ -0,0 +1,53 @@ +# Copyright 2022 The Magma Authors. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +name: docker promote + +on: + workflow_dispatch: + inputs: + branch_tag: + description: 'Branch version number' + required: true + release_tag: + description: 'Release version number' + required: true + +jobs: + docker-promote: + runs-on: ubuntu-latest + env: + BRANCH_TAG: ${{ inputs.branch_tag }} + RELEASE_TAG: ${{ inputs.release_tag }} + MAGMA_ARTIFACTORY: artifactory.magmacore.org + steps: + - uses: tspascoal/get-user-teams-membership@533553aa88900a17c59177d65bcf8c5c97ff1a90 # pin@v1.0.3 + name: Check if user has rights to promote + id: checkUserMember + with: + username: ${{ github.actor }} + team: 'approvers-ci' + GITHUB_TOKEN: ${{ secrets.github_token }} + - if: ${{ steps.checkUserMember.outputs.isTeamMember == 'false' }} + run: | + echo "User is not a member of the team" + exit 1 + - uses: docker/login-action@49ed152c8eca782a232dede0303416e8f356c37b # Pin v2.0.0 + name: Login to Artifactory + with: + registry: docker.${{ env.MAGMA_ARTIFACTORY }} + username: ${{ secrets.ARTIFACTORY_USERNAME }} + password: ${{ secrets.ARTIFACTORY_PASSWORD }} + - run: | + wget https://github.com/magma/magma/raw/master/orc8r/tools/docker/promote.sh + chmod 755 promote.sh + # Promote Docker images + ./promote.sh diff --git a/.github/workflows/gcc-problems.yml b/.github/workflows/gcc-problems.yml index 7ec82771d24a..39a5955cff59 100644 --- a/.github/workflows/gcc-problems.yml +++ b/.github/workflows/gcc-problems.yml @@ -26,8 +26,9 @@ on: - master - 'v1.*' env: - BAZEL_BASE_IMAGE: "ghcr.io/magma/magma/bazel-base:sha-4a878d8" - CACHE_KEY: bazel-base-image + BAZEL_BASE_IMAGE: "ghcr.io/magma/magma/bazel-base:latest" + # see GH14041 + CACHE_KEY: bazel-base-image-sha-c4de1e5 REMOTE_DOWNLOAD_OPTIMIZATION: true concurrency: diff --git a/.github/workflows/helm-promote.yml b/.github/workflows/helm-promote.yml new file mode 100644 index 000000000000..39753baa2cb9 --- /dev/null +++ b/.github/workflows/helm-promote.yml @@ -0,0 +1,49 @@ +# Copyright 2022 The Magma Authors. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
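+#
+# Note: this workflow is triggered manually (workflow_dispatch). As a sketch,
+# assuming the GitHub CLI is installed and "1.8.0" is only an example value,
+# a promotion run could be started with:
+#   gh workflow run "helm promote" -f magma_version=1.8.0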
+ +name: helm promote + +on: + workflow_dispatch: + inputs: + magma_version: + description: 'Magma version number' + required: true + +jobs: + helm-promote: + runs-on: ubuntu-latest + env: + MAGMA_VERSION: ${{ inputs.magma_version }} + MAGMA_ARTIFACTORY: https://artifactory.magmacore.org:443/artifactory + HELM_CHART_MUSEUM_TOKEN: ${{ secrets.HELM_CHART_MUSEUM_TOKEN }} + HELM_CHART_MUSEUM_USERNAME: ${{ secrets.HELM_CHART_MUSEUM_USERNAME }} + steps: + - uses: tspascoal/get-user-teams-membership@533553aa88900a17c59177d65bcf8c5c97ff1a90 # pin@v1.0.3 + name: Check if user has rights to promote + id: checkUserMember + with: + username: ${{ github.actor }} + team: 'approvers-ci' + GITHUB_TOKEN: ${{ secrets.github_token }} + - if: ${{ steps.checkUserMember.outputs.isTeamMember == 'false' }} + run: | + echo "User is not a member of the team" + exit 1 + - run: | + wget https://github.com/magma/magma/raw/master/orc8r/tools/helm/promote.sh + chmod 755 promote.sh + # Promote Helm charts + ./promote.sh orc8r-${MAGMA_VERSION}.tgz + ./promote.sh cwf-orc8r-${MAGMA_VERSION}.tgz + ./promote.sh feg-orc8r-${MAGMA_VERSION}.tgz + ./promote.sh lte-orc8r-${MAGMA_VERSION}.tgz + ./promote.sh domain-proxy-${MAGMA_VERSION}.tgz diff --git a/.github/workflows/lte-integ-test-bazel.yml b/.github/workflows/lte-integ-test-bazel.yml index 88be84554cad..e86ca5e6c630 100644 --- a/.github/workflows/lte-integ-test-bazel.yml +++ b/.github/workflows/lte-integ-test-bazel.yml @@ -78,9 +78,7 @@ jobs: run: | cd lte/gateway vagrant ssh -c 'cd ~/magma; bazel/scripts/remote_cache_bazelrc_setup.sh "${{ env.CACHE_KEY }}" "${{ env.REMOTE_DOWNLOAD_OPTIMIZATION }}" "${{ secrets.BAZEL_REMOTE_PASSWORD }}";' magma - vagrant ssh -c 'sudo sed -i "s@#precedence ::ffff:0:0/96 100@precedence ::ffff:0:0/96 100@" /etc/gai.conf;' magma vagrant ssh -c 'cd ~/magma; bazel build --profile=bazel_profile_lte_integ_tests `bazel query "kind(.*_binary, //orc8r/... union //lte/... union //feg/...)"`;' magma - vagrant ssh -c 'sudo sed -i "s@precedence ::ffff:0:0/96 100@#precedence ::ffff:0:0/96 100@" /etc/gai.conf;' magma - name: Linking bazel-built script executables to '/usr/local/bin/' run: | cd lte/gateway diff --git a/.github/workflows/lte-integ-test-containerized.yml b/.github/workflows/lte-integ-test-containerized.yml new file mode 100644 index 000000000000..013f89492cbe --- /dev/null +++ b/.github/workflows/lte-integ-test-containerized.yml @@ -0,0 +1,65 @@ +# Copyright 2022 The Magma Authors. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +name: LTE integ test containerized AGW + +on: + workflow_dispatch: null + workflow_run: + workflows: + - build-all + branches: + - master + types: + - completed + +jobs: + lte-integ-test-containerized: + if: github.event.workflow_run.conclusion == 'success' && (github.repository_owner == 'magma' || github.event_name == 'workflow_dispatch') + runs-on: macos-12 + steps: + - name: Cache magma-dev-box + uses: actions/cache@0865c47f36e68161719c5b124609996bb5c40129 # pin@v3 + with: + path: ~/.vagrant.d/boxes/magmacore-VAGRANTSLASH-magma_dev + key: vagrant-box-magma-dev-v1.2.20220801 + - name: Cache magma-test-box + uses: actions/cache@0865c47f36e68161719c5b124609996bb5c40129 # pin@v3 + with: + path: ~/.vagrant.d/boxes/magmacore-VAGRANTSLASH-magma_test + key: vagrant-box-magma-test + - name: Cache magma-trfserver-box + uses: actions/cache@0865c47f36e68161719c5b124609996bb5c40129 # pin@v3 + with: + path: ~/.vagrant.d/boxes/magmacore-VAGRANTSLASH-magma_trfserver + key: vagrant-box-magma-trfserver-v20220722 + - uses: actions/setup-python@7f80679172b057fc5e90d70d197929d454754a5a # pin@v2 + with: + python-version: '3.8.10' + - name: Install pre requisites + run: | + pip3 install --upgrade pip + pip3 install ansible fabric3 jsonpickle requests PyYAML + vagrant plugin install vagrant-vbguest vagrant-disksize vagrant-reload + - name: Open up network interfaces for VM + run: | + sudo mkdir -p /etc/vbox/ + echo '* 192.168.0.0/16' | sudo tee /etc/vbox/networks.conf + echo '* 3001::/64' | sudo tee -a /etc/vbox/networks.conf + - uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # pin@v3 + - name: Run the integration test + env: + DOCKER_REGISTRY: docker-ci.artifactory.magmacore.org/ + MAGMA_DEV_CPUS: 3 + MAGMA_DEV_MEMORY_MB: 9216 + working-directory: lte/gateway + run: | + fab --show=debug --set DOCKER_REGISTRY=${DOCKER_REGISTRY} integ_test_containerized diff --git a/.github/workflows/lte-integ-test-magma-deb.yml b/.github/workflows/lte-integ-test-magma-deb.yml index 41bd52897463..5eff9275aadc 100644 --- a/.github/workflows/lte-integ-test-magma-deb.yml +++ b/.github/workflows/lte-integ-test-magma-deb.yml @@ -13,13 +13,8 @@ name: LTE integ test magma-deb on: workflow_dispatch: null - workflow_run: - workflows: - - build-all - branches: - - master - types: - - completed + repository_dispatch: + types: [build-all-artifact] jobs: lte-integ-test-magma-deb: @@ -30,7 +25,7 @@ jobs: - name: Cache magma-deb-box uses: actions/cache@0865c47f36e68161719c5b124609996bb5c40129 # pin@v3 with: - path: ~/.vagrant.d/boxes/magmacore-VAGRANTSLASH-magma_deb + path: ~/.vagrant.d/boxes/ubuntu-VAGRANTSLASH-focal64 key: vagrant-box-magma-deb-focal64-20220804.0.0 - name: Cache magma-test-box uses: actions/cache@0865c47f36e68161719c5b124609996bb5c40129 # pin@v3 @@ -59,6 +54,7 @@ jobs: env: MAGMA_DEV_CPUS: 3 MAGMA_DEV_MEMORY_MB: 9216 + MAGMA_PACKAGE: ${{ github.event.client_payload.artifact }} run: | cd lte/gateway fab integ_test_deb_installation diff --git a/cwf/gateway/deploy/roles/cwag/tasks/main.yml b/cwf/gateway/deploy/roles/cwag/tasks/main.yml index 3d10aa25e675..c8776da62d6e 100644 --- a/cwf/gateway/deploy/roles/cwag/tasks/main.yml +++ b/cwf/gateway/deploy/roles/cwag/tasks/main.yml @@ -63,7 +63,7 @@ sysctl: name="net.ipv4.ip_forward" value=1 sysctl_set=yes state=present reload=yes - name: Configure Debian interfaces - when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu' + when: ansible_distribution == 'Ubuntu' block: - name: Add static IP to cwag_br0 become: true diff --git 
a/cwf/gateway/deploy/roles/ovs/tasks/main.yml b/cwf/gateway/deploy/roles/ovs/tasks/main.yml index 1dc69a2c534a..1d1e86bfa557 100644 --- a/cwf/gateway/deploy/roles/ovs/tasks/main.yml +++ b/cwf/gateway/deploy/roles/ovs/tasks/main.yml @@ -11,7 +11,7 @@ # limitations under the License. - name: Install openvswitch and dependencies - when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu' + when: ansible_distribution == 'Ubuntu' include_tasks: debian.yml - name: Install openvswitch and dependencies diff --git a/cwf/gateway/docker/c/Dockerfile b/cwf/gateway/docker/c/Dockerfile index 0fee6f196cae..397b526e5800 100644 --- a/cwf/gateway/docker/c/Dockerfile +++ b/cwf/gateway/docker/c/Dockerfile @@ -17,7 +17,6 @@ ARG OS_DIST=ubuntu OS_RELEASE=focal EXTRA_REPO=https://artifactory.magmacore.org/artifactory/debian-test -# Stretch is required for c build FROM $OS_DIST:$OS_RELEASE AS builder ARG OS_DIST OS_RELEASE EXTRA_REPO diff --git a/docs/docusaurus/i18n/en.json b/docs/docusaurus/i18n/en.json index c2963a1d5a2e..3ac423a48f36 100644 --- a/docs/docusaurus/i18n/en.json +++ b/docs/docusaurus/i18n/en.json @@ -1991,6 +1991,105 @@ }, "version-1.7.0/resources/version-1.7.0-ref_pcap": { "title": "PCAP Collection" + }, + "version-1.8.0/basics/version-1.8.0-prerequisites": { + "title": "Prerequisites" + }, + "version-1.8.0/basics/version-1.8.0-quick_start_guide": { + "title": "Quick Start Guide" + }, + "version-1.8.0/cwf/version-1.8.0-dev_testing": { + "title": "Test CWAG" + }, + "version-1.8.0/feg/version-1.8.0-deploy_build": { + "title": "Build FeG" + }, + "version-1.8.0/feg/version-1.8.0-deploy_install": { + "title": "Install FeG" + }, + "version-1.8.0/feg/version-1.8.0-s1ap_federated_tests": { + "title": "S1AP Federated Integration Tests" + }, + "version-1.8.0/general/version-1.8.0-aws_cloudstrapper": { + "title": "AWS Cloudstrapper Install" + }, + "version-1.8.0/howtos/version-1.8.0-inbound_roaming": { + "title": "Inbound Roaming" + }, + "version-1.8.0/howtos/version-1.8.0-network_probe": { + "title": "Network Probe" + }, + "version-1.8.0/howtos/troubleshooting/version-1.8.0-datapath_connectivity": { + "title": "Debugging AGW datapath issues" + }, + "version-1.8.0/lte/version-1.8.0-architecture_overview": { + "title": "Overview" + }, + "version-1.8.0/lte/version-1.8.0-build_install_magma_pkg_in_agw": { + "title": "Build and install a magma package in AGW" + }, + "version-1.8.0/lte/version-1.8.0-deploy_install_docker": { + "title": "Install Docker AGW" + }, + "version-1.8.0/lte/version-1.8.0-deploy_install": { + "title": "Install AGW" + }, + "version-1.8.0/lte/version-1.8.0-dev_notes": { + "title": "Developer Notes" + }, + "version-1.8.0/lte/version-1.8.0-dev_unit_testing": { + "title": "Test AGW" + }, + "version-1.8.0/lte/version-1.8.0-extended_5G_sa_features": { + "title": "Extended 5G SA Features" + }, + "version-1.8.0/lte/version-1.8.0-pipelined": { + "title": "Pipelined" + }, + "version-1.8.0/lte/version-1.8.0-s1ap_tests": { + "title": "S1AP Integration Tests" + }, + "version-1.8.0/lte/version-1.8.0-suci_extensions": { + "title": "SUCI Extensions" + }, + "version-1.8.0/nms/version-1.8.0-nms_arch_overview": { + "title": "Overview" + }, + "version-1.8.0/nms/version-1.8.0-dev_components": { + "title": "Components" + }, + "version-1.8.0/nms/version-1.8.0-dev_spacing": { + "title": "Spacing Guidelines" + }, + "version-1.8.0/nms/version-1.8.0-dev_testing": { + "title": "Test NMS" + }, + "version-1.8.0/nms/version-1.8.0-nms_organizations": { + "title": "Multitenancy (Organizations)" + }, + 
"version-1.8.0/orc8r/version-1.8.0-deploy_install": { + "title": "Install Orchestrator" + }, + "version-1.8.0/orc8r/version-1.8.0-deploy_intro": { + "title": "Introduction" + }, + "version-1.8.0/orc8r/version-1.8.0-deploy_using_juju": { + "title": "Deploy Orchestrator using Juju (Beta)" + }, + "version-1.8.0/orc8r/version-1.8.0-dev_build": { + "title": "Build Orchestrator" + }, + "version-1.8.0/orc8r/version-1.8.0-dev_gateway_registration": { + "title": "Gateway Registration" + }, + "version-1.8.0/orc8r/version-1.8.0-dev_minikube": { + "title": "Deploy on Minikube" + }, + "version-1.8.0/orc8r/version-1.8.0-dev_rest_api_auth": { + "title": "REST API Auth" + }, + "version-1.8.0/proposals/version-1.8.0-p014_proposal_process": { + "title": "Magma Proposals" } }, "links": { diff --git a/docs/docusaurus/versioned_docs/version-1.8.0/basics/prerequisites.md b/docs/docusaurus/versioned_docs/version-1.8.0/basics/prerequisites.md new file mode 100644 index 000000000000..814b15451133 --- /dev/null +++ b/docs/docusaurus/versioned_docs/version-1.8.0/basics/prerequisites.md @@ -0,0 +1,252 @@ +--- +id: version-1.8.0-prerequisites +title: Prerequisites +hide_title: true +original_id: prerequisites +--- + +# Prerequisites + +These are the prerequisites to setting up a full private LTE Magma deployment. +Additional prerequisites for developers can be found in the [contributor guide on Github](https://github.com/magma/magma/wiki/Contributor-Guide). + +## Operating System + +Currently, the main development operating system (OS) is **macOS**. Documentation is mainly focused on that operating system. +To develop on a **Linux OS**, the package manager (brew for macOS) will need to be replaced by the appropriate package manager for the respective Linux distribution (e.g. apt, yum, etc.). +**Windows OS** is currently _not_ supported as developing environment, due to some dependencies on Linux-only tools during setup, such as Ansible or `fcntl`. You can try to use a [DevContainer setup](../contributing/contribute_vscode.md#open-a-devcontainer-workspace-with-github-codespaces) though. + +## Development Tools + +Development can occur from multiple OS's, where **macOS** and **Ubuntu** are **explicitly supported**, with additional polish for macOS. + +**Note:** If you still want to contribute from a different OS, you will need to figure out some workarounds to install the tooling. You might want to follow one of the guides, either macOS or Ubuntu, and replicate the steps in your preferred OS. + +### macOS + +1. Install the following tools + + 1. [Docker](https://www.docker.com) and Docker Compose + 2. [Homebrew](https://brew.sh/) + 3. [VirtualBox](https://www.virtualbox.org/) + 4. [Vagrant](https://vagrantup.com) + + ```bash + brew install go@1.18 pyenv + # NOTE: this assumes you're using zsh. + # See the above pyenv install instructions if using alternative shells. + echo 'export PATH="/usr/local/opt/go@1.18/bin:$PATH"' >> ~/.zshrc + echo 'eval "$(pyenv init --path)"' >> ~/.zprofile + echo 'eval "$(pyenv init -)"' >> ~/.zshrc + exec $SHELL + # IMPORTANT: close your terminal tab and open a new one before continuing + pyenv install 3.8.10 + pyenv global 3.8.10 + pip3 install ansible fabric3 jsonpickle requests PyYAML + vagrant plugin install vagrant-vbguest vagrant-disksize vagrant-reload + ``` + + **Note**: In the case where installation of `fabric3` through pip was unsuccessful, + try switching to other package installers. Try running `brew install fabric`. 
+ + You should start Docker Desktop and increase the memory + allocation for the Docker engine to at least 4GB (Preferences -> Resources -> + Advanced). If you are running into build/test failures with Go that report + "signal killed", you likely need to increase Docker's allocated resources. + + ![Increasing docker engine resources](assets/docker-config.png) + +### Ubuntu + +1. Install the following tools + 1. [Docker](https://docs.docker.com/engine/install/ubuntu/) and [Docker Compose](https://docs.docker.com/compose/install/) + 2. [VirtualBox](https://www.virtualbox.org/wiki/Linux_Downloads) + 3. [Vagrant](https://www.vagrantup.com/downloads) (Install by downloading the `.deb` file. Installing via the command line using `apt-get` can currently cause an issue with OpenSSL. See also [this discussion](https://github.com/hashicorp/vagrant/issues/12751).) +2. Install golang version 18. + + 1. Download the tar file. + + ```bash + wget https://artifactory.magmacore.org/artifactory/generic/go1.18.3.linux-amd64.tar.gz + ``` + + 2. Extract the archive you downloaded into `/usr/local`, creating a Go tree in `/usr/local/go`. + + ```bash + sudo rm -rf /usr/local/go && sudo tar -C /usr/local -xzf go1.18.3.linux-amd64.tar.gz + ``` + + 3. Add `/usr/local/go/bin` to the PATH environment variable. + + ```bash + export PATH=$PATH:/usr/local/go/bin + ``` + + 4. Verify that you've installed Go by opening a command prompt and typing the following command + + ```bash + go version + ``` + + You should expect something like this + + ```bash + go version go1.18.3 linux/amd64 + ``` + +3. Install `pyenv`. + + 1. Update system packages. + + ```bash + sudo apt update -y + ``` + + 2. Install some necessary dependencies. **If you are using `zsh` instead of `bash`, replace** `.bashrc` **for** `.zshrc`. + + ```bash + apt install -y make build-essential libssl-dev zlib1g-dev libbz2-dev libreadline-dev libsqlite3-dev wget curl llvm libncurses5-dev libncursesw5-dev xz-utils tk-dev libffi-dev liblzma-dev python-openssl git + ``` + + **Note**: For Ubuntu 22.04, use `python3-openssl` instead of `python-openssl`. + + 3. Clone `pyenv` repository. + + ```bash + git clone https://github.com/pyenv/pyenv.git ~/.pyenv + ``` + + 4. Configure `pyenv`. + + ```bash + echo 'export PYENV_ROOT="$HOME/.pyenv"' >> ~/.bashrc + echo 'export PATH="$PYENV_ROOT/bin:$PATH"' >> ~/.bashrc + echo -e 'if command -v pyenv 1>/dev/null 2>&1; then\n eval "$(pyenv init -) "\nfi' >> ~/.bashrc + exec "$SHELL" + ``` + + 5. Create python virtual environment version 3.8.10. + + ```bash + pyenv install 3.8.10 + pyenv global 3.8.10 + ``` + + **Note**: The `pyenv` installation [might fail with a segmentation fault](https://github.com/pyenv/pyenv/issues/2046). Try using `CFLAGS="-O2" pyenv install 3.8.10` in that case. + +4. Install `pip3` and its dependencies. + + 1. Install `pip3`. + + ```bash + sudo apt install python3-pip + ``` + + 2. Install the following dependencies + + ```bash + pip3 install ansible fabric3 jsonpickle requests PyYAML + ``` + +5. Install `vagrant` necessary plugin. + + ```bash + vagrant plugin install vagrant-vbguest vagrant-disksize vagrant-reload + ``` + + Make sure `virtualbox` is the default provider for `vagrant` by adding the following line to your `.bashrc` (or equivalent) and restart your shell: `export VAGRANT_DEFAULT_PROVIDER="virtualbox"`. + +## Downloading Magma + +You can find Magma code on [Github](https://github.com/magma/magma). 
+ +To download Magma current version, or a specific release do the following + +```bash +git clone https://github.com/magma/magma.git +cd magma + +# in case you want to use a specific version of Magma (for example v1.6) +git checkout v1.6 + +# to list all available releases +git tag -l +``` + +## Deployment Tooling + +First, follow the previous section on [developer tools](#development-tools). Then, install some +additional prerequisite tools. + +### macOS + +Install necessary dependencies and configure the aws cli + +```bash +brew install aws-iam-authenticator kubectl helm terraform +python3 -m pip install awscli boto3 +aws configure +``` + +### Ubuntu + +Install the following + +1. [aws-iam-authenticator for Linux](https://docs.aws.amazon.com/eks/latest/userguide/install-aws-iam-authenticator.html). +2. [kubectl for Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#install-using-native-package-management). +3. [Helm for Linux](https://helm.sh/docs/intro/install/). +4. [Terraform for Linux](https://learn.hashicorp.com/tutorials/terraform/install-cli). +5. awscli + + ```bash + sudo apt install awscli + ``` + +### Orchestrator and NMS + +Orchestrator deployment depends on the following components + +1. AWS account +2. Registered domain for Orchestrator endpoints + +We recommend deploying the Orchestrator cloud component of Magma into AWS. +Our open-source Terraform scripts target an AWS deployment environment, but if +you are familiar with devops and are willing to roll your own, Orchestrator can +run on any public/private cloud with a Kubernetes cluster available to use. +The deployment documentation will assume an AWS deployment environment - if +this is your first time using or deploying Orchestrator, we recommend that you +follow this guide before attempting to deploy it elsewhere. + +Provide the access key ID and secret key for an administrator user in AWS +(don't use the root user) when prompted by `aws configure`. Skip this step if +you will use something else for managing AWS credentials. + +## Production Hardware + +### Access Gateways + +Access gateways (AGWs) can be deployed on to any AMD64 architecture machine +which can support a Debian or Ubuntu 20.04 Linux installation. The basic system +requirements for the AGW production hardware are + +1. 2+ physical Ethernet interfaces +2. AMD64 dual-core processor around 2GHz clock speed or faster +3. 4GB RAM +4. 32GB or greater SSD storage + +In addition, in order to build the AGW, you should have on hand + +1. A USB stick with 2GB+ capacity to load a Debian Stretch ISO +2. Peripherals (keyboard, screen) for your production AGW box for use during + provisioning + +### RAN Equipment + +We currently have tested with the following EnodeB's + +1. Baicells Nova 233 TDD Outdoor +2. Baicells Nova 243 TDD Outdoor +3. Assorted Baicells indoor units (for lab deployments) + +Support for other RAN hardware can be implemented inside the `enodebd` service +on the AGW, but we recommend starting with one of these EnodeBs. 
diff --git a/docs/docusaurus/versioned_docs/version-1.8.0/basics/quick_start_guide.md b/docs/docusaurus/versioned_docs/version-1.8.0/basics/quick_start_guide.md new file mode 100644 index 000000000000..480fd58f5830 --- /dev/null +++ b/docs/docusaurus/versioned_docs/version-1.8.0/basics/quick_start_guide.md @@ -0,0 +1,237 @@ +--- +id: version-1.8.0-quick_start_guide +title: Quick Start Guide +hide_title: true +original_id: quick_start_guide +--- +# Quick Start Guide + +The quick start guide is for developing on Magma or just trying it out. Follow +the deployment guides under Orchestrator and Access Gateway if you are +installing Magma for a production deployment. + +With the [prereqs](prerequisites.md) installed, we can now set up a minimal +end-to-end system on your development environment. In this guide, we'll start +by running the LTE access gateway and orchestrator cloud, and then +register your local access gateway with your local cloud for management. + +We will be spinning up a virtual machine and some docker containers for this +full setup, so you'll probably want to do this on a system with at least 8GB +of memory. Our development VM's are in the 192.168.60.0/24, 192.168.128.0/24 and +192.168.129.0/24 address spaces, so make sure that you don't have anything +running which hijacks those (e.g. VPN). + +In the following steps, note the prefix in terminal commands. `HOST` means to +run the indicated command on your host machine, and `MAGMA-VM` on the `magma` +vagrant machine under `lte/gateway`. + +## Provisioning the environment + +Go ahead and open up 2 fresh terminal tabs. Start in + +### Terminal Tab 1: Provision the AGW VM + +The development environment virtualizes the access gateway, so you don't need +any production hardware on hand to test an end-to-end setup. +We'll be setting up the LTE AGW VM in this tab. + +You need to make sure that your local network setup is correct for the VM to +start properly. Especially the entries `* 192.168.0.0/16` and `* 3001::/64` must exist in your +`/etc/vbox/networks.conf`. + +```bash +HOST [magma]$ echo "* 192.168.0.0/16" | sudo tee -a /etc/vbox/networks.conf +HOST [magma]$ echo "* 3001::/64" | sudo tee -a /etc/vbox/networks.conf +HOST [magma]$ cd lte/gateway +HOST [magma/lte/gateway]$ vagrant up magma +``` + +This will take a few minutes to spin up the VM. While that runs, switch over +to... + +**Note**: If you are looking to test/develop the LTE features of AGW, without +cloud based network management, you can skip the rest of this guide and try the +[S1AP integration tests](../lte/s1ap_tests.md) now. + +### Terminal Tab 2: Build Orchestrator + +Here, we'll be building the Orchestrator docker containers. + +```bash +HOST [magma]$ cd orc8r/cloud/docker +HOST [magma/orc8r/cloud/docker]$ ./build.py --all +``` + +This will build all the docker images for Orchestrator. The `vagrant up` from +the first tab should finish before the image building, so you should switch +to that tab and move on for now. + +## Initial Run + +Once `vagrant up` in the first tab finishes: + +### Terminal Tab 1: Build AGW from Source + +We will kick off the initial build of the AGW from source here. + +```bash +HOST [magma/lte/gateway]$ vagrant ssh magma +MAGMA-VM [/home/vagrant]$ cd magma/lte/gateway +MAGMA-VM [/home/vagrant/magma/lte/gateway]$ make run +``` + +**Note**: If you encounter unexpected errors during this process, try running +`vagrant provision magma` in the host environment for more debugging +information. 
+ +This will take a while (we have a lot of CXX files to build). With 2 extensive +build jobs running, now is a good time to grab a coffee or lunch. The first +build ever from source will take a while, but afterwards, a persistent ccache +and Docker's native layer caching will speed up subsequent builds +significantly. + +You can monitor what happens in the other tab now: + +### Terminal Tab 2: Start Orchestrator + +Once the Orchestrator build finishes, we can start the development Orchestrator +cloud for the first time. We'll also use this time to register the local +client certificate you'll need to access the local API gateway for your +development stack. + +To start Orchestrator (without metrics) is as simple as: + +```bash +HOST [magma/orc8r/cloud/docker]$ ./run.py + +Creating orc8r_postgres_1 ... done +Creating orc8r_test_1 ... done +Creating orc8r_maria_1 ... done +Creating elasticsearch ... done +Creating fluentd ... done +Creating orc8r_kibana_1 ... done +Creating orc8r_proxy_1 ... done +Creating orc8r_controller_1 ... done +``` + +If you want to run everything, including metrics, run: + +```bash +HOST [magma/orc8r/cloud/docker]$ ./run.py --metrics + +Creating orc8r_alertmanager_1 ... done +Creating orc8r_maria_1 ... done +Creating elasticsearch ... done +Creating orc8r_postgres_1 ... done +Creating orc8r_config-manager_1 ... done +Creating orc8r_test_1 ... done +Creating orc8r_prometheus-cache_1 ... done +Creating orc8r_prometheus_1 ... done +Creating orc8r_kibana_1 ... done +Creating fluentd ... done +Creating orc8r_proxy_1 ... done +Creating orc8r_controller_1 ... done +``` + +The Orchestrator application containers will bootstrap certificates on startup +which are cached for future runs. Watch the directory `magma/.cache/test_certs` +for a file `admin_operator.pfx` to show up (this may take a minute or two). + +```bash +HOST [magma/orc8r/cloud/docker]$ ls ../../../.cache/test_certs + +admin_operator.key.pem bootstrapper.key controller.crt rootCA.key +admin_operator.pem certifier.key controller.csr rootCA.pem +admin_operator.pfx certifier.pem controller.key rootCA.srl +``` + +The owner and group of `admin_operator.key.pem` and `admin_operator.pfx` in `/magma/.cache/test_certs/` are `root`. +You need to change ownership of these files to your user with `chown`, e.g. + +```bash +HOST [magma/orc8r/cloud/docker] sudo chown ${USER}:${USER} ../../../.cache/test_certs/admin_operator.key.pem +HOST [magma/orc8r/cloud/docker] sudo chown ${USER}:${USER} ../../../.cache/test_certs/admin_operator.pfx +``` + +then: + +```bash +HOST [magma/orc8r/cloud/docker]$ open ../../../.cache/test_certs +``` + +In the Finder window that pops up, double-click `admin_operator.pfx` to add the +local client cert to your keychain. *The password for the cert is magma*. +In some cases, you may have to open up the Keychain app in MacOS and drag-drop +the file into the login keychain if double-clicking doesn't work. + +If you use Firefox, you'll have to import this .pfx file into your browser's +installed client certificates. See [here](https://support.globalsign.com/customer/en/portal/articles/1211486-install-client-digital-certificate---firefox-for-windows) +for instructions. If you use Chrome or Safari, you may have to restart the +browser before the certificate can be used. 
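+
+If you prefer the command line over a browser, the same client certificate can
+be exercised with `curl`. The following is only a sketch: it assumes the
+development proxy is listening on `localhost:9443` (see the next paragraph) and
+that the REST API is served under the `/magma/v1` path.
+
+```bash
+HOST [magma/orc8r/cloud/docker]$ curl --insecure \
+    --cert ../../../.cache/test_certs/admin_operator.pem \
+    --key ../../../.cache/test_certs/admin_operator.key.pem \
+    https://localhost:9443/magma/v1/networks
+```
+
+The `--insecure` flag mirrors the certificate warning described below for
+browsers: the development server certificate is not issued for `localhost`.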
+ +After starting the Orchestrator with `HOST [magma/orc8r/cloud/docker]$ ./run.py` +and importing `admin_operator.pfx`, you should be able to visit the Swagger UI +at [https://localhost:9443/swagger/v1/ui](https://localhost:9443/swagger/v1/ui). +Note that your browser may refuse to accept the server certificate from +`localhost:9443`. Firefox and Safari will let you override this warning. Chrome +will also let you [bypass the warning if you type `thisisunsafe`](https://www.technipages.com/google-chrome-bypass-your-connection-is-not-private-message). + +### Connecting Your Local LTE Gateway to Your Local Cloud + +At this point, you will have built all the code in the LTE access gateway and +the Orchestrator cloud. All the services on the LTE access gateway and +orchestrator cloud are running, but your gateway VM isn't yet set up to +communicate with your local cloud. + +We have a fabric command set up to do this: + +```bash +HOST [magma]$ cd lte/gateway +HOST [magma/lte/gateway]$ fab -f dev_tools.py register_vm +``` + +This command will seed your gateway and network on Orchestrator with some +default LTE configuration values and set your gateway VM up to talk to your +local Orchestrator cloud. Wait a minute or 2 for the changes to propagate, +then you can verify that things are working: + +```bash +HOST [magma/lte/gateway]$ vagrant ssh magma + +MAGMA-VM$ sudo service magma@* stop +MAGMA-VM$ sudo service magma@magmad restart +MAGMA-VM$ sudo tail -f /var/log/syslog + +# After a minute or 2 you should see these messages: +Sep 27 22:57:35 magma-dev magmad[6226]: [2018-09-27 22:57:35,550 INFO root] Checkin Successful! +Sep 27 22:57:55 magma-dev magmad[6226]: [2018-09-27 22:57:55,684 INFO root] Processing config update g1 +Sep 27 22:57:55 magma-dev control_proxy[6418]: 2018-09-27T22:57:55.683Z [127.0.0.1 -> streamer-controller.magma.test,8443] "POST /magma.Streamer/GetUpdates HTTP/2" 200 7bytes 0.009s +``` + +## Using the NMS UI + +Magma provides an UI for configuring and monitoring the networks. To set up +the NMS to talk to your local Orchestrator: + +```bash +HOST [magma]$ cd nms +HOST [magma/nms] $ COMPOSE_PROJECT_NAME=magmalte docker-compose build magmalte +HOST [magma/nms] $ docker-compose up -d +HOST [magma/nms] $ ./scripts/dev_setup.sh +``` + +After this, you will be able to access the UI by visiting +[https://magma-test.localhost](https://magma-test.localhost), and using the email `admin@magma.test` +and password `password1234`. We recommend Firefox or Chrome. If you see Gateway Error 502, don't worry, the +NMS can take upto 60 seconds to finish starting up. +Note that you will only see a network if you connected your local LTE gateway as described above. + +`magma-test` is the default organization. +Organizations are managed at [host.localhost](https://host.localhost) +where you can log in with the same credentials. + +**Note**: If you want to test the access gateway VM with a physical eNB and UE, +refer to +the [Connecting a physical eNodeb and UE device to gateway +VM](../lte/dev_notes.md#connecting-a-physical-enodeb-and-ue-to-gateway-vm) +section. 
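+
+As a final troubleshooting tip for the NMS step above: if the UI does not come
+up after a minute or so, checking the state of the containers is usually the
+quickest diagnostic. A sketch, using the same compose project as above:
+
+```bash
+HOST [magma/nms] $ docker-compose ps
+HOST [magma/nms] $ docker-compose logs -f magmalte
+```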
diff --git a/docs/docusaurus/versioned_docs/version-1.8.0/cwf/dev_testing.md b/docs/docusaurus/versioned_docs/version-1.8.0/cwf/dev_testing.md new file mode 100644 index 000000000000..c940707cfcdf --- /dev/null +++ b/docs/docusaurus/versioned_docs/version-1.8.0/cwf/dev_testing.md @@ -0,0 +1,106 @@ +--- +id: version-1.8.0-dev_testing +title: Test CWAG +hide_title: true +original_id: dev_testing +--- + +# Test Carrier Wifi Access Gateway + +This guide covers tips for quickly validating Carrier Wifi Access Gateway changes. + +## Dev environment + +In general, all unit testing for CWAG is done on the CWAG test VM. + +To SSH into the VM, run + +```bash +[HOST] cd $MAGMA_ROOT/cwf/gateway +[HOST] vagrant up cwag_test +[HOST] vagrant ssh cwag_test +``` + +The commands shown below should be run inside the test VM unless specified otherwise. + +## Format and verify build + +To run all existing unit tests, run + +```bash +[VM] make -C ${MAGMA_ROOT}/cwf/gateway precommit +[VM] make -C ${MAGMA_ROOT}/cwf/gateway/integ_tests precommit +``` + +## Run unit tests + +To run all existing unit tests, run + +```bash +[VM] make -C ${MAGMA_ROOT}/cwf/gateway test +``` + +## Run integration tests + +### Test setup + +CWF integration tests use 3 separate VMs listed below. +`cwf/gateway/fabfile.py` can be used to automate all setup work. + +### `cwag-dev` + +Runs CWAG docker containers and mock core services needed to run the test. +See `cwf/gateway/docker-compose.integ-test.yml` for the complete list of services. + +### `cwag-test` + +Runs a UE simulator service and all tests. + +### `magma-trfserver` + +Runs an iperf3 server to drive traffic through CWAG. + +#### Entire test suite + +To run all setup work and the entire CWF integration test suite, run + +```bash +[HOST] fab integ_test +``` + +Once the above command has been executed, which means that the set-up of the VMs etc. has been +performed, command-line options can be utilized to rerun the tests without redoing the set-up + +```bash +[HOST] fab integ_test:provision_vm=False,no_build=True +``` + +#### Individual tests + +The command above can be further modified to run one integration test at a time + +```bash +[HOST] fab integ_test:provision_vm=False,no_build=True,skip_unit_tests=True,test_re= +``` + +where `` is to be replaced by the desired test, e.g. `TestGyReAuth`. Run +`fab --display integ_test` to see more available options. + +Note that running a test can be further expedited by running it directly on the CWAG test VM using +`gotestsum`, which echoes how it is run by the `fab` command. In particular, the above command can +be run with `run_tests=False` to do the required set-up + +```bash +[HOST] fab integ_test:run_tests=False,provision_vm=False,no_build=True,skip_unit_tests=True,test_re= +``` + +before logging into the CWAG test VM, navigating to the gateway folder and running the test directly + +```bash +[HOST] vagrant ssh cwag_test +[VM] cd magma/cwf/gateway +[VM] gotestsum --format=standard-verbose --packages='./...' -- -test.short -timeout 50m -count 1 -tags=all -run= +``` + +This command can be used to execute the test multiple times in a way that is faster than can be done +with the `fab` command, which may be helpful during development. 
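+
+For example, with `TestGyReAuth` (the example test name used earlier) filled in,
+the direct invocation on the test VM would look like this:
+
+```bash
+[VM] gotestsum --format=standard-verbose --packages='./...' -- -test.short -timeout 50m -count 1 -tags=all -run=TestGyReAuth
+```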
diff --git a/docs/docusaurus/versioned_docs/version-1.8.0/feg/deploy_build.md b/docs/docusaurus/versioned_docs/version-1.8.0/feg/deploy_build.md
new file mode 100644
index 000000000000..6b39e10496ad
--- /dev/null
+++ b/docs/docusaurus/versioned_docs/version-1.8.0/feg/deploy_build.md
@@ -0,0 +1,61 @@
+---
+id: version-1.8.0-deploy_build
+title: Build FeG
+hide_title: true
+original_id: deploy_build
+---
+
+# Build Federation Gateway Components
+
+If you cloned Magma using git, make sure you are checked out on the release you
+intend to build.
+
+In case you need to change the version, you can:
+
+```bash
+# to list all releases
+git tag -l
+
+# to switch to a different release (for example v1.8)
+git checkout v1.8
+
+# to switch to master (development version)
+git checkout master
+```
+
+Once you are on the desired version of Magma, make sure your Docker daemon is running.
+Then run the following commands to build the FeG.
+
+```bash
+cd magma/feg/gateway/docker
+docker-compose build --parallel
+# if the build fails, try with sudo and without parallelization
+sudo docker-compose build
+```
+
+Note that you are building the FeG from your local repository. There is no need to
+change the contents of `.env`.
+
+If this is your first time building the FeG, this may take a while.
+
+When this job finishes, you will have built the FeG on your local machine. You can
+check the images using Docker. You should see `gateway_python` and `gateway_go`
+among other images that were used during the build process.
+
+```bash
+docker images
+```
+
+In case you want to host the FeG on your own image registry, do the following to upload these
+images:
+
+```bash
+../../../orc8r/tools/docker/publish.sh -r -i gateway_python
+../../../orc8r/tools/docker/publish.sh -r -i gateway_go
+```
+
+In case you built Magma CWF (Carrier WiFi), you also need to upload `gateway_radius`.
+
+```bash
+../../../orc8r/tools/docker/publish.sh -r -i gateway_radius
+```
diff --git a/docs/docusaurus/versioned_docs/version-1.8.0/feg/deploy_install.md b/docs/docusaurus/versioned_docs/version-1.8.0/feg/deploy_install.md
new file mode 100644
index 000000000000..f8bae4e0997e
--- /dev/null
+++ b/docs/docusaurus/versioned_docs/version-1.8.0/feg/deploy_install.md
@@ -0,0 +1,167 @@
+---
+id: version-1.8.0-deploy_install
+title: Install FeG
+hide_title: true
+original_id: deploy_install
+---
+
+# Install Federation Gateway
+
+## Prerequisites
+
+To install the Federation Gateway, there are three required files that are
+deployment-specific. These are described below:
+
+- `rootCA.pem` - This file should match the `rootCA.pem` of the Orchestrator
+that the Federation Gateway will connect to.
+
+- `control_proxy.yml` - This file is used to configure the `magmad`
+and `control_proxy` services to point toward the appropriate Orchestrator.
+A sample configuration is provided below. The `bootstrap_address`,
+`bootstrap_port`, `controller_address`, and `controller_port` are the
+parameters that will likely need to be modified (check
+ `/magma/feg/gateway/configs/control_proxy.yml` for the most recent
+ format)
+
+```yaml
+#
+# Copyright 2020 The Magma Authors.
+
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. + +# nghttpx config will be generated here and used +nghttpx_config_location: /var/tmp/nghttpx.conf + +# Location for certs +rootca_cert: /var/opt/magma/certs/rootCA.pem +gateway_cert: /var/opt/magma/certs/gateway.crt +gateway_key: /var/opt/magma/certs/gateway.key + +# Listening port of the proxy for local services. The port would be closed +# for the rest of the world. +local_port: 8443 + +# Cloud address for reaching out to the cloud. +cloud_address: controller.magma.test +cloud_port: 443 + +bootstrap_address: bootstrapper-controller.magma.test +bootstrap_port: 443 + +# Option to use nghttpx for proxying. If disabled, the individual +# services would establish the TLS connections themselves. +proxy_cloud_connections: True + +# Allows http_proxy usage if the environment variable is present +allow_http_proxy: True +``` + +- `.env` - This file provides any deployment specific environment variables used +in the `docker-compose.yml` of the Federation Gateway. A sample configuration +is provided below (please check `magma/feg/gateway/docker/.env` for the most + recent format): + +```yaml +# Copyright 2020 The Magma Authors. + +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +COMPOSE_PROJECT_NAME=feg +DOCKER_REGISTRY= +DOCKER_USERNAME= +DOCKER_PASSWORD= +IMAGE_VERSION=latest +GIT_HASH=master + +ROOTCA_PATH=/var/opt/magma/certs/rootCA.pem +CONTROL_PROXY_PATH=/etc/magma/control_proxy.yml +SNOWFLAKE_PATH=/etc/snowflake +CONFIGS_DEFAULT_VOLUME=/etc/magma +CONFIGS_TEMPLATES_PATH=/etc/magma/templates + +CERTS_VOLUME=/var/opt/magma/certs +CONFIGS_VOLUME=/var/opt/magma/configs + +# This section is unnecessary if using host networking +S6A_LOCAL_PORT=3868 +S6A_HOST_PORT=3868 +S6A_NETWORK=sctp + +SWX_LOCAL_PORT=3869 +SWX_HOST_PORT=3869 +SWX_NETWORK=sctp + +GX_LOCAL_PORT=3870 +GX_HOST_PORT=3870 +GX_NETWORK=tcp + +GY_LOCAL_PORT=3871 +GY_HOST_PORT=3871 +GY_NETWORK=tcp +``` + +## Installation + +The installation is done using the `install_gateway.sh` script located at +`magma/orc8r/tools/docker`. To install, copy that file and the three files +described above into a directory on the install host. Then + +```console +INSTALL_HOST [~/]$ sudo ./install_gateway.sh feg +``` + +After this completes, you should see: `Installed successfully!!` + +## Registration + +After installation, the next step is to register the gateway with the Orchestrator. +To do so: + +```console +INSTALL_HOST [~/]$ cd /var/opt/magma/docker +INSTALL_HOST [/var/opt/magma/docker]$ docker-compose exec magmad /usr/local/bin/show_gateway_info.py +``` + +This will output a hardware ID and a challenge key. This information must be +registered with the Orchestrator. At this time, NMS support for FeG +registration is still in-progress. + +To register the FeG, go to the Orchestrator's Swagger UI in your browser. +(i.e. ). + +Now, create a Federation Network. This is found at `/feg` under the +**Federation Networks** section. 
If you have not registered any gateways before, you +must set up a tier under the **Orchestrator** section at `/networks/{network_id}/tiers`. +Then register the gateway under the **Federation Gateway** section at `/feg/{network_id}/gateways` +using the network ID of the Federation Network and the hardware ID and challenge key +from the previous step. + +To verify that the gateway was correctly registered, run: + +```console +INSTALL_HOST [~/]$ cd /var/opt/magma/docker +INSTALL_HOST [/var/opt/magma/docker]$ docker-compose exec magmad /usr/local/bin/checkin_cli.py +``` + +## Upgrades + +The Federation Gateway supports NMS initiated upgrades. These can be triggered +from the NMS under the `Configure` section by updating the FeG's tier to the +appropriate `Software Version`. After triggering the upgrade from the NMS, +magmad on the gateway will pull down the specified docker images, +update any static configuration, and update the docker-compose file to the +appropriate version. diff --git a/docs/docusaurus/versioned_docs/version-1.8.0/feg/s1ap_federated_tests.md b/docs/docusaurus/versioned_docs/version-1.8.0/feg/s1ap_federated_tests.md new file mode 100644 index 000000000000..333bdb76deb2 --- /dev/null +++ b/docs/docusaurus/versioned_docs/version-1.8.0/feg/s1ap_federated_tests.md @@ -0,0 +1,212 @@ +--- +id: version-1.8.0-s1ap_federated_tests +title: S1AP Federated Integration Tests +hide_title: true +original_id: s1ap_federated_tests +--- + +# S1AP Federated Integration Tests + +The S1AP Integration Test only tests the AGW. The objective of the **S1AP +Federated Integration Tests** is to provide a test platform +to run Magma with all of its components. That is, end-to-end tests with all +components of Magma: AGW, Orc8r and FeG. + +Currently, these tests are in an experimental phase, so we are only testing the connectivity of +AGW - Orc8r - FeG, and we are able to run the basic authentication related tests using the S1AP tester +and our mock HSS. + +## Architecture + +As the diagram indicates, these tests spin up **AGW**, **FeG**, and **Orc8r** +and use `S1AP` and `magma_trfserver` as an eNb and SGi emulator to run tests. + +```mermaid +graph LR; + A[[S1AP]] --- AGW ---|control| Orc8r --- FeG --- HSS[(HSS)]; + AGW ---|data| Z[[magma_trfserver]]; +``` + +The services will run either on Vagrant VMs or on Docker: + +| Services | Vagrant VM | Docker | +|-------------------|:---------------:|:---------:| +| AGW | magma | | +| FeG | ✓ | ✓ | +| Orc8r | | ✓ | +| Traffic server | magma_trfserver | | +| S1AP tester | magma_test | | +| HSS | | ✓ | + +*Note that FeG runs on Docker inside the Magma VM. The reason is to guarantee +Docker host mode is supported by the host (not supported on Mac).* + +## Running the tests + +Below there are three different ways to run the tests. These will help you +debug any intermediate step should the automation fail. + +### Automated test run + +The fab script mentioned below will do everything for you. In case it fails, try one of the other two methods: +[semiautomatic](#semiautomatic-test-run) or [manual](#manual-build). 
+ +This script will + +- Build AGW, Orc8r and FeG +- Start AGW, Orc8r and FeG +- Configure Orc8r with AGW and FeG +- Run connectivity tests between all three components +- Run a basic attach/detach test +- Run an attach/detach test with multiple UEs (for 32 UEs) + +To execute the script, run: + +```bash +cd magma/lte/gateway +fab federated_integ_test:build_all=True + +# to run it again, you can skip the build_all +# however, if you change code in Magma, you will need to build_all again +fab federated_integ_test +``` + +You can access Orc8r adding to your keychain the `admin_operator.pfx` cert +you will find at `/magma/.cache/test_certs`. Then you can check your +provisioned gateways using +[swagger interface](https://127.0.0.1:9443/apidocs/v1/?docExpansion=none) +that will be running on your Orc8r + +Please, for more detail, check the following sections which provide more +insight about this process. + +### Semiautomatic test run + +#### Build environment + +Using this method, you will build the environment step by step but still using +certain automation. If you want to build the environment manually instead, go to +[Manual build](#manual-build). + +On your host machine, execute these commands to build, start and configure the AGW, +FeG and Orc8r: + +```bash +cd magma/lte/gateway/python/integ_tests/federated_tests +fab build_all_and_configure +``` + +After this has run, you can check +whether your gateways have been bootstrapped using the magmad logs on the AGW and FeG. The +command below will try to reach Orc8r from AGW and FeG, and FeG from AGW: + +```bash +cd magma/lte/gateway/python/integ_tests/federated_tests +fab test_connectivity +``` + +Once it has been built, start the `magma_trfserver` and `magma_test` VMs: + +```bash +cd magma/lte/gateway +vagrant up magma_test +vagrant up magma_trfserver +``` + +You can then [run the tests manually](#run-tests-manually). + +### Manual build + +If you want to build the environment manually, you can carry out the following steps. + +*Note that commands for the AGW and FeG have to be run inside the Vagrant VM. For this reason, +all such commands include the `vagrant ssh magma` command first. To leave +Vagrant, just type `exit`. Orc8r will need to be run on the +host itself (no Vagrant involved).* + +- AGW: + +```bash +cd magma/lte/gateway +vagrant up magma +vagrant ssh magma + +# inside vagrant vm +cd magma/lte/gateway +make run + +# exit from vagrant vm +exit +``` + +- FeG: + +```bash +cd magma/lte/gateway +vagrant up magma +vagrant ssh magma + +# inside vagrant vm +cd magma/lte/gateway/python/integ_tests/federated_tests/docker +docker-compose build +./run.py +``` + +- Orc8r: + +```bash +cd magma/orc8r/cloud/docker +./build.py -a +./run.py + +# return to agw folder +cd magma/lte/gateway +# register gateways +fab --fabfile=dev_tools.py register_federated_vm +fab --fabfile=dev_tools.py register_feg_gw +``` + +- Test VM: + +```bash +cd magma/lte/gateway +vagrant up magma_test +vagrant ssh magma_test + +# inside vagrant vm +cd magma/lte/gateway/python +make + +# exit from vagrant vm +exit +``` + +- Traffic VM: + +```bash +cd magma/lte/gateway +vagrant up magma_trfserver +``` + +#### Run tests manually + +Once you have built all of the VMs, you can try to run the tests from the +`magma_test` VM. + +**Note**: Currently only [s6a related LTE Integ tests](https://github.com/magma/magma/blob/master/lte/gateway/python/integ_tests/defs.mk#L288) can be run in federated mode. 
+ +```bash +cd magma/lte/gateway +vagrant ssh magma_test + +# inside vagrant vm +cd magma/lte/gateway/python/integ_tests +## Individual test(s), e.g.: +make fed_integ_test TESTS=s1aptests/test_attach_detach.py + +## All tests +make fed_integ_test + +# once the tests are done, you can exit the vagrant vm +exit +``` diff --git a/docs/docusaurus/versioned_docs/version-1.8.0/general/cloudstrapper_install.md b/docs/docusaurus/versioned_docs/version-1.8.0/general/cloudstrapper_install.md new file mode 100644 index 000000000000..536707e9aaae --- /dev/null +++ b/docs/docusaurus/versioned_docs/version-1.8.0/general/cloudstrapper_install.md @@ -0,0 +1,231 @@ +--- +id: version-1.8.0-aws_cloudstrapper +title: AWS Cloudstrapper Install +hide_title: true +original_id: aws_cloudstrapper +--- + +# Deploying Magma via Cloudstrapper + +There are two basic options for setting up Magma Cloudstrapper within Amazon Web Services: Marketplace or via private image. + +## 1) Launching Cloudstrapper from Marketplace + +- Access the [AWS Marketplace](https://aws.amazon.com/marketplace) and search for “Magma Cloudstrapper” + - Alternatively, check the [direct link](https://aws.amazon.com/marketplace/pp/prodview-wkchyk2okdnhc?qid=1627070115980&sr=0-2&ref_=srh_res_product_title) +- Click on “Continue to Subscribe” and “Continue to Configuration” +- Choose “Delivery Method”, “Software Version” and “Region.” The only default we recommend you change is the “Region" +- Click on “Continue to Launch” + - In “Choose Action”, select “Launch from Website” (default) + - The EC2 Instance Type dropdown will select “t2.medium” by default + - Choose preferred values for other drop-boxes. Cloudstrapper will work fine deployed on the public subnet. + - Under “Security Group Settings” select a security group that allows SSH traffic and any other rules that are relevant to your network. + - Under “Key Pair Settings” select your preferred key pair. +- Click on Launch +- In order to ssh into your Cloudstrapper use the key pair .pem file and ubuntu in this format: ssh -i <KeyPair> ubuntu@<InstanceIP> + - Example: +- ssh -i "cloudstrapper-test.pem" ubuntu@1.1.1.1 + - **NOTE:** If you receive an “WARNING: UNPROTECTED PRIVATE KEY FILE!” error while trying to SSH you will need to change the permissions on your key file by running `chmod 400` to make it read-only. + +## 2) Launching Cloudstrapper from Private Images + +- Navigate to the “AMIs” page to verify Cloudstrapper images show up under “Images Owned By Me” +- Select the Cloudstrapper AMI followed by the “Launch” button. +- On the “Choose Instance Type” page, select the t2.micro instance type (free) and click next. +- No changes are required to default settings on the “Configure Instance Details” page. Click next and navigate to the “Add Storage” page. +- Update the Size parameter to 32GB of space and proceed to the “Add Tags” page. +- Select the “Add Tag” button or click the link to add a name tag. In the “Value” column enter in a name for your Cloudstrapper and in the “Key” field create a key. Proceed to the “Configure Security Group” page. +- On this page, select a security group that at allows SSH traffic (default) and any other rules that are relevant to your network. +- Proceed to the “Review” page to ensure all your network details are correct followed by clicking the Launch button. +- Finally, select a pre-existing key pair that will allow access to your network and launch your instances. 
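+
+The same private-image launch can also be scripted with the AWS CLI. The snippet
+below is only a sketch of the console steps above; the AMI ID, key pair name,
+security group ID and Name tag value are placeholders that must be replaced with
+your own values.
+
+```bash
+aws ec2 run-instances \
+  --image-id <cloudstrapper-ami-id> \
+  --instance-type t2.micro \
+  --key-name <your-key-pair> \
+  --security-group-ids <sg-allowing-ssh> \
+  --block-device-mappings 'DeviceName=/dev/sda1,Ebs={VolumeSize=32}' \
+  --tag-specifications 'ResourceType=instance,Tags=[{Key=Name,Value=<cloudstrapper-name>}]'
+```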
+ +## Configure Orchestrator Deployment Parameters + +- Once your instance is created, click on the “Instance ID” url and select the “Connect” button +- In the SSH tab copy the command to connect to your instance and run it within your CLI + - **NOTE:** If you receive an “WARNING: UNPROTECTED PRIVATE KEY FILE!” error while trying to SSH you will need to change the permissions on your key file by running `chmod 400` to make it read-only. +- Create a magma-dev directory and clone magma master onto it by running the following commands: + +```bash +mkdir ~/magma-dev +cd ~/magma-dev +git clone https://github.com/magma/magma.git +``` + +- Locate and navigate to the playbooks directory inside the source repo: + + - `cd ~/magma-dev/magma/experimental/cloudstrapper/playbooks` + +- Copy the secrets.yaml file and update the credentials for AWS: + + - `cp roles/vars/secrets.yaml ~/magma-dev/` +- Update your secrets.yaml file with your AWS credentials. The two fields that are required to update are the `AWS Access Key` and the `AWS Secret Key`. **Note:** make sure you have a space between the colon and before your keys. + +- Build types: Magma can be deployed via binaries hosted in the community artifactory. These options are enabled in the code by default and Cloudstrapper, when using the ‘community’ option, it will default to these options. + - Community Builds: Builds are created and labeled by Magma CI teams and available for deployment. The secrets.yaml file does not need any inputs in the docker and github variables. + - Private Builds: Edit the secrets.yaml file in the `/magma-dev` directory to include the github and docker credentials under the Github Prerequisites and Dockerhub Prerequisites fields. + +**Please ensure that:** + +- You are deploying orchestrator in a region that supports at least **three Availability Zones** +- The region is clean and has no leftover artifacts from a prior Orc8r deployment. Please use the cleanup script below if needed. +- the value of varFirstInstall is set based on if the account already has an “AWSServiceRoleForAmazonElasticsearchService” role created (to be automated). If it exists, varFirstInstall in the config file would be “false”. If not, varFirstInstall in the config file would be “true”. If the role does not exist already, Orc8r will create the role. +- there is disk space of at least 1+ GB to pull code and create local artifacts +- dirLocalInventory/orc8rClusterName folder does not exist from a previous install (to be automated) + +```bash +aws iam list-roles --profile | grep -i AWSServiceRoleForAmazonElasticsearchService + +"RoleName": "AWSServiceRoleForAmazonElasticsearchService", +"Arn": "arn:aws:iam:::role/aws-service-role/es.amazonaws.com/AWSServiceRoleForAmazonElasticsearchService" +``` + +## Deploy Orchestrator + +- Prior to completing the following steps you must obtain a domain name for your Orc8r +- Create a parameter file (must end in a .yaml extension) in the ~/magma-dev directory + - View the example below to examine what a sample parameter file would look like. You must reserve a domain name before completing this step. 
+ +```bash +--- +dirLocalInventory: ~/magma-dev +orc8rClusterName: Sydney +orc8rDomainName: ens-16-sydney.failedwizard.dev +orc8rLabel: 1.8.0 +orc8rVersion: v1.8 +awsOrc8rRegion: ap-southeast-2 +varBuildType: community +varFirstInstall: "false" +``` + +- A legend of the variables used: + - dirLocalInventory: Folder which has secrets.yaml file that includes AWS access and secret keys + - orc8rClusterName: A local folder created in dirLocalInventory used to store state information + - orc8rDomainName: Domain name of the Orc8r + - orc8rLabel: The label to look for in the containers repository + - orc8rVersion: The version of orc8r tools used to generate artifacts + - awsOrc8rRegion: The region where this orc8r will be deployed + - varBuildType: Using either ‘community’ or ‘custom’ binaries + - varFirstInstall: Indicating if this is the first install of any kind of Magma or not, to skip some of the default, shared roles created + +- First change your directory to `~/magma-dev/magma/experimental/cloudstrapper/playbooks`. Next, run the playbook to set up the Orchestrator deployment: + +```bash +ansible-playbook orc8r.yaml -e '@' +``` + +- After a successful run of the playbook (30-40 minutes), run terraform to obtain nameserver information to be added to DNS. + +Example: + +```bash +cloudstrapper:~/magma-dev/Mumbai/terraform #terraform output +nameservers = tolist([ +"ns-1006.awsdns-61.net", +"ns-1140.awsdns-14.org", +"ns-2020.awsdns-60.co.uk", +"ns-427.awsdns-53.com", +]) +``` + +## Deploy the AGW + +- If you would like to do a customized installation (Ex. generating your own classless interdomain routing) you will first need to create your own .yaml file + - This is done by navigating to `magma-dev/magma/experimental/cloudstrapper/playbooks/roles/agw-infra/vars/` + - Next, create your own .yaml file where you can configure your unique parameters. Here is an example: + +```bash +**cidrVpc**: 10.7.0.0/16 +**cidrSgi**: 10.7.4.0/24 +**cidrEnodeb**: 10.7.2.0/24 +**cidrBridge**: 10.7.6.0/24 +**azHome**: "{{ awsAgwAz }}" +**secGroup**: "{{ secgroupDefault }}" +**sshKey**: "{{ keyHost }}" +**siteName**: MenloPark +``` + +- If you would like to do a non-customized setup you can use the `MenloPark` idSite for your installation for the steps below. + +- Create a parameter file in the `~/magma-dev` directory. A sample parameter file would look like as follows + - **Note:** Use a base ubuntu image of the region for the awsCloudstrapperAmi variable. + - Please ensure you have a key with name described in keyHost available in the region. 
This is the key that would be embedded into the AGW for ssh access + - Future: awsCloudstrapperAmi will be renamed to awsBastionAmi (task filed) + +```bash + --- + dirLocalInventory: ~/magma-dev + awsAgwAmi: ami-00ca08f84d1e324b0 + awsCloudstrapperAmi: ami-02f1c0266c02f885b + awsAgwRegion: ap-northeast-1 + keyHost: keyMagmaHostBeta + idSite: MenloPark + idGw: mpk01 + + +``` + +- A legend of the variables used: + - dirLocalInventory: Location of folder with secrets.yaml file that include AWS access and secret keys + - awsAgwAmi: Id of the AGW AMI available in the region of deployment + - awsCloudstrapperAmi: Id of the Cloudstrapper AMI available in the region of deployment + - awsAgwRegion: Region where AGWs will be deployed + - keyHost: Public key of keypair from region, will be embedded into the launched AGW for SSH access + - idSite: ID of site and partial name of variable file that has site specific information (such as CIDRs) ([Example](https://github.com/magma/magma/blob/master/experimental/cloudstrapper/playbooks/roles/agw-infra/vars/varSiteMenloPark.yaml)) + - idGw: Id of Gateway to be installed; Used as value of a tag with key as Name; Can be changed for subsequent AGW deployments + +- Locate and navigate to the playbooks directory inside the source repo: + +```bash +cd ~/magma-dev/magma/experimental/cloudstrapper/playbooks +``` + +- Run the following command for the first AGW: + +```bash +ansible-playbook agw-provision.yaml --tags createNet,createBridge,createGw,inventory -e '@' + +``` + +- Run the following for subsequent AGWs : + +```bash +ansible-playbook agw-provision.yaml --tags createGw -e '@' +``` + +## Configure AGW and Orchestrator + +- Configure AGW [manually](https://magma.github.io/magma/docs/lte/config_agw) or through the playbooks running agw-configure from the Bridge node. +- Start by configuring the Bridge node as a bastion host. Using the Bridge node as Bastion host, configure the newly deployed AGW to communicate with the Orc8r. +- Follow the following steps [here](https://magma.github.io/magma/docs/orc8r/deploy_install) to create an admin user for NMS. +- Generate a challenge key and hardware id and add it to Orc8r from the [Magmacore website documentation](https://magma.github.io/magma/docs/lte/deploy_config_agw). + +## Custom AGWs for Snowcone + +Snowcone devices require that an image be already embedded in the device before shipping. Hence, the devices require a key to be embedded in the authorized_keys file for the default user (‘ubuntu’) or a customer’s preferred user before the device is ordered. To achieve that run the following commands. + +- To generate a custom image from the base Access Gateway AMI + + 1. Start an instance of the Access Gateway AMI similar to the “Launching Cloudstrapper” session above. + 2. Once the instance is booted up, add your public key to the ~/.ssh/authorized_keys file + 3. Snapshot the image to create a new AMI. Use this AMI to order your snowcone. + +- Alternatively, to build your own AGW AMI from scratch and customize it for your use-case + + 1. Follow section 5.1 from the [README](https://github.com/magma/magma/tree/master/experimental/cloudstrapper) file of the Cloudstrapper deployment + +## Cleaning up an Orc8r environment + +Orc8r cleanup allows the user to target a given region and automatically cleanup all the resources there and ensure it is ready for a new deployment. 
+
+From within the `terraform` directory inside the `dirLocalInventory/orc8rClusterName` folder, run `terraform destroy` to release all terraform-created resources.
+
+In certain cases, terraform might leave artifacts that were not cleaned up and that might impact subsequent deployments. Use Cloudstrapper's cleanup capabilities to clear the region of all known artifacts.
+
+**Variables to consider for cleanup:**
+
+- awsOrc8rRegion: Region where Orchestrator runs
+  - command: `ansible-playbook cleanup.yaml [--tags various] -e '@'`
+
+- For a complete cleanup of the environment or Orc8r, run:
+  - command: `ansible-playbook cleanup.yaml --skip-tags agw -e '@'`
diff --git a/docs/docusaurus/versioned_docs/version-1.8.0/howtos/inbound_roaming.md b/docs/docusaurus/versioned_docs/version-1.8.0/howtos/inbound_roaming.md
new file mode 100644
index 000000000000..a43308f89af0
--- /dev/null
+++ b/docs/docusaurus/versioned_docs/version-1.8.0/howtos/inbound_roaming.md
@@ -0,0 +1,408 @@
+---
+id: version-1.8.0-inbound_roaming
+title: Inbound Roaming
+hide_title: true
+original_id: inbound_roaming
+---
+
+# Inbound Roaming
+
+Inbound Roaming allows a Magma operator to provide service for subscribers
+belonging to other operators (roaming subscribers).
+
+Inbound Roaming requires the Magma operator to reach agreements with
+other operators to have direct connectivity to their HSS through the `S6a`
+(diameter) interface and to their PGW through the `S8` (GTP) interface. A VPN
+is suggested to reach those roaming services, but setting one up is out of
+scope for Magma.
+
+A [configuration example](#configuration-example) is provided at the bottom
+of this document.
+
+TEID allocation is described in the
+[Technical Reference](https://docs.magmacore.org/docs/lte/dev_teid_allocation)
+section.
+
+## Architecture
+
+Currently, we support two architectures:
+
+- **Local Non-Federated + Roaming**: local subscribers are stored in
+  SubscriberDB, and roaming subscribers use a remote Federated Gateway to reach
+  the HSS/PGW.
+- **Local Federated + Roaming**: local subscribers use a local Federated
+  Gateway to reach the HSS/PCRF/OCS, and roaming subscribers use a remote
+  Federated Gateway to reach the HSS/PGW.
+
+Any roaming architecture is composed of a Federated LTE Network and a FeG
+Network that serve the local subscribers, plus one extra FeG Network per
+roaming agreement.
+
+The picture below shows a possible architecture for a
+Local Non-Federated + Roaming case (one roaming agreement). As you can see,
+a VPN is suggested to reach the user plane at the remote PGW.
+
+![Non-Federated Inbound Roaming architecture](assets/feg/inbound_roaming_architecture_non_federated.png?raw=true "Non-Federated Inbound Roaming")
+
+Other configurations, like SubscriberDB with PCRF/OCS to handle local
+subscribers, may also work but are not tested yet.
+
+Configurations with SubscriberDB, a local HSS and a remote HSS all at the same
+time are not supported yet.
+
+The following is a logical diagram of the Neutral Host architecture used by
+Inbound Roaming to route requests properly. Note that
+this diagram includes the optional local FeG Gateway. If you are using
+SubscriberDB for your local subscribers, you will not need that FeG Gateway,
+but you will still need the Neutral Host Network.
+ +```mermaid +graph LR + +AGW_G --Any_PLMN--> NH_Network +NH_Network --PLMN1--> FeG_Gateway_1 +NH_Network --PLMN2--> FeG_Gateway_2 +NH_Network --PLMN3--> FeG_Gateway_3 +Optional_FeG_gateway --Other_PLMN--> Local_HSS +FeG_Gateway_1 --> HSS_1 +FeG_Gateway_2 --> HSS_2 +FeG_Gateway_3 --> HSS_3 + + +subgraph AGW Network +AGW_G +end + +subgraph Neutral_Host_Network +NH_Network & Optional_FeG_gateway + +end + +subgraph Roaming_FeG_Network_1 +FeG_Gateway_1 +end + +subgraph Roaming_FeG_Network_2 +FeG_Gateway_2 +end + +subgraph Roaming_FeG_Network_3 +FeG_Gateway_3 +end +``` + +## Prerequisites + +Before starting to configure roaming setup, first you need to bring up a +setup to handle your own/local subscribers. So before configuring Inbound +Roaming you need: + +- Install [Or8cr](https://docs.magmacore.org/docs/orc8r/architecture_overview), +- Install [Federatetion Gateway](https://docs.magmacore.org/docs/feg/deploy_intro) and, +- Install [Access Gateway](https://docs.magmacore.org/docs/lte/setup_deb). +- Create a Federate Deployment (see [below](#Create a Federated Deployment)). +- Make sure your setup is able to serve calls with your local subscribers + +Once you are done you should either: + +- Local Non-Federated case: `a Federated LTE Network with an LTE Gateway (AGW)` + `a Federation Network WITHOUT Federation Gateway (FeG)` +- Local Federated case: `a Federated LTE Network with an LTE Gateway (AGW)`+ + `a Federation Network WITH Federation Gateway (FeG)` + +We will refer to them as `local` Networks and Gateways to differentiate them +from the `roaming` Networks and Gateways we will create in the next step. + +### Create a Federated Deployment + +As mentioned, Inbound Roaming requires of a FeG gateway to reach the roaming +network. That is why Federated Deployment is required. Please, configure it +using this guide for [Federated Deployment](https://docs.magmacore.org/docs/feg/federated_FWA_setup_guide). + +All architectures requiere a Local FeG Network to exist. However depending on +your architecture, you may not need to create a local FeG Gateway inside that +FeG Network. Please check the table below which indicates what **gateways** +are required depending on the architecture + +| Network Type -> | `feg` roaming | `feg` local | `feg_lte` (AGW) | +| :-------------------: | :-----------: | :----------:| :--------------:| +| Local - Non Federated | Yes | No | Yes | +| Local - Federated | Yes | Yes | Yes | + +For both, Federated and Non-Federated mode, on `lte_feg` network under `epc` +you should set `hss_relay_enabled` to `true`. Selection of HSS or +subscriberDb will be done by Inbound Roaming mapping configuration. + +In case you are not using PCRF and OCS, then your +not need to create a FeG Gateway from the previous gide on your Federated +Deployment. Also remember in your Federated LTE network, under `epc`, you +will have to set `gx_gy_relay_enabled` to `false`, so the request are sent to +internal policy entity, not to the PCRF or OCS. + +## Inbound Roaming configuration + +The following instructions use Orc8r Swagger API to configure Inbound Roaming. +You can do the same using Swagger API or NMS JSON editor. + +In these instructions we will mainly use GET and PUT methods to read and write +from Swagger. We will use GET to see the content of Network/Gateway, +we will copy and paste that into the result of GET into the PUT method to +modify parameters. + +Below are the steps to add Inbound Roaming to your current setup: + +- Create roaming Federated Networks and Gateways. 
+- Configure local Access Gateway Network routing based on PLMN. +- Configure local Federation Network routing based on PLMN. +- Configure roaming Federation Network served networks +- Configure FeG Gateways +- Check connectivity + +### 1. Create Roaming Federated Networks and Federated Gateways + +Inbound Roaming needs as many FeG Networks as roaming agreements. Don't +forget to create a Federated Gateway per FeG Network. + +Roaming FeG Networks do not need to serve any Federated LTE Network (only the +FeG Network created on Pre Requisites needs to serve a Federated LTE Network) + +Those roaming Federated Gateways will need `S6a` and `S8` interfaces +configured (make sure you have the other operators HSS and PGW +parameters). To configure those interfaces go to **Swagger API**: + +- Go to`Federation Gateways` GET method `Get a specific + federation gateway` and search the configuration for one of those roaming + FeG Networks. +- Copy/paste the response into the PUT method `Update an entire federation + gateway record` +- Edit the `6a` and `S8` fields (check the example from the GET method to + see any missing parameter) +- Hit Execute (check no errors are show on the Swagger Responses) +- Run the GET method again to see the changes. + +Note that you need a routable IP to the roaming HSS and PGW to configure those +interfaces. + +### 2. Configure Local Access Gateway Network routing + +When a request gets to Access Gateway, this will have to be routed to +the proper call flow to either use `SubscriberDB` or `S6a/S5` or`S6a/S8`. +based on subscriber PLMN. + +To enable that routing you will have to configure it in your local Access +Gateway Network we created in Pre Requisites. On **Swagger API**: + +- Go to `Federated LTE Networks` and search using GET method `Describe a + federated LTE network` your local (non roaming) Federated LTE Network +- Copy/paste the response into PUT method `Update an entire Federated LTE + network` +- Find the key `federation`. If you completed Pre Requisites properly, you + should have `feg_network_id` pointing to your FeG Network. +- Add the routing dictionary following the example below, adding an entry per +each PLMN. + +```text + "federation": { + "federated_modes_mapping": { + "enabled": true, + "mapping": [ + { + "apn": "", + "imsi_range": "", + "mode": "local_subscriber", + "plmn": "123456" + }, + { + "apn": "", + "imsi_range": "", + "mode": "s8_subscriber", + "plmn": "9999" + } + ] + }, + "feg_network_id": "example_feg_network" + }, +``` + +To configure that key properly: + +- Field `mode` will indicate the path the subscriber will take: + - `local_subscriber` for PLMNs served by your own SubscriberDB/HSS. + - `s8_subscriber` for PLMNs served by roam HSS (`S6a`) and roam PGW (`S8`). + - Any other PLMN not configured here will be defaulted to local HSS (`S6a`). +- You can leave `apn` and `imsi_range` blank since it is not supported yet +- Add the routing dictionary following the example, adding an entry per each +PLMN. +- Note `hss_relay_enabled` must be enabled. The decision to send it to HSS or + not will be taken by `federated_modes_mapping`. If you disable, s8_subscribers + will not be sent to the FeG to get the HSS. + +- Flag `gx_gy_relay_enabled` can be enabled or disabled depending on if your + network works with local policy db or with OCS and PCRF (gx/gy). If your + local subscribers authenticate with HSS but use GX/GY, then you will have to + leave it as `True`. + +### 3. 
Configure Local Federation Network routing + +When a request gets to the Orc8r, this will have to be routed to the proper +FeG Network which serves that PLMN. + +To enable that routing you will have to configure it in your local FeG Network +we created in Pre Requisites. On **Swagger API**: + +- Go to `Federation Networks` and search using GET method `Describe a + federation network` your local (non roaming) FeG Network +- Copy/paste the response into PUT method `Update an entire federation network` +- Modify/add the `nh_routes` (see the example on Swagger API if it is + missing from your configuration). On the map match the PLMN, and the name + of the roam FeG Network. Note that these are names of FeG Networks, + not FeG Gateways. + +```text + "nh_routes": { + "00102": "inbound_feg", + "9999": "feg_roaming_network_1" + }, +``` + +- Hit Execute (check no errors are show on the Swagger Responses) +- Run the GET method again to see the changes. + +### 4. Configure Roaming Federation Network served networks + +Roaming Federation Networks will need a last configuration in order to match +them with their serving Local Federation Network. To do that, add to the +Roaming Federation Networks configuration the following key + +```text + "served_nh_ids": [ + "example_feg_network" + ], +``` + +That means that the Inbound FeG Network will be served by the local network +(in this case called `example_feg_network`) + +### 5. Configure Roaming FeG Gateway + +Configure Roaming FeG gateway serving roaming subscribers, but just configure +`s6a` and `s8`. Configure local GTP port to match with your PGW GTP-U port. +`apn_operator_sufix` is optional and will just add a suffix to the APN sent by +the UE. + +```text + "s8": { + "apn_operator_suffix": ".operator.com", + "local_address": "foo.bar.com:5555", + "pgw_address": "foo.bar.com:5555" + }, +``` + +Note you don't need to define the local IP, you can just use :port +`"local_address": ":5555"` + +### 6. Check connectivity + +- From Access Gateway make sure your Access Gateway is able to reach PGW-U + IP. + +```bash +ping -I sgi_interface pgw_u_ip +``` + +- Fom Federated Gateway make sure you can reach PGW-C IP and test your +Federated Gateway can reach PGW using this command + +```bash +$ cd /var/opt/magma/docker +$ sudo docker-compose exec s8_proxy /var/opt/magma/bin/s8_cli cs -server + +192.168.32.118:2123 123456789012345 +# where 192.168.32.118:2123 is the ip and port of the PGW-C +# where 123456789012345 is a valid imsi (if you use a not valid imsi +# you can still check the connectivity, but you will get a GTP error back +# from PGW +# Add -use_builtincli flag if you don't have a FeG setup properly yet +``` + +## Test and troubleshooting + +It is recommendable that before running the tests, enable some extra +logging capabilities in both Access Gateway, and Federated Gateway to +trace the call. + +For better details in Access Gateway logs: + +- Enable `log_level: DEBUG` in `mme.yml` and `subscriberdb.yml` +- Enable `print_grpc_payload: True` on `subscriberdb.yml` +- Restart magma, so the changes are taken +- See the logs using `sudo journalctl -fu magma@mme` or sudo `journalctl -fu + magma@subscriberdb` + +For better details Federated Gateway logs: + +- Add GRPC printing in the following services `s6a_proxy`, + `s8_proxy` adding `MAGMA_PRINT_GRPC_PAYLOAD: 1`. 
For example for s6a_proxy + +```yaml + s6a_proxy: + <<: *goservice + container_name: s6a_proxy + command: envdir /var/opt/magma/envdir /var/opt/magma/bin/s6a_proxy -logtostderr=true -v=0 + environment: + MAGMA_PRINT_GRPC_PAYLOAD: 1 +``` + +- Restart docker process, so the vars are taken `sudo docker-compose down` and + `sudo docker-compose up -d` +- Display the logs using for example`sudo docker-compose logs -f s8_proxy` + +### Test with s6a_cli and s8_cli + +FeG has a couple of clients to run an HSS Authentication Request (s6a) and +Create Session Request (s8) without the need of having a UE. You can run them +either on FeG or AGW. + +- Run From FeG + +```bash +# Use FeG s6a_proxy +sudo docker-compose exec s8_proxy /var/opt/magma/bin/s6a_cli air -remote_s6a 001002000000810 +# Use s6a_porxy that runs on the cli +sudo docker-compose exec s8_proxy /var/opt/magma/bin/s6a_cli air -use_builtincli false -remote_s6a 001002000000810 + +# use FeG s8_proxy +sudo docker-compose exec s8_proxy /var/opt/magma/bin/s8_cli cs -server 192.168.32.118:2123 -delete 3 +# use s8_proxy that runs on the cli +sudo docker-compose exec s8_proxy /var/opt/magma/bin/s8_cli cs -server 192.168.32.118:2123 -use_builtincli false -delete 3 +``` + +- Run from AGW + +```bash +# Extract the binaries from docker container from FeG, and move them to AGW +sudo docker cp s6a_proxy:/var/opt/magma/bin/s6a_cli . +sudo docker cp s8_proxy:/var/opt/magma/bin/s8_cli . +``` + +```bash +# Execute from AGW +./s6a_cli air -remote_s6a 001002000000810 +./s8_cli cs -server 192.168.32.118:2123 -delete 3 -apn inet -remote_s8 001002000000810 +``` + +## Configuration example + +Attached you can find the configuration that handle local subscribers with +both subscriber db and HSS and roaming subscribers: + +- PLMN 88888: uses subscriber DB to authenticate and Gx/Gy for accounting. + That is why we have `gx_gy_relay_enabled` as True. Those subscribers are + never sent to the FeG. +- PLMN 00102: MME sends those subscribers to be authenticated through the FeG. + When the request reaches Orc8r (in Feg Relay service) and using `nh_routes` + configured on the local FeG network, those subscribers are forwarded to + `inbound_feg` network +- Rest of PLMN: MME forwards any other PLMN to be authenticated through the + FeG. In the orc8r they are forwarded to the local FeG network `terravm_feg_network` + +[[inbound_roaming_sample.zip]](assets/feg/inbound_roaming_sample.zip) diff --git a/docs/docusaurus/versioned_docs/version-1.8.0/howtos/network_probe.md b/docs/docusaurus/versioned_docs/version-1.8.0/howtos/network_probe.md new file mode 100644 index 000000000000..edfef35af154 --- /dev/null +++ b/docs/docusaurus/versioned_docs/version-1.8.0/howtos/network_probe.md @@ -0,0 +1,193 @@ +--- +id: version-1.8.0-network_probe +title: Network Probe +hide_title: true +original_id: network_probe +--- + +# Lawfull Interception + +## Overview + +The Network Probe solution allows a Magma operator to provide standardized lawful interception X2 and X3 interfaces as described in [ETSI TS 103 221-2](https://www.etsi.org/deliver/etsi_ts/103200_103299/10322102/01.04.01_60/ts_10322102v010401p.pdf). This feature takes advantage of the rest API (swagger) to provide the X1 interface. + +## Architecture + +Current architecture leverages both AGW and Orc8r to deliver the magma LI feature. It aims at providing a 3GPP complaint solution and smooth integration with different Lawful Interception Management System (LIMS). 
+ +The high level design is described in the picture below, + +![Network Probe Architecture](assets/lte/network_probe_architecture.png "Network Probe Architecture") + +The LI feature can be summarized as follow, + +### X1 Interface + +The X1 interface relies on the Orc8r Swagger API to configure intercept tasks and destinations. This interface uses Json content and thus is not 3GPP complaint. An external solution is needed to handle the translation between the 3GPP (XML based) and Orc8r Swagger when required. + +Swagger nprobe endpoints allow the following, + +#### 1. Tasks management + +Network Probe Tasks represent an interception warrant and must be configured by LIMS. They provide the following information, + +- task_id : is UUID v4 representing an XiD identifier. +- target_id : represents the subscriber identifier +- target_type : represents the subscriber identifier type (IMSI, IMEI, MSISDN). Only IMSI is supported now. +- delivery_type : (events_only/all) states whether to deliver X2 or both X2 and X3 to the LIMS. +- correlation_id : allows X2 and X3 records correlation. A random value is generated if not provided. +- operator_id : operator identifier +- domain_id : domain identifier +- duration : specifies the lifetime of the task. If set to 0, the task will not expire until deleted through APIs. + +Each configured task in swagger will be propagated to the appropriate services (nprobe, liagentd, pipelined). + +#### 2. Destinations management + +Network Probe Destinations represent the configuration of the remote server in charge of collecting the records. + +- delivery_address : provides the address of the remote server. +- delivery_type : (events_only/all) states whether the server can receive X2 or both X2 and X3. +- private_key : TLS private key to connect the delivery address +- certificate : TLS certificate to connect to the delivery address +- skip_verify_server : skip client verification when self-signed certificates are provided. + +*Note: The orc8r nprobe service (X2 Interface) processes the first destination only. Subsequent destinations are ignored.* + +### X2 Interface + +The X2 interface is provided by the nprobe service in Orc8r. This service collects all the relevant events for targeted subscriber from fluentd through elastic search. Then, it parses them to create X2 records (aka Intercept Related Information - IRI) as specified ETSI TS 103 221-2 before exporting them to a remote server over TLS. +The current list of supported records are: + +- BearerActivation +- BearerModification +- BearerDeactivation +- EutranAttach. + +### X3 Interface + +It leverages AGW services to deliver X3 records as specified ETSI TS 103 221-2. +First, PipelineD mirrors all the data plane of the targeted subscriber to a dedicated network interface. Then, LiAgentD continuously listens on this port and process each packet as follow, + +- For each new target, It interrogates MobilityD to retrieve the subscriber ID from IP address +- Create a new intercept state (currently stored locally) +- Create X3 record by encapsulating the mirrored packet (starting from IP layer) in X3 header. +- Exports records to a remote server over TLS. + +## Prerequisites + +Before starting to configure the LI feature, first you need to prepare the following, + +- An orchestrator setup (Orc8r) +- An LTE Gateway (AGW) +- A remote TLS server to collect records and corresponding certificates. 
+- TLS Client certificates for X2 and X3 Interfaces + +## NetworkProbe Configuration + +The following instructions use Orc8r Swagger API to configure Network Probe feature. We will mainly use GET and POST methods to read and write from Swagger. +Below are the steps to enable this feature in your current setup: + +### 1. Enable LI mirroring in PipelineD in AGW + +Edit /etc/magma/pipelined.yml + +- Enable li_mirror in static_services list +- Set the following items, + - li_local_iface: gtp_br0 + - li_mirror_all: false + - li_dst_iface: li_port +- restart pipelined + +### 2. Enable LiAgentD service in AGW + +Copy `nprobe.{pem,key}` to `/var/opt/magma/certs/`, then edit `/etc/magma/liagentd.yml` + +- Enable the service +- Set the following remote TLS server information + - proxy_addr + - proxy_port + - cert_file + - key_file +- restart LiAgentD service + +*Note this service does not rely on Network Probe Destinations and must be configured manually.* + +### 3. Configure a NetworkProbe Task and Destination + +Go to **Swagger API**: + +- Go to `nprobe` POST method `Add a new NetworkProbeTask to the network` and set the content. +- Run the GET method again to see the applied changes. + +```json +{ + "task_details": { + "correlation_id": 605394647632969700, + "delivery_type": "events_only", + "domain_id": "string", + "duration": 300, + "operator_id": 1, + "target_id": "string", + "target_type": "imsi", + "timestamp": "2020-03-11T00:36:59.65Z" + }, + "task_id": "29f28e1c-f230-486a-a860-f5a784ab9177" +} +``` + +*Note that timestamp, correlation ID, domain ID and duration are optional and can be skipped. Task ID must be a valid uuid v4.* + +- Similarly, go to `nprobe` POST method `Add a new NetworkProbeDestination to the network` and set the content. +- Run the GET method again to see the applied changes. + +```json +{ + "destination_details": { + "delivery_address": "127.0.0.1:4040", + "delivery_type": "events_only", + "private_key": "string", + "certificate": "string", + "skip_verify_server": false + }, + "destination_id": "29f28e1c-f230-486a-a860-f5a784ab9177" +} +``` + +## Test and Troubleshooting + +It is recommendable that before running the tests, you enable some extra logging capabilities in both Access Gateway and Orc8r. + +For better details in Access Gateway logs: + +- Enable `log_level: DEBUG` in `liagentd.yml` +- See the logs using `sudo journalctl -fu magma@liagentd` + +Verify that the configured nprobe tasks through swagger were properly propagated to AGW services, + +- Open /var/opt/magma/configs/gateway.mconfig +- Verify that pipelined config contains the targeted subscribers id. + +```text +"liUes":{"imsis":["IMSI001010000000001"]} +``` + +- Verify that liagentd config contains the task information. + +```text +"nprobeTasks":[{"taskId":"29f28e1c-f230-486a-a860-f5a784ab9177","targetId":"IMSI001010000000001","targetType":"imsi","deliveryType":"events_only","correlationId":"605394647632070000"}] +``` + +Verify that events are streamed from the AGW to Orc8r in the NMS + +- Observe nprobe service logs in Orc8r with + +```bash +# Local setup +docker logs orc8r_controller_1 +``` + +```bash +# Remote Setup +kubectl -n orc8r logs nprobe-orc8r-... 
+``` diff --git a/docs/docusaurus/versioned_docs/version-1.8.0/howtos/troubleshooting/datapath_connectivity.md b/docs/docusaurus/versioned_docs/version-1.8.0/howtos/troubleshooting/datapath_connectivity.md new file mode 100644 index 000000000000..cd42aec8b208 --- /dev/null +++ b/docs/docusaurus/versioned_docs/version-1.8.0/howtos/troubleshooting/datapath_connectivity.md @@ -0,0 +1,232 @@ +--- +id: version-1.8.0-datapath_connectivity +title: Debugging AGW datapath issues +hide_title: true +original_id: datapath_connectivity +--- + +# AGW datapath debugging + +Following is a step by step guide for debugging datapath connectivity issues +of a UE. + +AGW datapath is based on OVS but there are multiple components that handle the +packet in uplink and downlink direction. Any one of the components can result +in connectivity issues. + +## Major components of datapath + +1. S1 (GTP) tunnel +2. OVS datapath +3. NAT/NonNAT forwarding plane. + +You need to check if any of this component dropping packet to root cause packet +drop issues. Following steps guides through the debugging process. + +## Datapath debugging when 100% packets are dropped + +Debugging datapath issues is much easier when you have traffic running. This +is specially important in case of LTE to avoid UE getting into inactive state. +Inactive state changes state of datapath flows for a UE, so its hard to debug +issues when there are such state changes. +It is recommended to have `ping` or other traffic generating utility running +on UE or the server (on SGi side of the network) while debugging the issue. + +1. Check magma services are up and running: + `service magma@* status`. For datapath health mme, sessions and pipelineD are + important services to look at. Check syslog for ERRORs from services. + If All looks good continue to next step. +2. Check for OVS services: + + ```bash + service openvswitch-switch status + ``` + +3. Check OVS Bridge status: gtp ports might vary depending on number of eNB + connected sessions. but `ovs-vsctl show` should not show any port with + any errors. If you see GTP related error run `/usr/local/bin/ovs-kmod-upgrade.sh`. + After running this command you need to reattach UEs. + + ```bash + ovs-vsctl show + + ...-...-...-...-..... + Manager "ptcp:6640" + Bridge gtp_br0 + Controller "tcp:127.0.0.1:6633" + is_connected: true + Controller "tcp:127.0.0.1:6654" + is_connected: true + fail_mode: secure + Port mtr0 + Interface mtr0 + type: internal + Port g_563160a + Interface g_563160a + type: gtpu + options: {key=flow, remote_ip="w.z.y.z"} + Port ipfix0 + Interface ipfix0 + type: internal + Port patch-up + Interface patch-up + type: patch + options: {peer=patch-agw} + Port gtp0 + Interface gtp0 + type: gtpu + options: {key=flow, remote_ip=flow} + Port g_963160a + Interface g_963160a + type: gtpu + options: {key=flow, remote_ip="a.b.c.d"} + Port li_port + Interface li_port + type: internal + Port gtp_br0 + Interface gtp_br0 + type: internal + Port proxy_port + Interface proxy_port + ... + Bridge uplink_br0 + Port uplink_br0 + Interface uplink_br0 + type: internal + Port dhcp0 + Interface dhcp0 + type: internal + Port patch-agw + Interface patch-agw + type: patch + options: {peer=patch-up} + ovs_version: "2.14.3" + ``` + +4. Check if UE is actually connected to datapath using: + `mobility_cli.py get_subscriber_table`. In case the IMSI is missing in this + table, you need to debug issue in control plane. UE is not attached to the + AGW, you need to inspect MME logs for control plane issues. 
+   If the UE is connected, continue to the next step.
+5. From here onwards you are going to debug the OVS datapath, so you need to select
+   a UE and identify which traffic direction is broken. You can do so by:
+   - Generating uplink traffic on the UE
+   - Capturing packets on the gtpu_sys_2152 device: `tcpdump -eni gtpu_sys_2152 host $UE_IP`. If you do not see any packets, it means that packets are not reaching the GTP tunnel. Check S1 connectivity to debug further.
+   - *NATed datapath*: Capture packets on gtp_br0: `tcpdump -eni gtp_br0 host $UE_IP`. If you don't see any packets, try debugging with the `dp_probe_cli.py` utility. This utility shows which OVS table is dropping the packet.
+   - *NATed datapath*: You also need to check if the packet is egressing on
+     the SGi port. You can do so by running tcpdump on the SGi port:
+     `tcpdump -eni $SGi_dev dst $SERVER_IP`. In case the packet is missing on the SGi port, you have a routing issue. Check the routing table and iptables rules on the AGW.
+   - *Non-NAT datapath*: You also need to check if the packet is egressing on
+     the SGi port. You can do so by running tcpdump on the SGi port:
+     `tcpdump -eni $SGi_dev dst $SERVER_IP`. If you don't see any packets, try debugging with the `dp_probe_cli.py` utility. This utility shows which OVS table is dropping the packet.
+   - In case uplink packets are reaching the SGi port, you need to debug issues in
+     the downlink direction.
+6. Check if you are receiving packets from the server by capturing the return traffic:
+   `tcpdump -eni $SGi_dev src $SERVER_IP`. If you do not see these packets,
+   you need to debug the SGi network configuration.
+7. Check traffic stats for the UE in OVS with `dp_probe_cli.py --imsi 1234 -D UL stats`.
+   The stats should show packets reaching OVS, i.e. the counters should be non-zero.
+   For downlink traffic, check the stats for DL.
+8. If all looks good so far, you need to trace the packet through the OVS pipeline. This command
+   shows the datapath action that OVS would apply to incoming packets. If it
+   shows `drop`, it means OVS is dropping the packet.
+   For tracing packets in the UL direction:
+   - If there is an action to forward the traffic to the egress port, check connectivity
+     between the SGi interface and the destination host.
+   - For NonNAT (bridged mode) you might need a vlan action for handling MultiAPN.
+
+   ```bash
+   $ dp_probe_cli.py -i 414200000000029 -d UL -I 114.114.114.114 -P 80 -p tcp
+
+   IMSI: 414200000000029, IP: 192.168.128.12
+   Running: sudo ovs-appctl ofproto/trace gtp_br0 tcp,in_port=3,tun_id=0x1,ip_dst=114.114.114.114,ip_src=192.168.128.12,tcp_src=3372,tcp_dst=80
+   Datapath Actions: set(eth(src=02:00:00:00:00:01,dst=5e:5b:d1:8a:1a:42)),set(skb_mark(0x5)),1
+   Uplink rules: allowlist_sid-IMSI414200000000029-APNNAME1
+   ```
+
+   For DL traffic, check whether the action shows a tunnel set action.
+
+   ```bash
+   $ dp_probe_cli.py -i 414200000000029 -d DL -I 114.114.114.114 -P 80 -p tcp
+
+   IMSI: 414200000000029, IP: 192.168.128.12
+   Running: sudo ovs-appctl ofproto/trace gtp_br0 tcp,in_port=local,ip_dst=192.168.128.12,ip_src=114.114.114.114,tcp_src=80,tcp_dst=3372
+   Datapath Actions: set(tunnel(tun_id=0xc400003f,dst=10.0.2.208,ttl=64,tp_dst=2152,flags(df|key))),pop_eth,set(skb_mark(0x4)),2
+   ```
+
+9. For DL traffic, if you see a datapath action, check that the dst IP address in the tunnel()
+   action is the right eNB for the UE.
+   - Check the routing table for this IP address: `ip route get $dst_ip`
+   - Check if the eNB is reachable from the AGW. There could be firewall rules dropping
+     the packets.
+
+10. In case the probe command shows a drop, you need to check which table is dropping
+    the packet. Manually run the OVS trace command from the output above, shown on the line
+    starting with `Running`. For the DL example above: `sudo ovs-appctl ofproto/trace
+    gtp_br0 tcp,in_port=local,ip_dst=192.168.128.12,ip_src=114.114.114.114,tcp_src=80,tcp_dst=3372`
+11. The trace command shows which table is dropping the packet. To map the numerical
+    table number to an AGW pipeline table, use `pipelined_cli.py`.
+
+    ```text
+    root@magma:~# pipelined_cli.py debug table_assignment
+    App                      Main Table          Scratch Tables
+    ----------------------------------------------------------------------
+    mme                      0                   []
+    ingress                  1                   []
+    arpd                     2                   []
+    access_control           3                   [21]
+    proxy                    4                   []
+    middle                   10                  []
+    gy                       11                  [22, 23]
+    enforcement              12                  [24]
+    enforcement_stats        13                  []
+    egress                   20                  []
+    ```
+
+12. In case the enforcement or gy table is dropping the packet, it means there is
+    either no rule for the traffic or a blocking rule that drops the packet.
+    - You can check the rules in the datapath using the dp-probe command:
+      `dp_probe_cli.py -i 414200000000029 --direction UL list_rules`
+    - To validate the rules pushed from Orc8r, you can use `state_cli.py
+      parse "policydb:rules"`. This command dumps all rules; you need
+      to check which rules are applicable to the UE.
+13. Packet drops in access_control mean there is a static config in pipelined
+    which does not allow this connection.
+14. The AGW should not be dropping packets in any other table. File a bug report with
+    the trace output in a GitHub issue.
+15. If this document does not help to debug the issue, please post the output of
+    all steps in a new GitHub issue.
+
+## Intermittent packet drops
+
+Intermittent packet loss is harder to debug than the previous case. Here the
+services and flow tables are configured correctly but some packets are still dropped.
+The usual suspects are:
+
+1. The TC queue is dropping packets due to rate limiting. The command
+   `pipelined_cli.py debug qos` shows stats for all dropped packets. Run the
+   test case and observe whether you see any dropped packets.
+
+   ```text
+   root@agw:~# pipelined_cli.py debug qos
+   /usr/local/lib/python3.5/dist-packages/scapy/config.py:411: CryptographyDeprecationWarning: Python 3.5 support will be dropped in the next release of cryptography. Please upgrade your Python.
+   import cryptography
+   Root stats for: eth0
+   qdisc htb 1: root refcnt 2 r2q 10 default 0 direct_packets_stat 5487 ver 3.17 direct_qlen 1000
+   Sent 1082274 bytes 7036 pkt (dropped 846, overlimits 4244 requeues 0)
+   backlog 0b 0p requeues 0
+
+   Root stats for: eth1
+   qdisc htb 1: root refcnt 2 r2q 10 default 0 direct_packets_stat 41140 ver 3.17 direct_qlen 1000
+   Sent 3603343 bytes 41337 pkt (dropped 0, overlimits 0 requeues 0)
+   backlog 0b 0p requeues 0
+   ```
+
+2. NAT could be dropping packets. This can happen when no ports are available in the NAT
+   table because of a large number of open connections. The AGW uses the default setting for
+   the maximum number of connections (`sysctl net.netfilter.nf_conntrack_max`) and the default
+   source port range (`sysctl net.ipv4.ip_local_port_range`). If you see a
+   high number of simultaneous connections, you need to tune these parameters
+   (example commands for inspecting and raising them are shown at the end of this document).
+
+If none of this works, file a detailed bug report on GitHub.
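+
+For the NAT tuning mentioned in the last item above, the following is a rough
+sketch of how to inspect and raise those limits. The numbers are illustrative
+assumptions, not tested recommendations; size them for your own deployment.
+
+```bash
+# Inspect current connection-tracking usage and limits
+sysctl net.netfilter.nf_conntrack_count net.netfilter.nf_conntrack_max
+sysctl net.ipv4.ip_local_port_range
+
+# Example: raise the limits for the current boot (illustrative values)
+sudo sysctl -w net.netfilter.nf_conntrack_max=262144
+sudo sysctl -w net.ipv4.ip_local_port_range="1024 65535"
+
+# Example: persist the change across reboots
+echo 'net.netfilter.nf_conntrack_max = 262144' | sudo tee /etc/sysctl.d/99-agw-nat.conf
+sudo sysctl --system
+```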
diff --git a/docs/docusaurus/versioned_docs/version-1.8.0/lte/architecture_overview.md b/docs/docusaurus/versioned_docs/version-1.8.0/lte/architecture_overview.md new file mode 100644 index 000000000000..fcd1108f1738 --- /dev/null +++ b/docs/docusaurus/versioned_docs/version-1.8.0/lte/architecture_overview.md @@ -0,0 +1,183 @@ +--- +id: version-1.8.0-architecture_overview +title: Overview +hide_title: true +original_id: architecture_overview +--- + +# Architecture Overview + +This document overviews the architecture of the Access Gateway (AGW) component of a Magma deployment. + +## Overview + +The main service within AGW is *magmad*, which orchestrates the lifecycle of all other AGW services. The only service *not* managed by magmad is the *sctpd* service (when the AGW is running in stateless mode). + +The major services and components hosted within the AGW include + +- OVS shapes the data plane +- *control_proxy* proxies control-plane traffic +- *enodebd* manages eNodeBs +- *health* checks and reports health metrics to Orchestrator +- *mme* encapsulates MME functionality and parts of SPGW control plane features in LTE EPC +- *mobilityd* manages subscriber mobility +- *pipelined* programs OVS +- *policydb* holds policy information +- *sessiond* coordinates session lifecycle +- *subscriberdb* holds subscriber information + +Note: The *mme* service will be renamed soon to *accessd*, as its main purpose is to normalize the control signaling with the 4G/5G RAN nodes. + +Together, these components help to facilitate and manage data both to and from the user. + +```text + ┏━━━━━━━━━━━━━━┓ ┏━━━━━━━━━━━━━━┓ + ┃ Magma Orc8r ┃───────────┃ Magma Feg ┃ + ┗━━━━━━━━━━━━━━┛ ┗━━━━━━━━━━━━━━┛ + ▲ ▲ + │ │ + ┏━━━━━┻━━━━━━┻━━━━━┓ + ┃ control_proxy ┃ + ┗━━━━━┳━━━━━━┳━━━━━┛ + │ └────────────────────────────────────────────────┐ + └──────────────────────────────────┐ │ + │ │ +┏Access Control & ┓ ┏━━━━Session━━━━┓ │ ┏━━━━Device━━━━━┓ │┏━━━Optional━━━━┓ +┃ Management ┃ ┃ & Policy ┃ │ ┃ Management ┃ │┃ Services ┃ +┃ ┌───────────┐ ┃ ┃ Management ┃ │ ┃ ┌───────────┐ ┃ │┃ ┌────────────┐┃ +┃ │ MME │ ┃ ┃ ┌───────────┐ ┃ └──╋─│ magmad │ ┃ │┃ │ monitord │┃ +┃┌─▶│ (accessd) │◀╋───────┐ ┃ │ policydb │◀╋┐ ┃ └───────────┘ ┃ │┃ └────────────┘┃ +┃│ └───────────┘ ┃ │ ┃ └───────────┘ ┃│ ┃ ┌───────────┐ ┃ │┃ ┌────────────┐┃ +┃│ │ │ │ ┃ │ ┃ ┌───────────┐ ┃│ ┃ │ dnsd │ ┃ │┃ │ redirectd │┃ +┃│ │ │ └─╋────┐ └─╋▶│ sessiond │◀╋┘ ┃ └───────────┘ ┃ │┃ └────────────┘┃ +┃│ │ └───────╋───┐│ ┃ └───────────┘ ┃ ┃ ┌───────────┐ ┃ │┃ ┌────────────┐┃ +┃│ └─────────────╋─┐ ││ ┗━━━━━━━▲━━━━━━━┛ ┃ │ health │ ┃ │┃ │ conntrackd │┃ +┃│ ┌───────────┐ ┃ │ ││ │ ┃ └───────────┘ ┃ │┃ └────────────┘┃ +┃└─▶│ sctpd │ ┃ │ ││ │ ┃ ┌───────────┐ ┃ │┃ ┌────────────┐┃ +┃ └───────────┘ ┃ │ ││ │ ┃ │ enodebd │ ┃ └╋─│td-agent-bit│┃ +┃ ┌───────────┐ ┃ │ ││ └───────┐ ┃ └───────────┘ ┃ ┃ └────────────┘┃ +┃ │ smsd │ ┃ │ ││ │ ┗━━━━━━━━━━━━━━━┛ ┃ ▲ ┃ +┃ └───────────┘ ┃ │ ││ Packet Processing │ ┗━━━━━━━━╋━━━━━━┛ +┃ ┃ │ ││ ┃ Pipeline ┃ │ │ +┗━━━━━━━━━━━━━━━━━┛ │ ││ ┃ ┌───────────┐ ┃ │ │ + │ ││ ┃ │ pipelined │◀╋─┘ │ + ┏━━Subscriber ━━┓ │ ││ ┃ └───────────┘ ┃ │ + ┃ Management ┃ │ ││ ┃ ▲ ┃ ┏━━Telemetry, Logging, Debug━━━┓│ + ┃┌────────────┐ ┃ │ ││ ┃ │ ┃ ┃ ┌───────────┐┌───────────┐ ┃│ + ┃│subscriberdb│◀╋──┘ ││ ┃ ┌───────────┐ ┃ ┃ │ ctraced ││ eventd │──╋┘ + ┃└────────────┘ ┃ ││ ┃ │ dpid │ ┃ ┃ └───────────┘└───────────┘ ┃ + ┗━━━━━━━━━━━━━━━┛ ││ ┃ └───────────┘ ┃ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ + ││ ┗━━━━━━━━━━━━━━━┛ + ┌───────────┐ ││ + │directoryd │◀─────┘│ + └───────────┘ │ + 
┌───────────┐ │ + │ mobilityd │◀──────┘ + └───────────┘ +``` + +## Default components + +This section describes the majority of AGW services and components. + +### SCTPd + +*sctpd* service. Termination service for the SCTP connections currently used for S1-C and N2 interfaces. It decouples application layers of 3GPP control signaling from the SCTP communication. Restarts of other AGW services do not impact this service in stateless mode, hence it preserves RAN connections upon application crashes or software updates. Restart of this service forces all the other major AGW services to restart. + +### MME + +*mme* service. Implements S1AP, NAS and MME subcomponents for LTE control plane. Also implements SGW and PGW control plane. If the *mme* service is restarted, S1 connection will be restarted and users service will be affected unless the AGW is in stateless mode. Mobilityd, pipelined, and sessiond are restarted in this process as well. + +### Enodebd + +*enodebd* service. Enodebd supports management of eNodeB devices that use TR-069 as a management interface. This is an optional service that is not used in the case of unmanaged eNBs. This is used for both provisioning the eNodeB and collecting the performance metrics. It also acts as a statistics reporter for externally managed eNodeBs. It supports following data models + +- Device Data model: TR-181, TR-098 +- Information Data model: TR-196 + +### Magmad + +*magmad* service. Parent service to start all Magma services, owns the collection and reporting of metrics of services, and also acts as the bootstrapping client with Orchestrator. + +### DNSd + +*dnsd* service. Local DNS and DHCP server for the eNodeB. + +### SubscriberDB + +*subscriberdb* service. Magma uses SubscriberDB to enable LTE data services through one network node like AGW for LTE subscribers. It is bypassed for the deployments that make use of the MNO's HSS. + +It supports the following two S6a procedures + +- S6a: Authentication Information Request and Answer (AIR/AIA) +- S6a: Update Location Request and Answer (ULR/ULA) + +SubscriberDB also supports these additional functions + +- Interface with Orchestrator to receive subscriber information such as IMSI, secret key (K), OP, user-profile during system bring-up +- Generate authentication vectors using Milenage algorithm and share these with MME +- Share user profile with MME + +### Mobilityd + +*mobilityd* service. IP address management service. It primarily functions as an interface with the Orchestrator to receive an IP address block during system bring-up. The service can allocate and release IP addresses for the subscriber on the request from S-PGW Control Plane. + +### Directoryd + +*directoryd* service. Lookup service where you are able to push different keys and attribute pairs for each key. Commonly used keys include subscriber ID and session ID. + +### Sessiond + +*sessiond* service. Sessiond implements the control plane for the PCEF functionality in Magma. Sessiond is responsible for the lifecycle management of the session state (credit and rules) associated with a user. It interacts with the PCEF datapath through pipelined for L2-L4 and DPId for L4-L7 policies. + +### PolicyDB + +*policydb* service. PolicyDB is the service that supports static PCRF rules. This service runs in both the AGW and the orchestrator. Rules managed through the rest API are streamed to the PolicyDB instances on the AGW. Sessiond ensures these policies are implemented as specified. + +### DPId + +*dpid* service. 
Deep packet inspection service to enforce policy rules. + +### Pipelined + +*pipelined* service. Pipelined is the control application that programs the OVS openflow rules. Pipelined is a set of services that are chained together. These services can be chained and enabled/disabled through the REST API. If pipelined is restarted, users service will be affected. + +### Eventd + +*eventd* service. Service that acts like an intermediary for different magma services, using the service303 interface, it will receive and push the generated registered events to the td-agent-bit service on the gateway, so these can be then later sent to Orchestrator. These events will be sent to ElasticSearch where they can be queried. + +### SMSd + +*smsd* service. service that functions as the AGW interface that will sync the SMS information with Orchestrator. + +### Ctraced + +*ctraced* service. Service used for managing call tracing on the AGW. The Tshark tool is used for packet capture and filtering. Packet captures are sent back to the Orchestrator and viewable on the NMS. Preferred usage for call tracing is through the NMS. + +### Health checker + +*health* service. Health checker service that verifies the state on MME, mobilityd, sessiond and pipelined and cleans corrupt state if necessary. + +### Control proxy + +*control_proxy* service. Control proxy manages the network transport between the gateways and the controller (Orchestrator). It also provides the following functionality + +- Abstract service addressing by providing a service registry, mapping a user-addressable name to its remote IP and port +- Push all traffic over HTTP/2, encrypted using TLS. The traffic is routed to individual services by encoding the service name in the HTTP/2 `authority` header. +- Individual gRPC calls between a gateway and the controller are multiplexed over the same HTTP/2 connection, avoiding connection setup time per RPC call + +### Header enrichment + +There are two services within header enrichment: the envoy controller and envoy_dp. Envoy functions as the de facto HTTP proxy used for service mesh and can be programmed via the gRPC API. Additionally, the Envoy dataplane can scale up and scale down according to resources on AGW. The envoy controller is programmed by pipelined according to the set of rules in place. + +## Dynamic services + +Dynamic services are optional AGW services that must be enabled by updating the AGW's *magmad* configuration. + +### Monitord + +*monitord* service. Monitors the CPEs connected to the AGW. Sends ICMP pings to the CPEs connected to the gateway and reports liveness metrics. + +### Td-agent-bit + +*td-agent-bit* service. Enables log aggregation and event logging where it takes input from syslog and the events service and forwards the output to the Orchestrator. It is received on the Orchestrator by Fluentd then stored in Elasticsearch. 
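+
+As a minimal sketch of how an optional service is turned on (the exact file
+location and key names can differ between releases, so treat this as an
+assumption to verify against your gateway rather than an authoritative recipe):
+
+```bash
+# Illustrative: add the optional service (e.g. monitord) to magmad's list of
+# dynamic services in its YAML config, then restart magmad so it starts the service.
+sudo vim /etc/magma/magmad.yml          # edit the dynamic services list
+sudo service magma@magmad restart
+sudo service magma@monitord status      # confirm the optional service is running
+```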
diff --git a/docs/docusaurus/versioned_docs/version-1.8.0/lte/build_install_magma_pkg_in_agw.md b/docs/docusaurus/versioned_docs/version-1.8.0/lte/build_install_magma_pkg_in_agw.md new file mode 100644 index 000000000000..695bc18a0e58 --- /dev/null +++ b/docs/docusaurus/versioned_docs/version-1.8.0/lte/build_install_magma_pkg_in_agw.md @@ -0,0 +1,77 @@ +--- +id: version-1.8.0-build_install_magma_pkg_in_agw +title: Build and install a magma package in AGW +hide_title: true +original_id: build_install_magma_pkg_in_agw +--- +# Build and install a magma package in AGW + +**Description:** Purpose of this document is describe how to build and install a magma package in AGW. + +**Environment:** AGW + +**Steps:** + +1. **Clone or Update your local repository**. In any machine (i.e. your computer) clone the magma repo and checkout the version from where you would like to build your package. For example, for v1.8 you can run: + + ```bash + git clone https://github.com/magma/magma.git + git checkout v1.8 + ``` + +2. **Install prerequisites**. Make sure you have installed all the tools specified in the prerequisites + +3. **Build and create deb package**. + To build an AGW package, use the script located at `$MAGMA_ROOT/lte/gateway/fabfile.py`. The commands below will create a vagrant machine, then build and create a deb package. + + The following commands are to be run from `$MAGMA_ROOT/lte/gateway` on your host machine. + To create a package for production, run + + ```bash + fab release package + ``` + + To create a package for development or testing, run + + ```bash + fab dev package + ``` + + The `dev` flag will compile all C++ services with `Debug` compiler flags and enable ASAN. This is recommended for testing only as it will impact performance. In contrast, the production package has C++ services built with `RelWithDebInfo` compiler flags. + +4. **Locate the packages**. Once the above command finished. You need to enter the VM to verify the deb packages are there. + + ```bash + vagrant ssh magma + cd ~/magma-packages/ + ``` + + You will need only the ones that say `magma_1.1.XXX` and `magma-sctpd_1.1.XXX` (for v1.1 versions) + +5. **Download the package**. You can download the files to your computer from the vagrant machine. To do so, you can install a vagrant plugin in your computer and then download the package from the VM to your computer with the following commands: + + ```bash + vagrant plugin install vagrant-scp + vagrant scp magma: ~/magma-packages/ + ``` + +6. **Upload the package to AGW** that you would like to install. + +7. **Install the package**. In order to install the new deb package in AGW, you can run + + ```bash + sudo apt -f install MAGMA_PACKAGE + ``` + +8. **Restart the magma services** + + ```bash + sudo service magma@* stop + sudo service magma@magmad restart + ``` + +9. 
You can **verify the installed version** with + + ```bash + apt show magma + ``` diff --git a/docs/docusaurus/versioned_docs/version-1.8.0/lte/deploy_install.md b/docs/docusaurus/versioned_docs/version-1.8.0/lte/deploy_install.md new file mode 100644 index 000000000000..92189d7591ee --- /dev/null +++ b/docs/docusaurus/versioned_docs/version-1.8.0/lte/deploy_install.md @@ -0,0 +1,106 @@ +--- +id: version-1.8.0-deploy_install +title: Install AGW +hide_title: true +original_id: deploy_install +--- + +# Install Access Gateway on Ubuntu (Bare Metal) + +## Prerequisites + +To set up a Magma Access Gateway, you will need a machine that +satisfies the following requirements: + +- AGW_HOST: 64bit-X86 machine, baremetal strongly recommended + (not virtualized). You will need two ethernet ports. In this guide, + `enp1s0` and `enp2s0` are used: + - `enp1s0`: Will carry any traffic that is not S1. That is, data plane traffic (SGi), + control plane traffic (Orc8r HTTP2) and management (ssh). + - `enp2s0`: S1 interface. + +> NOTE: +> +> - Interface names might have different names on your hardware, so just +> replace `enp1s0` and `enp2s0` with your current interface names +> when following this guide. +> +> - The `agw_install_ubuntu.sh` script will rename the `enp1s0` +> interface to `eth0`. +> +> - If you do not want all internet traffic to go through `enp1s0` +> to separate control plane (Orc8r Http2 traffic) from user plane, you +> may want to add another interface and configure proper routing. + +## Deployment + +### 1. Create boot USB stick and install Ubuntu on your AGW host + +- Download the Ubuntu Server 20.04 LTS `.iso` image from the Ubuntu website. Verify its integrity by checking [the hash](https://releases.ubuntu.com/20.04/SHA256SUMS). +- Create a bootable USB using this [Etcher tutorial](https://tutorials.ubuntu.com/tutorial/tutorial-create-a-usb-stick-on-macos#0) +- Boot your AGW host from USB + - Press F11 to select boot sequence. WARNING: This might be different for your machine. + - If you see two options to boot from USB, select the non-UEFI option. +- Install and configure your Access Gateway according to your network defaults. + - Make sure to enable ssh server and utilities (untick every other). +- Connect your SGi interface to the internet and select this port during the +installation process to get an IP using DHCP. (Consider enabling DHCP snooping to mitigate [Rogue DHCP](https://en.wikipedia.org/wiki/Rogue_DHCP)) + +### 2. Deploy magma on the AGW_HOST + +#### Run AGW installation + +To install on a server with a DHCP-configured SGi interface + +```bash +su +wget https://raw.githubusercontent.com/magma/magma/v1.8/lte/gateway/deploy/agw_install_ubuntu.sh +bash agw_install_ubuntu.sh +``` + +To install on a server with statically-allocated SGi interface. For example, +if SGi has an IP of 1.1.1.1/24 and the upstream router IP is 1.1.1.200 + +```bash +su +wget https://raw.githubusercontent.com/magma/magma/v1.8/lte/gateway/deploy/agw_install_ubuntu.sh +bash agw_install_ubuntu.sh 1.1.1.1/24 1.1.1.200 +``` + +The script will run a pre-check script that will tell you what will change on your machine +and prompt you for your approval. If you are okay with these changes, reply `yes` and magma will +be installed. If you respond with `no`, the installation will be stopped. 
+ +```bash + - Check if Ubuntu is installed + Ubuntu is installed + - Check for magma user + magma user is not Installed + - Check if both interfaces are named eth0 and eth1 + Interfaces will be renamed to eth0 and eth1 + eth0 will be set to dhcp and eth1 10.0.2.1 + Do you accept those modifications and want to proceed with magma installation?(y/n) + Please answer yes or no. +``` + +The machine will reboot but the installation is not finished yet; the script is still running in the background. +You can follow the output using + +```bash +journalctl -fu agw_installation +``` + +When you see "AGW installation is done.", it means that your installation is complete. You can make sure magma is running by executing + +```bash +service 'magma@*' status +``` + +#### Post-install check + +Make sure you have the `control_proxy.yml` file in directory `/var/opt/magma/configs/` +before running the post-install script + +```bash +bash /root/agw_post_install_ubuntu.sh +``` diff --git a/docs/docusaurus/versioned_docs/version-1.8.0/lte/deploy_install_docker.md b/docs/docusaurus/versioned_docs/version-1.8.0/lte/deploy_install_docker.md new file mode 100644 index 000000000000..4d96e5cf76c4 --- /dev/null +++ b/docs/docusaurus/versioned_docs/version-1.8.0/lte/deploy_install_docker.md @@ -0,0 +1,110 @@ +--- +id: version-1.8.0-deploy_install_docker +title: Install Docker AGW +hide_title: true +original_id: deploy_install_docker +--- + +# Install Docker-based Access Gateway on Ubuntu + +## Prerequisites + +To set up a Magma Access Gateway, you will need a machine that +satisfies the following requirements: + +- AGW_HOST: aarch64 or 64bit-X86 machine. You will need two ethernet ports. In this guide, + `enp1s0` and `enp2s0` are used: + - `enp1s0`: Will carry any traffic that is not S1. That is, data plane traffic (SGi), + control plane traffic (Orc8r HTTP2) and management (ssh). + - `enp2s0`: S1 interface. + +> NOTE: +> +> - Interface names might have different names on your hardware, so just +> replace `enp1s0` and `enp2s0` with your current interface names +> when following this guide. +> +> - The `agw_install_docker.sh` script will rename the `enp1s0` +> interface to `eth0`. +> +> - If you do not want all internet traffic to go through `enp1s0` +> to separate control plane (Orc8r Http2 traffic) from user plane, you +> may want to add another interface and configure proper routing. + +## Deployment + +### 1. Create boot USB stick and install Ubuntu on your AGW host + +- Download the Ubuntu Server 20.04 LTS `.iso` image from the Ubuntu website +- Create a bootable USB using this [Etcher tutorial](https://tutorials.ubuntu.com/tutorial/tutorial-create-a-usb-stick-on-macos#0) +- Boot your AGW host from USB + - Press F11 to select boot sequence. WARNING: This might be different for your machine. + - If you see two options to boot from USB, select the non-UEFI option. +- Install and configure your Access Gateway according to your network defaults. + - Make sure to enable ssh server and utilities (untick every other). +- Connect your SGi interface to the internet and select this port during the +installation process to get an IP using DHCP. + +### 2. 
diff --git a/docs/docusaurus/versioned_docs/version-1.8.0/lte/deploy_install_docker.md b/docs/docusaurus/versioned_docs/version-1.8.0/lte/deploy_install_docker.md
new file mode 100644
index 000000000000..4d96e5cf76c4
--- /dev/null
+++ b/docs/docusaurus/versioned_docs/version-1.8.0/lte/deploy_install_docker.md
@@ -0,0 +1,110 @@
+---
+id: version-1.8.0-deploy_install_docker
+title: Install Docker AGW
+hide_title: true
+original_id: deploy_install_docker
+---
+
+# Install Docker-based Access Gateway on Ubuntu
+
+## Prerequisites
+
+To set up a Magma Access Gateway, you will need a machine that
+satisfies the following requirements:
+
+- AGW_HOST: aarch64 or 64bit-X86 machine. You will need two ethernet ports. In this guide,
+  `enp1s0` and `enp2s0` are used:
+    - `enp1s0`: Will carry any traffic that is not S1. That is, data plane traffic (SGi),
+      control plane traffic (Orc8r HTTP2) and management (ssh).
+    - `enp2s0`: S1 interface.
+
+> NOTE:
+>
+> - Interfaces might have different names on your hardware, so just
+>   replace `enp1s0` and `enp2s0` with your actual interface names
+>   when following this guide.
+>
+> - The `agw_install_docker.sh` script will rename the `enp1s0`
+>   interface to `eth0`.
+>
+> - If you do not want all internet traffic to go through `enp1s0`,
+>   for example to separate the control plane (Orc8r HTTP2 traffic) from the
+>   user plane, you may want to add another interface and configure proper routing.
+
+## Deployment
+
+### 1. Create boot USB stick and install Ubuntu on your AGW host
+
+- Download the Ubuntu Server 20.04 LTS `.iso` image from the Ubuntu website
+- Create a bootable USB using this [Etcher tutorial](https://tutorials.ubuntu.com/tutorial/tutorial-create-a-usb-stick-on-macos#0)
+- Boot your AGW host from USB
+  - Press F11 to select the boot sequence. WARNING: This might be different for your machine.
+  - If you see two options to boot from USB, select the non-UEFI option.
+- Install and configure your Access Gateway according to your network defaults.
+  - Make sure to enable the ssh server and utilities (untick every other option).
+- Connect your SGi interface to the internet and select this port during the
+installation process to get an IP using DHCP.
+
+### 2. Deploy magma on the AGW_HOST
+
+#### Do pre-installation steps
+
+Become root user:
+
+```bash
+sudo -i
+```
+
+Copy your `rootCA.pem` file from orc8r to the following location:
+
+```bash
+mkdir -p /var/opt/magma/certs
+vim /var/opt/magma/certs/rootCA.pem
+```
+
+#### Run AGW installation
+
+Download and run the AGW docker install script:
+
+```bash
+wget https://github.com/magma/magma/raw/v1.8/lte/gateway/deploy/agw_install_docker.sh
+bash agw_install_docker.sh
+```
+
+#### Configure AGW
+
+Once you see the output `Reboot this machine to apply kernel settings`, reboot your AGW host.
+
+Create the `control_proxy.yml` file with your orc8r details:
+
+```bash
+cat << EOF | sudo tee /var/opt/magma/configs/control_proxy.yml
+cloud_address: controller.orc8r.magmacore.link
+cloud_port: 443
+bootstrap_address: bootstrapper-controller.orc8r.magmacore.link
+bootstrap_port: 443
+fluentd_address: fluentd.orc8r.magmacore.link
+fluentd_port: 24224
+
+rootca_cert: /var/opt/magma/certs/rootCA.pem
+EOF
+```
+
+Start your access gateway:
+
+```bash
+cd /var/opt/magma/docker
+sudo docker-compose up -d
+```
+
+Now get the Hardware ID and Challenge key and add the AGW to your orc8r:
+
+```bash
+docker exec magmad show_gateway_info.py
+```
+
+Then restart your access gateway:
+
+```bash
+sudo docker-compose up -d --force-recreate
+```
diff --git a/docs/docusaurus/versioned_docs/version-1.8.0/lte/dev_notes.md b/docs/docusaurus/versioned_docs/version-1.8.0/lte/dev_notes.md
new file mode 100644
index 000000000000..240b7bc3e3fe
--- /dev/null
+++ b/docs/docusaurus/versioned_docs/version-1.8.0/lte/dev_notes.md
@@ -0,0 +1,344 @@
+---
+id: version-1.8.0-dev_notes
+title: Developer Notes
+hide_title: true
+original_id: dev_notes
+---
+
+# Developer Notes
+
+This section provides a guide for anyone testing existing features,
+fixing a bug or adding a new feature to the Access Gateway. All developers are
+highly encouraged to maintain this guide to make sure it is up to date and
+continues to grow.
+
+## Configuration/system settings
+
+If you have a gateway running in a VM (as described in the [Quick Start
+Guide](../basics/quick_start_guide)), the `magma` directory is shared between
+the guest and host machine, so changes made on either system reflect on the
+other. Exceptions to this rule are the systemd unit files and python scripts.
+Changes to these files on the guest or host need to be manually synced.
+
+### Configuration files/directories
+
+- `/etc/magma/`: location of default configurations and templates for all
+  services
+- `/etc/magma/gateway.mconfig`: main file that contains the configuration for
+  all services, as exposed via the Orc8r API
+- `/var/opt/magma/configs/gateway.mconfig`: For gateways connected to an
+  orchestrator, the configuration from Orc8r is periodically streamed to the
+gateway and written here. This streamed config takes precedence over
+`/etc/magma/gateway.mconfig` (see the example after this list).
+- `/etc/magma/<service>.yml`: Service configuration file, in YAML format.
+  These configurations are local and are not exposed through the API. These
+include the logging level, local network interface names, etc.
+- `/etc/magma/templates/<service>.conf.template`: This contains the
+  structured template for the `<service>.conf` file used as input to
+some services, such as Control-proxy, Dnsd, MME and Redis.
+- `/var/opt/magma/tmp/<service>.conf`: The configuration file read by
+  some services, such as Control-proxy, Dnsd, MME and Redis, at start-up. This
+file is generated by mapping the configuration values from `gateway.mconfig` and
+`<service>.yml` to the template defined in `<service>.conf.template`.
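+
+For example, to check whether a streamed configuration is present and to inspect
+it (the mconfig is a JSON file; paths are the ones listed above and the commands
+are illustrative only):
+
+```bash
+ls -l /etc/magma/gateway.mconfig /var/opt/magma/configs/gateway.mconfig
+# pretty-print the streamed copy (only present on gateways connected to an Orc8r)
+python3 -m json.tool /var/opt/magma/configs/gateway.mconfig | head -n 20
+```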
+
+### Systemd unit configuration files
+
+- `/etc/systemd/system/magma@<service>.service`: Systemd unit
+  files for Magma services. Note that these files are maintained under
+`magma/lte/gateway/deploy/roles/magma/files/systemd/` and are copied into the
+`/etc/systemd/system` directory of the VM at the time of provisioning. You need
+to manually sync changes to these files between guest and host OS.
+
+### Python scripts to generate configs
+
+- `generate_<service>_config.py`: Scripts that generate the
+  `<service>.conf` file for some services. These are executed every time
+a service starts.
+Note that these files are maintained under `magma/lte/gateway/python/scripts`
+and copied to the `/usr/local/bin` directory of the guest OS at the time of
+provisioning. Changes to these scripts need to be manually synced between the
+guest and host OS.
+
+## Testing
+
+### Connecting a physical eNodeB and UE to Gateway VM
+
+While the [S1ap integration tests](s1ap_tests.md) provide simulated UEs and
+eNodeBs to test your AGW VM while actively developing, you can extend the
+testing to a physical UE and eNodeB.
+To connect a physical eNodeB to the gateway VM:
+
+1. Connect the eNodeB to a port on the host machine, say it is interface `en9`.
+1. From the VirtualBox GUI, switch the Adapter 2 (for the `eth1` interface) from
+`Host-only` to `Bridged` mode and bridge it to interface `en9` from above.
+1. In the gateway VM, modify the `nat_iface` in `/etc/magma/pipelined.yml` from
+`eth2` to `eth0`. Restart all services.
+1. In the gateway VM, follow the steps in [EnodeB
+Configuration](enodebd#basic-troubleshooting). Make sure the `earfcn` set in the
+enodebd section of `gateway.mconfig` is one that is supported by the eNodeB
+under consideration. Use this [calculator](https://www.sqimway.com/lte_band.php) to
+get the `earfcn` corresponding to the frequency range listed on the eNodeB.
+
+To connect a physical UE to the gateway VM:
+
+1. Use a programmable SIM which is provisioned with the LTE auth key that you
+will use in the EPC.
+1. On the gateway VM, add the subscriber using the CLI (see the verification
+example after this list):
+    - `magtivate`
+    - `subscriber_cli.py add --lte-auth-key <LTE auth key> IMSI<15 digit IMSI>`
+1. On the UE, turn airplane mode on, then off, to trigger a fresh attach.
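+
+A minimal way to verify the attach from the gateway VM is to follow the MME and
+SubscriberDB service logs while the UE re-attaches. This uses the same
+`journalctl` pattern as the Debugging section below:
+
+```bash
+sudo journalctl -fu magma@mme           # follow the attach procedure
+sudo journalctl -fu magma@subscriberdb  # subscriber database service logs
+```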
+
+### Connecting a physical AGW to S1AP test VM
+
+Another useful combination is to run the [S1ap integration tests](s1ap_tests.md)
+on a physical AGW directly connected to the host machine running the
+*magma_test* VM.
+
+#### On the magma_test host machine
+
+- Connect `eth1` of the physical AGW to a port on the host machine, say it is
+interface `en9`.
+- From the VirtualBox GUI, switch the Adapter 1 (for the `eth1` interface) from
+`Host-only` to `Bridged` mode and bridge it to interface `en9` from above.
+
+#### On the physical AGW
+
+- Change the static IP address of `eth1` to match the one expected by the test
+VM
+  - `sudo sed -i 's/address 10.0.2.1/address 192.168.60.142/g' /etc/network/interfaces.d/eth1`
+  - `sudo ifdown eth1`
+  - `sudo ifup eth1`
+  - `sudo ip addr del 10.0.2.1/24 dev eth1`
+- Enable DEV_MODE
+  - `sudo bash -c 'echo "MAGMA_DEV_MODE=1" >> /etc/environment'`
+- Enable the Pipelined `ryu_rest_service`
+  - `grep -qF 'ryu_rest_service' /etc/magma/pipelined.yml || sudo sed -i "s/static_services: \[/static_services: \[ \'ryu_rest_service\',/g" /etc/magma/pipelined.yml`
+- Restart all services
+  - `sudo service sctpd restart`
+
+#### On the test VM
+
+- Change credentials
+  - `sudo sed -i 's/"vagrant"/"magma"/g' /home/vagrant/magma/lte/gateway/python/integ_tests/s1aptests/s1ap_utils.py`
+- Disable use of `magtivate`
+  - `sudo sed -i 's/magtivate_cmd + " && " + //g' /home/vagrant/magma/lte/gateway/python/integ_tests/s1aptests/s1ap_utils.py`
+
+Now you can run the [S1ap integration tests](s1ap_tests.md) as usual.
+
+## Debugging
+
+### Logs
+
+To change the logging level for a particular service, modify the
+`log_level` in `/etc/magma/<service>.yml`. If `log_level` is missing, just
+add it. You can use `INFO`, `WARNING`, `ERROR`, `DEBUG`. When `log_level` is
+missing, the default level is the lowest, `INFO`.
+
+To see GRPC messages in your logs, check the next section.
+
+You can check the logs in different places:
+
+- `/var/log/syslog`: gives a good view of all the Magma services
+running on the AGW. This is a good place to check whether the AGW is connecting
+to the orchestrator, any GRPC errors, or which service is causing a cascaded
+crash (e.g. a crash in Sessiond can cause the MME service to terminate).
+  A good way to filter the logs from individual processes is with `journalctl`.
+For example, to look at logs from SubscriberDb use:
+  `sudo journalctl -fu magma@subscriberdb`
+
+- `/var/log/mme.log` is a symbolic link that points to the latest log file
+created by the MME service. The MME service creates a new log file with name
+`MME.magma-dev.root.log.INFO.-