diff --git a/acceptance/router_benchmark/BUILD.bazel b/acceptance/router_benchmark/BUILD.bazel
index 21f3f853da..c2b04051c9 100644
--- a/acceptance/router_benchmark/BUILD.bazel
+++ b/acceptance/router_benchmark/BUILD.bazel
@@ -57,9 +57,7 @@ py_binary(
         "--brload",
         "$(location //acceptance/router_benchmark/brload:brload)",
     ],
-    data = [
-        "//acceptance/router_benchmark/brload",
-    ],
+    data = data,
     imports = ["."],
     visibility = ["//visibility:public"],
     deps = [
diff --git a/acceptance/router_benchmark/benchmark.py b/acceptance/router_benchmark/benchmark.py
index 04e77807b5..1e18e990c1 100755
--- a/acceptance/router_benchmark/benchmark.py
+++ b/acceptance/router_benchmark/benchmark.py
@@ -105,6 +105,7 @@ class RouterBMTool(cli.Application, RouterBM):
     brload_cpus: list[int] = []
     artifacts = f"{os.getcwd()}/acceptance/router_benchmark"
     prom_address: str = "localhost:9090"
+    debug_run = False
 
     def host_interface(self, excl: bool):
         """Returns the next host interface that we should use for a brload links.
@@ -201,13 +202,14 @@ def setup(self, avail_interfaces: list[str]):
         # Check that the given interfaces are safe to use. We will wreck their config.
         for intf in avail_interfaces:
             output = sudo("ip", "addr", "show", "dev", intf)
-            if len(output.splitlines()) > 2:
-                logger.error(f"""\
-                Interface {intf} appears to be in some kind of use. Cowardly refusing to modify it.
-                If you have a network manager, tell it to disable or ignore that interface.
-                Else, how about \"sudo ip addr flush dev {intf}\"?
-                """)
-                raise RuntimeError("Interface in use")
+            for l in output.splitlines():
+                if l.strip().startswith("inet"):
+                    logger.error(f"""\
+                    Interface {intf} appears to be in some kind of use. Cowardly refusing to modify it.
+                    If you have a network manager, tell it to disable or ignore that interface.
+                    Else, how about \"sudo ip addr flush dev {intf}\"?
+                    """)
+                    raise RuntimeError("Interface in use")
 
         # Looks safe.
         self.avail_interfaces = avail_interfaces
@@ -290,7 +292,7 @@ def instructions(self):
         print(f"""
 INSTRUCTIONS:
 
-1 - Configure your subject router according to accept/router_benchmark/conf/router.toml")
+1 - Configure your subject router according to acceptance/router_benchmark/conf/.
    If using openwrt, an easy way to do that is to install the bmtools.ipk package.
    In addition, bmtools includes two microbenchmarks: scion-coremark and scion-mmbm. Those
    will run automatically and the results will be used to improve the benchmark report.
@@ -298,6 +300,10 @@ def instructions(self):
    Optional: If you did not install bmtools.ipk, install and run those microbenchmarks
    and make a note of the results: (scion-coremark; scion-mmbm).
 
+    On platforms for which the bmtools package is not available, the router can be configured by
+    copying *all* the files in acceptance/router_benchmark/conf/ to the subject router's
+    configuration directory (typically /etc/scion/).
+
 2 - Configure the following interfaces on your router (The procedure depends on your router UI)
    - All interfaces should have the mtu set to 9000:
    - One physical interface with addresses: {", ".join(multiplexed)}
@@ -307,7 +313,9 @@ def instructions(self):
    the "must reach" annotation matters. The '#' number is the order in which the corresponding
    host interface must be given on the command line in step 7.
 
-3 - Connect the corresponding ports into your test switch (best if dedicated for the test).
+3 - Connect the corresponding ports into your test switch (best if dedicated for the test). It is
+    highly recommended to connect *only* these ports. Leave other network interfaces unconnected
+    during the test.
 
 4 - Restart the scion-router service.
 
@@ -319,9 +327,9 @@ def instructions(self):
 6 - Connect the corresponding ports into your test switch. If using a partitioned network, make
    sure that port is reachable by the corresponding subject router port.
 
-7 - Execute this script with arguments: --run <interfaces>, where <interfaces> is the list
-    of names you collected in step 5. If using a partitioned network, make sure to supply them
-    in the order indicated in step 2.
+7 - Execute this script with arguments: --run <interfaces>, where <interfaces> is the
+    space-separated list of names you collected in step 5. If using a partitioned network, make
+    sure to supply them in the order indicated in step 2.
 
    If coremark and mmbm values are available, the report will include a performance index.
 
diff --git a/doc/dev/testing/benchmarking.rst b/doc/dev/testing/benchmarking.rst
index c3460edd7b..f7852c4330 100644
--- a/doc/dev/testing/benchmarking.rst
+++ b/doc/dev/testing/benchmarking.rst
@@ -4,10 +4,11 @@ Router Benchmark
 
 :program:`acceptance/router_benchmark/benchmark.py` is a tool to benchmark an external router.
 
-The usage is simply: ``acceptance/router_benchmark/benchmark.py``.
+The usage is simply: ``bazel run acceptance/router_benchmark:benchmark``.
 
 Without any options, the tool outputs instructions. Those instructions comprise how to configure
-the subject router and how to re-execute the tool so it actually carries the measurement.
+the subject router and how to re-execute the tool so it actually carries out the measurement.
+Remember to precede the added arguments with ``--`` so that they are not consumed by ``bazel run``.
 
 In order to accomplish the tool's instructions one will need to:
 
@@ -24,4 +25,5 @@ results available for pickup by :program:`benchmark.py`.
 
 Otherwise these operations still have to be carried out manually. The :program:`mmbm` and
 :program:`coremark` tools can be found in: ``bazel-bin/tools/mmbm/mmbm_/mmbm`` and
-``bazel-bin/tools/coremark/coremark``.
+``bazel-bin/tools/coremark/coremark``. The proper configuration of the subject router is the
+set of files in ``acceptance/router_benchmark/conf/``.
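
Reviewer note: the revised safety check in setup() now treats an interface as "in use" only if
"ip addr show" reports an assigned address (a line starting with "inet" or "inet6"), instead of
rejecting any interface whose output is longer than two lines. The sketch below mirrors that
per-line check so it can be tried outside the benchmark harness; the function name and the direct
subprocess call are illustrative only, since benchmark.py goes through its own sudo() helper:

    # Standalone sketch of the new "inet" check; not part of benchmark.py.
    import subprocess


    def assert_interface_unused(intf: str) -> None:
        """Raise if the interface already has an IPv4/IPv6 address assigned."""
        output = subprocess.run(
            ["ip", "addr", "show", "dev", intf],
            capture_output=True, text=True, check=True,
        ).stdout
        for line in output.splitlines():
            # "inet" matches both "inet" (IPv4) and "inet6" (IPv6) address lines.
            if line.strip().startswith("inet"):
                raise RuntimeError(f"Interface {intf} is in use")


    if __name__ == "__main__":
        assert_interface_unused("enp5s0")  # example interface name

Per the updated documentation, the interface names collected in step 5 would be passed after the
"--" separator, e.g. something like
"bazel run acceptance/router_benchmark:benchmark -- --run enp5s0 enp6s0"
(the interface names here are placeholders for the ones on your machine).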