diff --git a/.gitignore b/.gitignore
index 2782cfc4a..1ca0b4932 100644
--- a/.gitignore
+++ b/.gitignore
@@ -18,3 +18,7 @@ reports/
 #Ignore python cache
 __pycache__/
+
+#Ignore local files
+.DS_Store
+target/
diff --git a/scripts/benchrunner.py b/scripts/benchrunner.py
new file mode 100755
index 000000000..de9ed7d11
--- /dev/null
+++ b/scripts/benchrunner.py
@@ -0,0 +1,414 @@
+#!/usr/bin/env python3
+import argparse
+import csv
+import logging
+import re
+import subprocess
+from pathlib import Path
+
+
+def repo_root() -> Path:
+    """Return the root of the lind-wasm repo."""
+    return Path(__file__).resolve().parent.parent
+
+
+ROOT = repo_root()
+BENCH_DIR = ROOT / "tests" / "benchmarks"
+LIND_FS = ROOT / "lindfs"
+
+GRATES_REPO_URL = "https://github.com/Lind-Project/lind-wasm-example-grates"
+GRATES_REPO_DIR = BENCH_DIR / "grates"
+GRATES_EXAMPLES_DIR = GRATES_REPO_DIR / "examples"
+
+log = logging.getLogger(__name__)
+
+
+def run_cmd(cmd, timeout=180):
+    """Run a command and return CompletedProcess, return None on failure."""
+    try:
+        status = subprocess.run(cmd, timeout=timeout, check=True,
+                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    except subprocess.TimeoutExpired as e:
+        log.debug(f"Command timed out: {str(e)}")
+        return None
+    except subprocess.CalledProcessError as e:
+        log.debug(f"Called process error: {str(e)}")
+        return None
+    except (FileNotFoundError, PermissionError) as e:
+        log.debug(f"Binary not found: {str(e)}")
+        return None
+    except OSError as e:
+        log.debug(f"OS Error: {str(e)}")
+        return None
+
+    return status
+
+
+def bench_relpath(path: Path) -> Path:
+    """Return path relative to tests/benchmarks."""
+    return path.resolve().relative_to(BENCH_DIR)
+
+
+def lindfs_path(rel: Path) -> Path:
+    """Return absolute path inside lindfs for a relative benchmark path."""
+    return LIND_FS / rel
+
+
+def compile_lind(c_file: Path) -> str:
+    """Compile a C benchmark to wasm using lind_compile."""
+    status = run_cmd(["lind_compile", str(c_file),
+                      str(BENCH_DIR / "bench.c")])
+
+    if not status:
+        return None
+
+    rel = bench_relpath(c_file).with_suffix(".cwasm")
+    # lind_compile places outputs inside lindfs; lind-boot is chrooted there.
+    return rel.as_posix()
+
+
+def get_test_description(test_file: Path) -> str:
+    try:
+        text = Path(test_file).read_text()
+    except (OSError, UnicodeDecodeError):
+        return None
+
+    m = re.search(r'^\s*//\s*DESCRIPTION:\s*(.*)', text, re.MULTILINE)
+    if m:
+        return m.group(1).strip()
+
+    return None
+
+
+def compile_native(c_file: Path) -> Path:
+    """Compile a C benchmark to a native binary and place it in lindfs."""
+    rel = bench_relpath(c_file).with_suffix("")
+    out_path = lindfs_path(rel)
+    out_path.parent.mkdir(parents=True, exist_ok=True)
+
+    status = run_cmd(
+        [
+            "cc",
+            str(c_file),
+            str(BENCH_DIR / "bench.c"),
+            "-o",
+            str(out_path),
+        ]
+    )
+
+    if not status:
+        return None
+
+    return out_path
+
+
+def ensure_grates_repo():
+    """Ensure a sparse-checkout repo exists for grates."""
+    if not GRATES_REPO_DIR.exists():
+        run_cmd(
+            [
+                "git",
+                "clone",
+                "--filter=blob:none",
+                "--no-checkout",
+                GRATES_REPO_URL,
+                str(GRATES_REPO_DIR),
+            ]
+        )
+        run_cmd(["git", "-C", str(GRATES_REPO_DIR),
+                 "sparse-checkout", "init", "--cone"])
+
+
+def add_sparse_path(path: str):
+    """Add a path to the sparse-checkout set if needed."""
+    status = run_cmd(
+        ["git", "-C", str(GRATES_REPO_DIR), "sparse-checkout", "list"]
+    )
+    existing = []
+    if status:
+        existing = [
+            line.strip()
+            for line in status.stdout.decode("utf-8").splitlines()
+            if line.strip()
+        ]
+    if path not in existing:
+        new_paths = existing + [path]
+        status = run_cmd(
+            ["git", "-C", str(GRATES_REPO_DIR),
+             "sparse-checkout", "set"] + new_paths
+        )
+    # Pull latest changes.
+    run_cmd(["git", "-C", str(GRATES_REPO_DIR), "checkout", "main"])
+
+
+def resolve_grate_dir(grate_name: str) -> Path:
+    """Find a grate directory, preferring the external repo."""
+    ensure_grates_repo()
+    add_sparse_path(f"examples/{grate_name}")
+    repo_path = GRATES_EXAMPLES_DIR / grate_name
+    if repo_path.exists():
+        return repo_path
+    return BENCH_DIR / grate_name
+
+
+def compile_grate(grate_dir: Path) -> str:
+    """Compile a grate folder and return the output path inside lindfs."""
+    status = run_cmd(["bash", str(grate_dir / "compile_grate.sh"), "."])
+    if not status:
+        return None
+    rel = bench_relpath(grate_dir).with_suffix(".cwasm")
+    return rel.name
+
+
+def parse_output(res, output, platform, description=None):
+    """Parse benchmark output lines and update results."""
+    try:
+        for line in output.decode("utf-8").splitlines():
+            parts = [part.strip() for part in line.split("\t")]
+            if len(parts) != 4:
+                continue
+            test, param, loops, avg = parts
+
+            if test not in res:
+                res[test] = {}
+            if param not in res[test]:
+                res[test][param] = {"linux": -1,
+                                    "lind": -1, "grate": -1, "loops": -1}
+
+            if description:
+                res[test][param]["desc"] = description
+
+            res[test][param][platform] = avg
+            res[test][param]["loops"] = loops
+    except Exception:
+        print("Invalid output from test: ", output.decode("utf-8"))
+
+
+def run_lind(wasm_paths, res, platform, description=None):
+    """Run lind-boot with one or more wasm paths."""
+    cmd = ["lind_run"] + wasm_paths
+    status = run_cmd(cmd)
+    if status:
+        parse_output(res, status.stdout, platform, description)
+
+    return status
+
+
+def run_native(binary_path: Path, res, description=None):
+    """Run a native benchmark binary."""
+    status = run_cmd([str(binary_path)])
+    if status:
+        parse_output(res, status.stdout, "linux", description)
+
+    return status
+
+
+def run_grate_test(grate_dir: Path, res, description=None):
+    """Run a grate test described by a .grate file or directory."""
+    bins = []
+
+    for part in grate_dir.name.split("."):
+        if part.endswith("-grate"):
+            grate_bin = compile_grate(resolve_grate_dir(part))
+            if not grate_bin:
+                return None
+            bins.append(grate_bin.replace("-", "_"))
+        else:
+            c_file = BENCH_DIR / f"{part}.c"
+            bins.append(compile_lind(c_file))
+
+    return run_lind(bins, res, "grate", description)
+
+
+def to_int(value):
+    """Best-effort int conversion for numeric strings."""
+    if isinstance(value, int):
+        return value
+    try:
+        return int(value)
+    except (TypeError, ValueError):
+        return -1
+
+
+def try_int(value):
+    try:
+        return int(value)
+    except (TypeError, ValueError):
+        return value
+
+
+def format_ratio(value, base):
+    """Format value and its ratio to base."""
+    v = to_int(value)
+    b = to_int(base)
+    if v < 0:
+        return "--"
+    if b <= 0:
+        return str(value)
+    return f"{v} ({v / b:.3f})"
+
+
+def build_display_rows(res):
+    rows = []
+    for test in res:
+        for param in res[test]:
+            linux = res[test][param]["linux"]
+            lind = res[test][param]["lind"]
+            grate = res[test][param]["grate"]
+            loops = res[test][param]["loops"]
+            desc = res[test][param].get("desc", "--")
+
+            rows.append(
+                [
+                    test,
+                    param,
+                    format_ratio(linux, linux),
+                    format_ratio(lind, linux),
+                    format_ratio(grate, linux),
+                    loops,
+                    desc,
+                ]
+            )
+
+    # Sort results by (test, param):
+    # Ensures all tests from the same benchmark are clubbed together,
+    # and wherever possible are sorted in increasing order of param.
+    rows.sort(key=lambda r: (r[0], try_int(r[1])))
+
+    last_test = None
+    for row in rows:
+        if row[0] == last_test:
+            row[-1] = ""
+        else:
+            last_test = row[0]
+
+    return rows
+
+
+def print_results(res):
+    """Print results as a padded table sorted by test and param."""
+    rows = build_display_rows(res)
+    if len(rows) == 0:
+        return
+
+    headers = ("TEST", "PARAM", "LINUX (ns)",
+               "LIND (ns)", "GRATE (ns)", "ITERATIONS", "DESCRIPTION")
+    widths = [len(h) for h in headers]
+    for row in rows:
+        for i, val in enumerate(row):
+            widths[i] = max(widths[i], len(str(val)))
+
+    fmt = " ".join([f"{{:<{w}}}" for w in widths])
+    print(fmt.format(*headers))
+    print(" ".join(["-" * w for w in widths]))
+    for row in rows:
+        print(fmt.format(*row))
+
+
+def format_raw(value):
+    if value in (-1, None, "-1"):
+        return "--"
+    return str(value)
+
+
+def write_csv(res, path: Path):
+    """Write results as CSV to a file."""
+    headers = [
+        "TEST",
+        "PARAM",
+        "LINUX (ns)",
+        "LIND (ns)",
+        "GRATE (ns)",
+        "ITERATIONS",
+        "DESCRIPTION",
+    ]
+    rows = build_display_rows(res)
+
+    with open(path, "w", encoding="utf-8", newline="") as f:
+        writer = csv.writer(f)
+        writer.writerow(headers)
+        writer.writerows(rows)
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(
+        description="Run lind-wasm microbenchmarks")
+    parser.add_argument(
+        "patterns",
+        nargs="*",
+        help="Test name prefixes (e.g. fs_ imfs_). Defaults to all.",
+    )
+    parser.add_argument(
+        "-o", "--out", dest="output_csv", help="Write results to CSV file"
+    )
+    parser.add_argument(
+        "-d", "--debug", action="store_true"
+    )
+
+    return parser.parse_args()
+
+
+def collect_tests(patterns):
+    """Return benchmark paths matching patterns."""
+    if not patterns:
+        patterns = [""]
+    files = []
+    for p in patterns:
+        for path in BENCH_DIR.glob(f"{p}*"):
+            if path.name in ("bench.c",):
+                continue
+            if path.is_file() and path.suffix in (".c", ".grate"):
+                files.append(path)
+    return files
+
+
+def build_grate_description(grate_file: Path) -> str:
+    grate_desc = get_test_description(grate_file)
+    parts = grate_file.with_suffix("").name.split(".")
+
+    descs = []
+    for part in parts:
+        c_desc = get_test_description(BENCH_DIR / f"{part}.c")
+        if c_desc:
+            descs.append(c_desc)
+
+    if grate_desc:
+        descs.append(grate_desc)
+
+    if not descs:
+        return "--"
+
+    return " / ".join(descs)
+
+
+def main():
+    args = parse_args()
+    logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)
+    tests = collect_tests(args.patterns)
+    res = {}
+
+    for test in tests:
+        if test.suffix == ".c":
+            print("Running: ", test)
+            native_path = compile_native(test)
+            lind_path = compile_lind(test)
+            if not native_path or not lind_path:
+                print("Failed to compile. Skipping.")
+                continue
+            desc = get_test_description(test)
+            run_lind([lind_path], res, "lind", desc)
+            run_native(native_path, res, desc)
+        elif test.suffix == ".grate":
+            print("Running: ", test)
+            desc = build_grate_description(test)
+            status = run_grate_test(test.with_suffix(""), res, desc)
+            if not status:
+                print("Failed to compile. Skipping.")
+
+    if args.output_csv:
+        write_csv(res, Path(args.output_csv))
+    else:
+        print_results(res)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/tests/benchmarks/README.md b/tests/benchmarks/README.md
new file mode 100644
index 000000000..4a642dbb0
--- /dev/null
+++ b/tests/benchmarks/README.md
@@ -0,0 +1,129 @@
+# Lind Benchmarks
+
+This directory contains microbenchmarks used by `scripts/benchrunner.py`.
+Each benchmark prints results in a tab-delimited format so the runner can parse them.
+
+Sample outputs:
+
+```bash
+lind@232affd4dc4d:~/lind-wasm$ ./scripts/benchrunner.py fs_read imfs-grate.fs_read.grate
+Running:  /home/lind/lind-wasm/tests/benchmarks/fs_read.c
+Running:  /home/lind/lind-wasm/tests/benchmarks/imfs-grate.fs_read.grate
+TEST PARAM LINUX (ns)  LIND (ns)   GRATE (ns)   ITERATIONS DESCRIPTION
+---- ----- ----------- ----------- ------------ ---------- ----------------------------------------
+Read 1     214 (1.000) 414 (1.935) 824 (3.850)  1000000    Issues pread() for buffer of size PARAM. / Grate used: In-Memory File System.
+Read 1024  226 (1.000) 436 (1.929) 855 (3.783)  1000000
+Read 4096  244 (1.000) 433 (1.775) 930 (3.811)  1000000
+Read 10240 359 (1.000) 532 (1.482) 1046 (2.914) 1000
+```
+
+```bash
+lind@232affd4dc4d:~/lind-wasm$ ./scripts/benchrunner.py -o results.csv fs_read imfs-grate.fs_read.grate
+Running:  /home/lind/lind-wasm/tests/benchmarks/fs_read.c
+Running:  /home/lind/lind-wasm/tests/benchmarks/imfs-grate.fs_read.grate
+TEST,PARAM,LINUX (ns),LIND (ns),GRATE (ns),ITERATIONS,DESCRIPTION
+Read,1,"214 (1.000)","414 (1.935)","824 (3.850)",1000000,"Issues pread() for buffer of size PARAM. / Grate used: In-Memory File System."
+Read,1024,"226 (1.000)","436 (1.929)","855 (3.783)",1000000,
+Read,4096,"244 (1.000)","433 (1.775)","930 (3.811)",1000000,
+Read,10240,"359 (1.000)","532 (1.482)","1046 (2.914)",1000,
+```
+
+## Output Format
+
+Each benchmark uses `bench.c/bench.h` and prints exactly one line per data point:
+
+```
+<test>\t<param>\t<loops>\t<avg_ns>
+```
+
+Fields:
+- `test`: human-readable label (string)
+- `param`: parameter for the test (size, id, etc.)
+- `loops`: number of iterations used to compute the average
+- `avg_ns`: average time per iteration in **nanoseconds**
+
+The helper function `emit_result()` in `bench.c` enforces this format.
+
+Descriptions (optional) can be placed at the top of a test file in the form:
+
+```
+// DESCRIPTION: Issues pread() for buffer of size PARAM.
+```
+
+For `.grate` tests, the description is formed by concatenating the `.c` test
+description(s) with the `.grate` file description. The console table prints the
+description only on the first row for a given test; CSV mirrors the console output.
+
+## How Timing Works
+
+Benchmarks record a start and end timestamp with `gettimens()`
+and compute `avg = (end - start) / loops`.
+`gettimens()` uses `clock_gettime(CLOCK_MONOTONIC)` to avoid time jumps.
+
+## Running
+
+From repo root:
+
+```
+python3 scripts/benchrunner.py
+```
+
+Optional:
+- `python3 scripts/benchrunner.py fs_ sys_` runs only tests whose filename starts with `fs_` or `sys_`
+- `python3 scripts/benchrunner.py --out results.csv` writes CSV instead of a table
+
+## Benchmark Types
+
+### Syscall Tests
+Examples: `sys_close.c`, `sys_geteuid.c`
+These call a single syscall in a tight loop and report average time per call.
+
+### File System Tests
+Examples: `fs_read.c`, `fs_write.c`
+These vary the operation size (`param` is bytes) and adjust loop counts
+to keep runtimes reasonable.
+
+### IPC Tests
+Examples: `ipc_pipe.c`, `ipc_uds.c`
+These measure round-trip time (RTT) for different message sizes.
+`param` is message size in bytes.
+
+## Grate Tests
+
+Files ending in `.grate` represent a grate-backed benchmark.
+`benchrunner.py` interprets the filename as a dot-separated list of inputs.
+For example:
+
+```
+imfs-grate.fs_read.grate
+```
+
+This means:
+- fetch `examples/imfs-grate/` from the [lind-wasm-example-grates](https://github.com/Lind-Project/lind-wasm-example-grates) repo
+- compile the grate via `compile_grate.sh`
+- compile `tests/benchmarks/fs_read.c` as a cage
+- run `lind_run imfs_grate fs_read` so the syscall is interposed by the grate
+
+The `.grate` test files are used to encode the expected order to launch grates and cages,
+and can optionally include a `// DESCRIPTION:` line to describe the intended workflow being benchmarked.
+
+## Adding a New Test
+
+### Regular (non-grate) tests
+1. Create a new `.c` file in `tests/benchmarks/`.
+2. Include `bench.h` and use `gettimens()` + `emit_result(...)`.
+3. Make sure any required artifacts (files, sockets, temp paths) are created and
+   cleaned up inside the test itself.
+4. Use a consistent `test` label in `emit_result(...)`. Results with the same label and
+   `param` are aggregated across platforms during a single `benchrunner.py` run.
+
+### Grate tests
+Taking the example of adding a `geteuid_grate` test:
+1. Ensure that the grate exists in the `lind-wasm-example-grates` repo with a valid `compile_grate.sh` script.
+2. Add a file ending with `.grate`. The name of this file encodes the order of execution for this test. Name the file `geteuid-grate.geteuid.grate` which would run `lind-boot geteuid_grate.cwasm geteuid.cwasm`. You can add a `// DESCRIPTION:` line here for grate-specific context.
+3. For components that are not grates, these files must already be present in `tests/benchmarks/`. In this case, `geteuid.c` must already exist.
+
+### Aggregation:
+- Results are keyed by the `test` label and `param`.
+- If you have multiple binaries (linux/lind/grate) that emit the same `(label, param)` the
+  results are grouped together in the final table.
diff --git a/tests/benchmarks/bench.c b/tests/benchmarks/bench.c
new file mode 100644
index 000000000..d8b780036
--- /dev/null
+++ b/tests/benchmarks/bench.c
@@ -0,0 +1,25 @@
+#include "bench.h"
+#include <stdio.h>
+#include <time.h>
+
+// Shared sizes for FS and IPC read/writes.
+int fs_sizes[4] = {1, KiB(1), KiB(4), KiB(10)};
+int ipc_sizes[4] = {1, KiB(1), KiB(4), KiB(10)};
+
+// Returns a monotonic timestamp in nanoseconds.
+long long gettimens() {
+    struct timespec tp;
+    clock_gettime(CLOCK_MONOTONIC, &tp);
+    return (long long)tp.tv_sec * 1000000000LL + tp.tv_nsec;
+}
+
+// Emits one benchmark row in the format:
+// <test>\t<param>\t<loops>\t<avg_ns>
+void emit_result(char *test, int param, long long average, int loops) {
+    printf("%s\t%d\t%d\t%lld\n", test, param, loops, average);
+}
+
+// Emits benchmark row with a string param.
+void emit_result_string(char *test, char *param, long long average, int loops) {
+    printf("%s\t%s\t%d\t%lld\n", test, param, loops, average);
+}
diff --git a/tests/benchmarks/bench.h b/tests/benchmarks/bench.h
new file mode 100644
index 000000000..5d8364aab
--- /dev/null
+++ b/tests/benchmarks/bench.h
@@ -0,0 +1,27 @@
+// Helper macros for IPC and FS test message sizes.
+#define KiB(x) ((size_t)(x) << 10)
+#define MiB(x) ((size_t)(x) << 20)
+
+// Iteration constants
+#define LOOPS_SMALL 10000
+#define LOOPS_LARGE 1000000
+#define IO_THRESHOLD 4096
+
+// For payloads of larger sizes, running larger loops slows down the benchmarking without providing any meaningful improvement of data.
+// Dynamically pick a smaller loop count for larger payloads.
+#define IO_LOOP_COUNT(size) ((size) > IO_THRESHOLD ? LOOPS_SMALL : LOOPS_LARGE)
+
+#define FS_SIZE_COUNT (sizeof(fs_sizes)/sizeof(fs_sizes[0]))
+#define IPC_SIZE_COUNT (sizeof(ipc_sizes)/sizeof(ipc_sizes[0]))
+
+extern int fs_sizes[4];
+extern int ipc_sizes[4];
+
+// Monotonic timer in nanoseconds for microbenchmarks.
+long long gettimens();
+
+// Print one benchmark row in benchrunner.py's tab-delimited format:
+// <test>\t<param>\t<loops>\t<avg_ns>
+void emit_result(char* test, int param, long long average, int loops);
+
+void emit_result_string(char* test, char* param, long long average, int loops);
diff --git a/tests/benchmarks/fs_read.c b/tests/benchmarks/fs_read.c
new file mode 100644
index 000000000..e688604ce
--- /dev/null
+++ b/tests/benchmarks/fs_read.c
@@ -0,0 +1,49 @@
+// DESCRIPTION: Issues pread() for buffer of size PARAM.
+#include "bench.h"
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+void read_size(size_t count) {
+    char *buf = malloc(count);
+
+    int fd = open("tmp_fs_read.txt", O_RDONLY, 0);
+
+    int loops = IO_LOOP_COUNT(count);
+
+    long long start_time = gettimens();
+    for (int i = 0; i < loops; i++) {
+        pread(fd, buf, count, 0);
+    }
+    long long end_time = gettimens();
+
+    long long avg_time = (end_time - start_time) / loops;
+    emit_result("Read", count, avg_time, loops);
+
+    close(fd);
+    free(buf);
+}
+
+int main(int argc, char *argv[]) {
+    // Create a temporary file of appropriate size to be read later.
+    int max_size = 0;
+    for (int i = 0; i < FS_SIZE_COUNT; i++) {
+        if (max_size < fs_sizes[i])
+            max_size = fs_sizes[i];
+    }
+    char wbuf[max_size];
+    memset(wbuf, 'A', max_size);
+
+    int fd = open("tmp_fs_read.txt", O_CREAT | O_WRONLY, 0666);
+    write(fd, &wbuf, max_size);
+    close(fd);
+
+    // Run benchmarks.
+    for (int i = 0; i < FS_SIZE_COUNT; i++) {
+        read_size(fs_sizes[i]);
+    }
+
+    unlink("tmp_fs_read.txt");
+}
diff --git a/tests/benchmarks/fs_write.c b/tests/benchmarks/fs_write.c
new file mode 100644
index 000000000..019af1608
--- /dev/null
+++ b/tests/benchmarks/fs_write.c
@@ -0,0 +1,45 @@
+// DESCRIPTION: Issues pwrite() for buffer of size PARAM.
+#include "bench.h"
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+void write_size(size_t count) {
+    char *buf = malloc(count);
+    if (buf == NULL) {
+        perror("malloc");
+        exit(1);
+    }
+
+    memset(buf, 'A' + (count % 26), count);
+
+    int fd = open("fs_write.txt", O_CREAT | O_WRONLY, 0644);
+
+    int loops = IO_LOOP_COUNT(count);
+
+    long long start_time = gettimens();
+    for (int i = 0; i < loops; i++) {
+        pwrite(fd, buf, count, 0);
+    }
+    long long end_time = gettimens();
+
+    long long total_time = end_time - start_time;
+
+    close(fd);
+    free(buf);
+
+    long long avg_time = total_time / loops;
+
+    emit_result("Write", count, avg_time, loops);
+}
+
+int main(int argc, char *argv[]) {
+    // Run benchmarks.
+    for (int i = 0; i < FS_SIZE_COUNT; i++) {
+        write_size(fs_sizes[i]);
+    }
+
+    unlink("fs_write.txt");
+}
diff --git a/tests/benchmarks/geteuid-grate.sys_geteuid.grate b/tests/benchmarks/geteuid-grate.sys_geteuid.grate
new file mode 100644
index 000000000..8e070b73f
--- /dev/null
+++ b/tests/benchmarks/geteuid-grate.sys_geteuid.grate
@@ -0,0 +1 @@
+// DESCRIPTION: Interposing on geteuid() to measure latency of no-op grate syscalls.
diff --git a/tests/benchmarks/imfs-grate.fs_read.grate b/tests/benchmarks/imfs-grate.fs_read.grate
new file mode 100644
index 000000000..772357e64
--- /dev/null
+++ b/tests/benchmarks/imfs-grate.fs_read.grate
@@ -0,0 +1 @@
+// DESCRIPTION: In-Memory File System grate interposing on FS calls.
diff --git a/tests/benchmarks/imfs-grate.fs_write.grate b/tests/benchmarks/imfs-grate.fs_write.grate
new file mode 100644
index 000000000..772357e64
--- /dev/null
+++ b/tests/benchmarks/imfs-grate.fs_write.grate
@@ -0,0 +1 @@
+// DESCRIPTION: In-Memory File System grate interposing on FS calls.
diff --git a/tests/benchmarks/ipc_pipe.c b/tests/benchmarks/ipc_pipe.c
new file mode 100644
index 000000000..2331e3a42
--- /dev/null
+++ b/tests/benchmarks/ipc_pipe.c
@@ -0,0 +1,93 @@
+// DESCRIPTION: Anonymous pipe RTT for PARAM bytes via parent/child ping-pong.
+#include "bench.h"
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+void bench_pipe(int msg_size) {
+    int p2c[2], c2p[2];
+
+    int loops = IO_LOOP_COUNT(msg_size);
+
+    if (pipe(p2c) || pipe(c2p)) {
+        perror("pipe");
+        exit(1);
+    }
+
+    pid_t pid = fork();
+    if (pid < 0) {
+        perror("fork");
+        exit(1);
+    }
+
+    // Child
+    if (pid == 0) {
+        close(p2c[1]);
+        close(c2p[0]);
+
+        char *buf = malloc(msg_size);
+        if (buf == NULL) {
+            exit(0);
+        }
+        for (int i = 0; i < loops; i++) {
+            ssize_t off = 0;
+            while (off < msg_size) {
+                ssize_t n = read(p2c[0], buf, msg_size);
+                off += n;
+            }
+            off = 0;
+            while (off < msg_size) {
+                ssize_t n = write(c2p[1], buf, msg_size);
+                off += n;
+            }
+        }
+
+        free(buf);
+
+        close(p2c[0]);
+        close(c2p[0]);
+        _exit(0);
+    }
+
+    // Parent
+    close(p2c[0]);
+    close(c2p[1]);
+    char *buf = malloc(msg_size);
+    if (buf == NULL) {
+        exit(0);
+    }
+    memset(buf, 0x42, msg_size);
+
+    long long t0 = gettimens();
+    for (int i = 0; i < loops; i++) {
+        ssize_t off = 0;
+        while (off < msg_size) {
+            ssize_t n = write(p2c[1], buf, msg_size);
+            off += n;
+        }
+        off = 0;
+        while (off < msg_size) {
+            ssize_t n = read(c2p[0], buf, msg_size);
+            off += n;
+        }
+    }
+    long long t1 = gettimens();
+
+    free(buf);
+
+    close(p2c[1]);
+    close(c2p[0]);
+    wait(NULL);
+
+    emit_result("Pipe (RTT)", msg_size, (t1 - t0) / loops, loops);
+}
+
+int main() {
+    for (int i = 0; i < IPC_SIZE_COUNT; i++) {
+        bench_pipe(ipc_sizes[i]);
+    }
+}
diff --git a/tests/benchmarks/ipc_uds.c b/tests/benchmarks/ipc_uds.c
new file mode 100644
index 000000000..b0ba88288
--- /dev/null
+++ b/tests/benchmarks/ipc_uds.c
@@ -0,0 +1,134 @@
+// DESCRIPTION: (AF_UNIX, STREAM|DGRAM) RTT through ping-pong of PARAM bytes.
+#include "bench.h"
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <sys/un.h>
+#include <unistd.h>
+
+void uds_dgram(int msg_size) {
+    int sv[2];
+    if (socketpair(AF_UNIX, SOCK_DGRAM, 0, sv)) {
+        perror("socketpair");
+        exit(1);
+    }
+
+    int loops = IO_LOOP_COUNT(msg_size);
+
+    pid_t pid = fork();
+
+    if (pid < 0) {
+        perror("fork");
+        exit(1);
+    }
+
+    // Child
+    if (pid == 0) {
+        close(sv[0]);
+        char *buf = malloc(msg_size);
+        if (buf == NULL) {
+            exit(1);
+        }
+        for (int i = 0; i < loops; i++) {
+            ssize_t n = recv(sv[1], buf, msg_size, 0);
+            if (n <= 0) {
+                exit(1);
+            }
+            send(sv[1], buf, n, 0);
+        }
+        close(sv[1]);
+        exit(0);
+    }
+
+    // Parent
+    close(sv[1]);
+    char *buf = malloc(msg_size);
+    if (buf == NULL) {
+        exit(1);
+    }
+    memset(buf, 0x42, msg_size);
+
+    long long start = gettimens();
+    for (int i = 0; i < loops; i++) {
+        send(sv[0], buf, msg_size, 0);
+        recv(sv[0], buf, msg_size, 0);
+    }
+    long long end = gettimens();
+
+    free(buf);
+
+    char param[30] = "";
+    sprintf(param, "(DGRAM, %d)", msg_size);
+    emit_result_string("Unix Domain Socket (RTT)", param,
+                       (end - start) / loops, loops);
+}
+
+void uds_stream(int msg_size) {
+    int sv[2];
+    if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv)) {
+        perror("socketpair");
+        exit(1);
+    }
+
+    int loops = IO_LOOP_COUNT(msg_size);
+    pid_t pid = fork();
+
+    if (pid < 0) {
+        perror("fork");
+        exit(1);
+    }
+
+    // Child
+    if (pid == 0) {
+        close(sv[0]);
+        char *buf = malloc(msg_size);
+        if (buf == NULL) {
+            exit(1);
+        }
+        for (int i = 0; i < loops; i++) {
+            ssize_t n = recv(sv[1], buf, msg_size, 0);
+            if (n <= 0) {
+                exit(1);
+            }
+            send(sv[1], buf, n, 0);
+        }
+        close(sv[1]);
+        free(buf);
+        exit(0);
+    }
+
+    // Parent
+    close(sv[1]);
+    char *buf = malloc(msg_size);
+    if (buf == NULL) {
+        exit(0);
+    }
+    memset(buf, 0x42, msg_size);
+
+    long long start = gettimens();
+    for (int i = 0; i < loops; i++) {
+        send(sv[0], buf, msg_size, 0);
+        recv(sv[0], buf, msg_size, 0);
+    }
+    long long end = gettimens();
+
+    free(buf);
+
+    char param[30] = "";
+    sprintf(param, "(STREAM, %d)", msg_size);
+    emit_result_string("Unix Domain Socket (RTT)", param,
+                       (end - start) / loops, loops);
+}
+
+int main() {
+    for (int i = 0; i < IPC_SIZE_COUNT; i++) {
+        uds_stream(ipc_sizes[i]);
+    }
+
+    for (int i = 0; i < IPC_SIZE_COUNT; i++) {
+        uds_dgram(ipc_sizes[i]);
+    }
+}
diff --git a/tests/benchmarks/perf_fibonacci.c b/tests/benchmarks/perf_fibonacci.c
new file mode 100644
index 000000000..a713c60b9
--- /dev/null
+++ b/tests/benchmarks/perf_fibonacci.c
@@ -0,0 +1,33 @@
+// DESCRIPTION: Compute Fibonacci(1000) to measure compute overhead.
+#include "bench.h"
+
+#define LOOP_COUNT 10000
+#define FIB_INPUT 1000
+
+unsigned long long fibonacci(int n) {
+    if (n <= 1)
+        return n;
+    volatile unsigned long long a = 0, b = 1, c;
+    for (int i = 2; i <= n; i++) {
+        c = a + b;
+        a = b;
+        b = c;
+    }
+    return b;
+}
+
+int main() {
+    volatile unsigned long long sum = 0;
+
+    long long start_time = gettimens();
+    for (int i = 0; i < LOOP_COUNT; i++) {
+        sum += fibonacci(FIB_INPUT);
+    }
+    long long end_time = gettimens();
+
+    long long avg_time = (end_time - start_time) / LOOP_COUNT;
+
+    emit_result("Fibonacci", 0, avg_time, LOOP_COUNT);
+
+    return 0;
+}
diff --git a/tests/benchmarks/sys_close.c b/tests/benchmarks/sys_close.c
new file mode 100644
index 000000000..3a171accf
--- /dev/null
+++ b/tests/benchmarks/sys_close.c
@@ -0,0 +1,14 @@
+// DESCRIPTION: Evaluate no-op syscall latency using close(-1).
+#include "bench.h"
+#include <unistd.h>
+
+int main() {
+    long long start_time = gettimens();
+    for (int i = 0; i < LOOPS_LARGE; i++) {
+        close(-1);
+    }
+    long long end_time = gettimens();
+    long long average_time = (end_time - start_time) / LOOPS_LARGE;
+
+    emit_result("close", -1, average_time, LOOPS_LARGE);
+}
diff --git a/tests/benchmarks/sys_geteuid.c b/tests/benchmarks/sys_geteuid.c
new file mode 100644
index 000000000..9eda5b7a4
--- /dev/null
+++ b/tests/benchmarks/sys_geteuid.c
@@ -0,0 +1,18 @@
+// DESCRIPTION: Evaluate kernel-resolved syscall latency using geteuid().
+#include "bench.h"
+#include <stdio.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+int main(int argc, char *argv[]) {
+    int ret;
+
+    long long start = gettimens();
+    for (int i = 0; i < LOOPS_LARGE; i++) {
+        ret = geteuid();
+    }
+    long long end = gettimens();
+    long long avg = (end - start) / LOOPS_LARGE;
+
+    emit_result_string("geteuid", "-", avg, LOOPS_LARGE);
+}