From 76228ce4ae92b4d50977c13f4fd384da5b67cb5a Mon Sep 17 00:00:00 2001 From: Chris Golden <551285+cirego@users.noreply.github.com> Date: Fri, 15 Jan 2021 16:32:53 -0800 Subject: [PATCH] Rework chbench snapshot generation (#5330) This PR introduces a number of changes around how we generate snapshots from chbench, for use with the ingest tests. First, our snapshot generation code was never 100% complete, as it never wrote out the source_offsets (I had created it by hand and it was part of the S3 archive). Snapshot_view_states now captures the offsets for each source. Second, offsets.json hardcoded the view_name to topic name. Instead of duplicating this information, add code to Peeker to archive the configuration used to generate the snapshot. Included in this configuration file are the sources that comprise each view (currently expected to be a 1 to 1 mapping for our ingest benchmarks). Third, the TOML config file for Peeker is interpreted by Peeker (it substitutes environment variables) and cannot be directly loaded by other programming languages. To fix this, I added a method to Peeker to write out the computed version of the configuration (and loaded it into TOML to validate the config). Fourth, the TOML config file for Peeker could not be serialized due to sleep_ms being a Duration field and not being the last field in the struct. I moved this field to the end of each struct where mentioned, but this created a second problem. Our deserializer converted a single string into a Duration object, which is serialized as a table, thereby breaking symmetry in serialize / deserialize. I removed the custom deserialization so that we can load the serialized file using the same deserialization logic. I think we can write a SQL query / some recursive logic to programmatically get the source names for each view, but this current PR works for saving snapshots and archiving the Peeker config file. --- demo/chbench/mzcompose.yml | 10 +- misc/mzutil/requirements.txt | 1 + misc/mzutil/scripts/snapshot_view_states.py | 31 +++++ misc/mzutil/scripts/wait_for_view_states.py | 67 ++++++++-- src/peeker/config.toml | 51 +++++++- src/peeker/src/args.rs | 132 +++++++++++--------- src/peeker/src/main.rs | 5 + 7 files changed, 222 insertions(+), 75 deletions(-) diff --git a/demo/chbench/mzcompose.yml b/demo/chbench/mzcompose.yml index 5c0222fd09d0c..8e63b735f922f 100644 --- a/demo/chbench/mzcompose.yml +++ b/demo/chbench/mzcompose.yml @@ -264,6 +264,9 @@ services: command: ${PEEKER_CMD:---queries q01,q02,q17} volumes: - ./peeker-config:/etc/peeker + - type: bind + source: ${MZ_CHBENCH_SNAPSHOT:-/tmp} + target: /snapshot test-correctness: # NOTE: we really don't want to include depends_on, it causes dependencies to be restarted mzbuild: test-correctness @@ -380,10 +383,6 @@ mzworkflows: - step: run service: mzutil command: snapshot_view_states.py - # Output summary information about the topics - - step: run - service: kafka-util - command: summarize_topics.py # Run a workflow to measure the ingest performance of Materialize. 
Assumes that the you have # already called setup-ingest-benchmark and the cluster is still running @@ -401,6 +400,9 @@ mzworkflows: # Take a snapshot of topic schemas and contents generate-snapshot: steps: + - step: run + service: peeker + command: --write-config /snapshot/config.toml - step: workflow workflow: setup-ingest-benchmark - step: run diff --git a/misc/mzutil/requirements.txt b/misc/mzutil/requirements.txt index 3557d79c82e71..abe103d8708a6 100644 --- a/misc/mzutil/requirements.txt +++ b/misc/mzutil/requirements.txt @@ -1 +1,2 @@ psycopg2-binary==2.8.6 +toml==0.10.2 diff --git a/misc/mzutil/scripts/snapshot_view_states.py b/misc/mzutil/scripts/snapshot_view_states.py index 1c4e3afebcbfa..968ea064da868 100755 --- a/misc/mzutil/scripts/snapshot_view_states.py +++ b/misc/mzutil/scripts/snapshot_view_states.py @@ -18,6 +18,7 @@ import argparse import os +import sys import typing import psycopg2 # type: ignore @@ -34,6 +35,16 @@ def view_names( yield row[0] +def source_names( + conn: psycopg2.extensions.connection, +) -> typing.Generator[str, None, None]: + """Return a generator containing all sources in Materialize.""" + with conn.cursor() as cursor: + cursor.execute("SELECT source_name FROM mz_source_info") + for row in cursor: + yield row[0] + + def snapshot_materialize_views(args: argparse.Namespace) -> None: """Record the current table status of all views installed in Materialize.""" @@ -46,6 +57,25 @@ def snapshot_materialize_views(args: argparse.Namespace) -> None: cursor.copy_expert(query, outfile) +def snapshot_source_offsets(args: argparse.Namespace) -> None: + """Record the current topic offset of all sources installed in Materialize.""" + + with psycopg2.connect(f"postgresql://{args.host}:{args.port}/materialize") as conn: + for source in source_names(conn): + with conn.cursor() as cursor: + query = "SELECT mz_source_info.offset as offset FROM mz_source_info WHERE source_name = %s" + cursor.execute(query, (source,)) + + if cursor.rowcount != 1: + print(f"ERROR: Expected one row for {source}: {cursor.fetchall()}") + sys.exit(1) + + viewfile = os.path.join(args.snapshot_dir, f"{source}.offset") + with open(viewfile, "w") as outfile: + offset = cursor.fetchone()[0] + outfile.write(f"{offset}") + + def main() -> None: """Parse arguments and snapshot materialized views.""" @@ -67,6 +97,7 @@ def main() -> None: args = parser.parse_args() snapshot_materialize_views(args) + snapshot_source_offsets(args) if __name__ == "__main__": diff --git a/misc/mzutil/scripts/wait_for_view_states.py b/misc/mzutil/scripts/wait_for_view_states.py index 0000d0ab2ce17..b4c8772b84684 100755 --- a/misc/mzutil/scripts/wait_for_view_states.py +++ b/misc/mzutil/scripts/wait_for_view_states.py @@ -24,6 +24,7 @@ import sys import pathlib import time +import toml import typing import psycopg2 # type: ignore @@ -57,15 +58,23 @@ def view_contents(cursor: psycopg2.extensions.cursor, view: str, timestamp: int) return stream.getvalue().strip() +class SourceInfo: + """Container class containing information about a source.""" + + def __init__(self, topic_name: str, offset: int): + self.topic_name = topic_name + self.offset = offset + + def source_at_offset( - cursor: psycopg2.extensions.cursor, source_name: str, desired_offset: int + cursor: psycopg2.extensions.cursor, source_info: SourceInfo ) -> typing.Union[None, int]: """Return the mz timestamp from a source if it has reached the desired offset.""" query = ( 'SELECT timestamp FROM mz_source_info WHERE source_name = %s and "offset" = %s' ) try: - 
cursor.execute(query, (source_name, desired_offset)) + cursor.execute(query, (source_info.topic_name, source_info.offset)) if cursor.rowcount > 1: print("ERROR: More than one row returned when querying source offsets:") for row in cursor: @@ -86,14 +95,58 @@ def wait_for_materialize_views(args: argparse.Namespace) -> None: start_time = time.monotonic() # Create a dictionary mapping view names (as calculated from the filename) to expected contents - view_snapshots = { + view_snapshots: typing.Dict[str, str] = { p.stem: p.read_text().strip() for p in pathlib.Path(args.snapshot_dir).glob("*.sql") } + source_offsets: typing.Dict[str, int] = { + p.stem: int(p.read_text().strip()) + for p in pathlib.Path(args.snapshot_dir).glob("*.offset") + } + # Create a dictionary mapping view names to source name and offset - with open(os.path.join(args.snapshot_dir, "offsets.json")) as fd: - source_offsets = json.load(fd) + view_sources: typing.Dict[str, SourceInfo] = {} + with open(os.path.join(args.snapshot_dir, "config.toml")) as fd: + conf = toml.load(fd) + + if len(conf["sources"]) != 1: + print(f"ERROR: Expected just one source block: {conf['sources']}") + sys.exit(1) + + source_info = conf["sources"][0] + topic_prefix: str = source_info["topic_namespace"] + source_names: typing.List[str] = source_info["names"] + + for query_info in conf["queries"]: + + # Ignore views not in this snapshot (they likely have multiple sources...) + view: str = query_info["name"] + if view not in view_snapshots: + continue + + sources: typing.List[str] = query_info["sources"] + if len(query_info["sources"]) != 1: + print( + f"ERROR: Expected just one source for view {view}: {query_info['sources']}" + ) + sys.exit(1) + + source_name: str = query_info["sources"][0] + if source_name not in source_names: + print( + f"ERROR: No matching source {source_name} for view {view}: {source_names}" + ) + sys.exit(1) + + topic_name = f"{topic_prefix}{source_name}" + if topic_name not in source_offsets: + print( + f"ERROR: Missing offset information for source {topic_name}: {source_offsets}" + ) + sys.exit(1) + + view_sources[view] = SourceInfo(topic_name, source_offsets[topic_name]) with psycopg2.connect(f"postgresql://{args.host}:{args.port}/materialize") as conn: installed_views = set(view_names(conn)) @@ -118,9 +171,7 @@ def wait_for_materialize_views(args: argparse.Namespace) -> None: # Determine if the source is at the desired offset and identify the # mz_logical_timestamp associated with the offset - desired_offset = source_offsets[view]["offset"] - source_name = source_offsets[view]["topic"] - timestamp = source_at_offset(cursor, source_name, desired_offset) + timestamp = source_at_offset(cursor, view_sources[view]) if not timestamp: continue diff --git a/src/peeker/config.toml b/src/peeker/config.toml index d46b2d397b4f0..ada0a3d617bb3 100644 --- a/src/peeker/config.toml +++ b/src/peeker/config.toml @@ -34,12 +34,14 @@ names = [ ] [default_query] -# the amount of time to sleep between peeks -sleep_ms = 0 # Spin up this many threads querying this view # This affects the ratio of times this query is executed vs other queries thread_count = 1 +[default_query.sleep] +secs = 0 +nanos = 0 + [[groups]] # group settings override per-query enabled, enable/disable groups with the `-q` flag, # same as queries @@ -51,7 +53,10 @@ queries = ["q01", "q03", "q17", "q22"] # default values for groups: thread_count = 1 enabled = true -sleep_ms = 0 + +[groups.sleep] +secs = 0 +nanos = 0 [[groups]] name = "loadtest" @@ -85,10 +90,10 @@ WHERE
ol_delivery_d > TIMESTAMP '2007-01-02 00:00:00.000000' GROUP BY ol_number ORDER BY ol_number """ +sources = ["orderline"] [[queries]] -sleep_ms = 1000 name = "q02" query = """ SELECT su_suppkey, su_name, n_name, i_id, i_name, su_address, su_phone, su_comment @@ -115,6 +120,11 @@ WHERE i_id = s_i_id AND s_quantity = m_s_quantity ORDER BY n_name, su_name, i_id """ +sources = ["item", "nation", "region", "stock", "supplier"] + +[queries.sleep] +secs = 1 +nanos = 0 [[queries]] name = "q03" @@ -135,6 +145,7 @@ WHERE c_state LIKE 'A%' GROUP BY ol_o_id, ol_w_id, ol_d_id, o_entry_d ORDER BY revenue DESC, o_entry_d """ +sources = ["customer", "neworder", "order", "orderline"] [[queries]] name = "q04" @@ -154,6 +165,7 @@ WHERE o_entry_d >= TIMESTAMP '2007-01-02 00:00:00.000000' GROUP BY o_ol_cnt ORDER BY o_ol_cnt """ +sources = ["order", "orderline"] [[queries]] name = "q05" @@ -179,6 +191,8 @@ WHERE c_id = o_c_id GROUP BY n_name ORDER BY revenue DESC; """ +sources = ["customer", "nation", "order", "orderline", "region", "stock", "supplier"] + [[queries]] name = "q06" query = """ @@ -188,6 +202,7 @@ WHERE ol_delivery_d >= TIMESTAMP '1999-01-01 00:00:00.000000' AND ol_delivery_d < TIMESTAMP '2020-01-01 00:00:00.000000' AND ol_quantity BETWEEN 1 AND 100000; """ +sources = ["orderline"] [[queries]] name = "q07" @@ -225,6 +240,7 @@ WHERE ol_supply_w_id = s_w_id GROUP BY su_nationkey, substr(c_state, 1, 1), extract(year FROM o_entry_d) ORDER BY su_nationkey, cust_nation, l_year; """ +sources = ["customer", "nation", "order", "orderline", "stock", "supplier"] [[queries]] name = "q08" @@ -254,6 +270,7 @@ WHERE i_id = s_i_id GROUP BY extract(year FROM o_entry_d) ORDER BY l_year; """ +sources = ["customer", "item", "nation", "order", "orderline", "region", "stock", "supplier"] [[queries]] name = "q09" @@ -274,6 +291,7 @@ WHERE ol_i_id = s_i_id GROUP BY n_name, extract(year FROM o_entry_d) ORDER BY n_name, l_year DESC; """ +sources = ["item", "nation", "order", "orderline", "stock", "supplier"] [[queries]] name = "q10" @@ -293,6 +311,7 @@ WHERE c_id = o_c_id GROUP BY c_id, c_last, c_city, c_phone, n_name ORDER BY revenue DESC; """ +sources = ["customer", "nation", "order", "orderline"] [[queries]] name = "q11" @@ -312,6 +331,7 @@ HAVING sum(s_order_cnt) > ( ) ORDER BY ordercount DESC; """ +sources = ["nation", "stock", "supplier"] [[queries]] name = "q12" @@ -330,6 +350,7 @@ WHERE ol_w_id = o_w_id GROUP BY o_ol_cnt ORDER BY o_ol_cnt; """ +sources = ["order", "orderline"] [[queries]] name = "q13" @@ -347,6 +368,7 @@ FROM ( GROUP BY c_count ORDER BY custdist DESC, c_count DESC; """ +sources = ["customer", "order"] [[queries]] name = "q14" @@ -358,6 +380,7 @@ WHERE ol_i_id = i_id AND ol_delivery_d >= TIMESTAMP '2007-01-02 00:00:00.000000' AND ol_delivery_d < TIMESTAMP '2020-01-02 00:00:00.000000' """ +sources = ["item", "orderline"] [[queries]] name = "q15" @@ -391,6 +414,7 @@ WHERE su_suppkey = supplier_no ) ORDER BY su_suppkey; """ +sources = ["orderline", "stock", "supplier"] [[queries]] name = "q16" @@ -409,6 +433,7 @@ WHERE i_id = s_i_id GROUP BY i_name, substr(i_data, 1, 3), i_price ORDER BY supplier_cnt DESC; """ +sources = ["item", "stock", "supplier"] [[queries]] name = "q17" @@ -426,6 +451,7 @@ FROM WHERE ol_i_id = t.i_id AND ol_quantity < t.a; """ +sources = ["item", "orderline"] [[queries]] name = "q18" @@ -442,6 +468,7 @@ GROUP BY o_id, o_w_id, o_d_id, c_id, c_last, o_entry_d, o_ol_cnt HAVING sum(ol_amount) > 200 ORDER BY sum(ol_amount); """ +sources = ["customer", "order", "orderline"] [[queries]] 
name = "q19" @@ -471,6 +498,7 @@ WHERE ( AND ol_w_id in (1, 5, 3) ) """ +sources = ["item", "orderline"] [[queries]] name = "q20" @@ -490,6 +518,7 @@ WHERE su_suppkey IN ( AND n_name = 'GERMANY' ORDER BY su_name; """ +sources = ["item", "nation", "orderline", "stock", "supplier"] [[queries]] name = "q21" @@ -518,6 +547,7 @@ WHERE ol_o_id = o_id GROUP BY su_name ORDER BY numwait DESC, su_name; """ +sources = ["nation", "order", "orderline", "stock", "supplier"] [[queries]] name = "q22" @@ -542,6 +572,7 @@ WHERE substr(c_phone, 1, 1) IN ('1', '2', '3', '4', '5', '6', '7') GROUP BY substr(c_state, 1, 1) ORDER BY substr(c_state, 1, 1); """ +sources = ["customer", "order"] [[queries]] name = "count_customer" @@ -550,6 +581,7 @@ SELECT count(*) AS num_customers FROM customer; """ +sources = ["customer"] [[queries]] name = "count_item" @@ -558,6 +590,7 @@ SELECT count(*) AS num_items FROM item; """ +sources = ["item"] [[queries]] name = "count_nation" @@ -566,6 +599,7 @@ SELECT count(*) AS num_nations FROM nation; """ +sources = ["nation"] [[queries]] name = "count_neworder" @@ -574,6 +608,7 @@ SELECT count(*) AS num_neworders FROM neworder; """ +sources = ["neworder"] [[queries]] name = "count_order" @@ -582,6 +617,7 @@ SELECT count(*) AS num_orders FROM order; """ +sources = ["order"] [[queries]] name = "count_orderline" @@ -590,6 +626,7 @@ SELECT count(*) AS num_orderlines FROM orderline; """ +sources = ["orderline"] [[queries]] name = "count_stock" @@ -598,6 +635,7 @@ SELECT count(*) AS num_stocks FROM stock; """ +sources = ["stock"] [[queries]] name = "count_supplier" @@ -606,6 +644,7 @@ SELECT count(*) AS num_suppliers FROM supplier; """ +sources = ["supplier"] [[queries]] name = "count_history" @@ -614,6 +653,7 @@ SELECT count(*) AS num_historys FROM history; """ +sources = ["history"] [[queries]] name = "count_district" @@ -622,6 +662,7 @@ SELECT count(*) AS num_districts FROM district; """ +sources = ["district"] [[queries]] name = "count_region" @@ -630,6 +671,7 @@ SELECT count(*) AS num_regions FROM region; """ +sources = ["region"] [[queries]] name = "count_warehouse" @@ -638,3 +680,4 @@ SELECT count(*) AS num_warehouses FROM warehouse; """ +sources = ["warehouse"] diff --git a/src/peeker/src/args.rs b/src/peeker/src/args.rs index 712134dd21f6e..a9a1037b0bcc4 100644 --- a/src/peeker/src/args.rs +++ b/src/peeker/src/args.rs @@ -12,13 +12,13 @@ use std::collections::{HashMap, HashSet}; use std::convert::TryFrom; use std::env; -use std::result::Result as StdResult; +use std::fs; use std::time::Duration; use lazy_static::lazy_static; use log::{debug, info}; use regex::Regex; -use serde::{Deserialize, Deserializer}; +use serde::{Deserialize, Serialize}; use structopt::StructOpt; use crate::{Error, Result}; @@ -61,33 +61,13 @@ pub struct Args { /// A value of 0 never shuts down. #[structopt(long, default_value = "0")] pub run_seconds: u32, + /// Write out the parsed contents of the config file + #[structopt(long)] + pub write_config: Option, } pub fn load_config(config_path: Option<&str>, cli_queries: Option<&str>) -> Result { - // load and parse th toml - let config_file = config_path - .map(std::fs::read_to_string) - .unwrap_or_else(|| Ok(DEFAULT_CONFIG.to_string())); - let conf = match &config_file { - Ok(contents) => { - let contents = substitute_config_env_vars(contents); - toml::from_str::(&contents).map_err(|e| { - format!( - "Unable to parse config file {}: {}", - config_path.as_deref().unwrap_or("DEFAULT"), - e - ) - })? 
- } - Err(e) => { - eprintln!( - "unable to read config file {:?}: {}", - config_path.as_deref().unwrap_or("DEFAULT"), - e - ); - std::process::exit(1); - } - }; + let conf = load_raw_config(config_path); // Get everything into the normalized QueryGroup representation let mut config = Config::try_from(conf)?; @@ -119,6 +99,35 @@ pub fn load_config(config_path: Option<&str>, cli_queries: Option<&str>) -> Resu Ok(config) } +fn load_raw_config(config_path: Option<&str>) -> RawConfig { + // load and parse th toml + let config_file = config_path + .map(std::fs::read_to_string) + .unwrap_or_else(|| Ok(DEFAULT_CONFIG.to_string())); + let config = match &config_file { + Ok(contents) => substitute_config_env_vars(contents), + Err(e) => { + eprintln!( + "unable to read config file {:?}: {}", + config_path.as_deref().unwrap_or("DEFAULT"), + e + ); + std::process::exit(1); + } + }; + match toml::from_str::(&config) { + Ok(config) => config, + Err(e) => { + eprintln!( + "Unable to parse config file {}: {}", + config_path.as_deref().unwrap_or("DEFAULT"), + e + ); + std::process::exit(1); + } + } +} + pub fn print_config_supplied(config: Config) { println!("named queries:"); let mut groups = config.groups.iter().collect::>(); @@ -141,6 +150,27 @@ pub fn print_config_supplied(config: Config) { } } +pub fn write_config_supplied(config_path: Option<&str>, outfile: &str) { + let config_contents = toml::to_string(&load_raw_config(config_path)); + match &config_contents { + Ok(contents) => match fs::write(outfile, contents) { + Ok(()) => {} + Err(e) => { + eprintln!("unable to write config file {:?}: {}", outfile, e); + std::process::exit(1); + } + }, + Err(e) => { + eprintln!( + "unable to generate config file {:?}: {}", + config_path.as_deref().unwrap_or("DEFAULT"), + e + ); + std::process::exit(1); + } + }; +} + /// A query configuration /// /// This is a normalized version of [`RawConfig`], which is what is actually parsed @@ -233,7 +263,7 @@ impl QueryGroup { fn from_raw_query(q: RawQuery, default: &DefaultQuery) -> QueryGroup { QueryGroup { name: q.name.clone(), - sleep: q.sleep_ms.unwrap_or(default.sleep_ms), + sleep: q.sleep.unwrap_or(default.sleep), thread_count: q.thread_count.unwrap_or(default.thread_count), queries: vec![Query { name: q.name, @@ -247,7 +277,7 @@ impl QueryGroup { let g_name = g.name.clone(); Ok(QueryGroup { name: g_name.clone(), - sleep: g.sleep_ms, + sleep: g.sleep, thread_count: g.thread_count, queries: g .queries @@ -271,21 +301,21 @@ pub struct Query { pub query: String, } -#[derive(Clone, Debug, Deserialize)] +#[derive(Clone, Debug, Deserialize, Serialize)] pub struct Source { pub schema_registry: String, pub kafka_broker: String, pub topic_namespace: String, - pub names: Vec, /// If true, `create MATERIALIZED source` #[serde(default)] pub materialized: bool, + pub names: Vec, } // inner parsing helpers /// The raw config file, it is parsed and then defaults are supplied, resulting in [`Config`] -#[derive(Debug, Deserialize)] +#[derive(Debug, Deserialize, Serialize)] struct RawConfig { /// Default to be filled in for other queries default_query: DefaultQuery, @@ -297,42 +327,42 @@ struct RawConfig { sources: Vec, } -#[derive(Clone, Debug, Deserialize)] +#[derive(Clone, Debug, Deserialize, Serialize)] struct DefaultQuery { - #[serde(deserialize_with = "deser_duration_ms")] - sleep_ms: Duration, thread_count: u32, /// Groups share their connection and only one query happens at a time #[serde(default)] group: Option, + sleep: Duration, } /// An explicitly created, named group 
-#[derive(Clone, Debug, Deserialize)] +#[derive(Clone, Debug, Deserialize, Serialize)] struct GroupConfig { name: String, - /// The names of the queries that belong in this group, must be specified separately - /// in the config file - queries: Vec, #[serde(default = "one")] thread_count: u32, - #[serde(default, deserialize_with = "deser_duration_ms")] - sleep_ms: Duration, /// Whether to enabled this group. Overrides enabled in queries #[serde(default = "btrue")] enabled: bool, + /// The names of the queries that belong in this group, must be specified separately + /// in the config file + queries: Vec, + #[serde(default)] + sleep: Duration, } -#[derive(Clone, Debug, Deserialize)] +#[derive(Clone, Debug, Deserialize, Serialize)] struct RawQuery { name: String, query: String, #[serde(default = "btrue")] enabled: bool, - #[serde(default, deserialize_with = "deser_duration_ms_opt")] - sleep_ms: Option, #[serde(default)] thread_count: Option, + sources: Vec, + #[serde(default)] + sleep: Option, } /// helper for serde default @@ -344,22 +374,6 @@ fn one() -> u32 { 1 } -fn deser_duration_ms<'de, D>(deser: D) -> StdResult -where - D: Deserializer<'de>, -{ - let d = Duration::from_millis(Deserialize::deserialize(deser)?); - Ok(d) -} - -fn deser_duration_ms_opt<'de, D>(deser: D) -> StdResult, D::Error> -where - D: Deserializer<'de>, -{ - let d = Duration::from_millis(Deserialize::deserialize(deser)?); - Ok(Some(d)) -} - lazy_static! { static ref BASHLIKE_ENV_VAR_PATTERN: Regex = Regex::new(r"(?P\$\{(?P[^:}]+)(:-(?P[^}]+))?\})").unwrap(); diff --git a/src/peeker/src/main.rs b/src/peeker/src/main.rs index 0a01827b83eb7..5c11f05a2c113 100644 --- a/src/peeker/src/main.rs +++ b/src/peeker/src/main.rs @@ -46,6 +46,11 @@ fn main() -> Result<()> { let args: Args = ore::cli::parse_args(); + if let Some(write_config) = args.write_config { + args::write_config_supplied(args.config_file.as_deref(), &write_config); + return Ok(()); + } + let config = args::load_config(args.config_file.as_deref(), args.queries.as_deref())?; if args.help_config {
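
A note on the serialization fix described above: once the custom millisecond deserializer is removed, a sleep field round-trips through the toml crate because serde represents std::time::Duration as a nested { secs, nanos } structure, which the toml crate writes as a table, and TOML requires tables to follow all plain key/value fields; that is why sleep moves to the end of each struct. The following is a minimal sketch of that round trip, not code from this patch: the struct name QuerySketch and the field values are illustrative, and it assumes the serde (with derive) and toml crates as dependencies.

use serde::{Deserialize, Serialize};
use std::time::Duration;

// Hypothetical stand-in for a peeker query entry. The Duration field comes
// last because serde serializes it as a { secs, nanos } struct, which the
// toml crate emits as a table, and TOML tables must follow plain fields.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct QuerySketch {
    name: String,
    thread_count: u32,
    sleep: Duration,
}

fn main() {
    let query = QuerySketch {
        name: "q01".to_string(),
        thread_count: 1,
        sleep: Duration::from_millis(1000),
    };
    // Serialize to TOML (producing a [sleep] table with secs/nanos keys) and
    // load it back with the same derived deserializer to confirm symmetry.
    let rendered = toml::to_string(&query).expect("serialize config");
    let reloaded: QuerySketch = toml::from_str(&rendered).expect("reload config");
    assert_eq!(query, reloaded);
    println!("{}", rendered);
}

With that symmetry in place, the file written out by --write-config can be loaded unchanged by Python's toml module, which is how wait_for_view_states.py consumes the archived config.toml.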