diff --git a/.gitignore b/.gitignore index 636e1015..f2ea7452 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,10 @@ debug/ target/ -/doc/manual/build/ + +# Currently generated man pages are checked in. +#/doc/manual/build/ +/doc/manual/build/doctrees +/doc/manual/build/man/_static # These are backup files generated by rustfmt **/*.rs.bk diff --git a/Cargo.lock b/Cargo.lock index f1dbeef3..6e5a0f2b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,9 +4,9 @@ version = 3 [[package]] name = "addr2line" -version = "0.24.2" +version = "0.25.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" +checksum = "1b5d307320b3181d6d7954e663bd7c774a838b8220fe0593c86d9fb09f498b4b" dependencies = [ "gimli", ] @@ -32,12 +32,6 @@ version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" -[[package]] -name = "android-tzdata" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" - [[package]] name = "android_system_properties" version = "0.1.5" @@ -49,9 +43,9 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.20" +version = "0.6.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ae563653d1938f79b1ab1b5e668c87c76a9930414574a6583a7b7e11a8e6192" +checksum = "43d5b281e737544384e969a5ccad3f1cdd24b48086a0fc1b2a5262a26b8f4f4a" dependencies = [ "anstyle", "anstyle-parse", @@ -64,9 +58,9 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.11" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "862ed96ca487e809f1c8e5a8447f6ee2cf102f846893800b20cebdf541fc6bbd" +checksum = "5192cca8006f1fd4f7237516f40fa183bb07f8fbdfedaa0036de5ea9b0b45e78" [[package]] name = "anstyle-parse" @@ -122,9 +116,9 
@@ checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "backtrace" -version = "0.3.75" +version = "0.3.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6806a6321ec58106fea15becdad98371e28d92ccbc7c8f1b3b6dd724fe8f1002" +checksum = "bb531853791a215d7c62a30daf0dde835f381ab5de4589cfe7c649d2cbe92bd6" dependencies = [ "addr2line", "cfg-if", @@ -132,14 +126,30 @@ dependencies = [ "miniz_oxide", "object", "rustc-demangle", - "windows-targets 0.52.6", + "windows-link", +] + +[[package]] +name = "base64" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" + +[[package]] +name = "bcder" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f7c42c9913f68cf9390a225e81ad56a5c515347287eb98baa710090ca1de86d" +dependencies = [ + "bytes", + "smallvec", ] [[package]] name = "bitflags" -version = "2.9.3" +version = "2.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34efbcccd345379ca2868b2b2c9d3782e9cc58ba87bc7d79d5b53d9c9ae6f25d" +checksum = "2261d10cca569e4643e526d8dc2e62e433cc8aba21ab764233731f8d369bf394" [[package]] name = "bumpalo" @@ -155,10 +165,11 @@ checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" [[package]] name = "cc" -version = "1.2.34" +version = "1.2.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42bc4aea80032b7bf409b0bc7ccad88853858911b7713a8062fdc0623867bedc" +checksum = "e1d05d92f4b1fd76aad469d46cdd858ca761576082cd37df81416691e50199fb" dependencies = [ + "find-msvc-tools", "shlex", ] @@ -170,11 +181,10 @@ checksum = "2fd1289c04a9ea8cb22300a459a72a385d7c73d3259e2ed7dcb2af674838cfa9" [[package]] name = "chrono" -version = "0.4.41" +version = "0.4.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d" +checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2" dependencies = [ - "android-tzdata", "iana-time-zone", "js-sys", "num-traits", @@ -184,9 +194,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.46" +version = "4.5.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c5e4fcf9c21d2e544ca1ee9d8552de13019a42aa7dbf32747fa7aaf1df76e57" +checksum = "e2134bb3ea021b78629caa971416385309e0131b351b25e01dc16fb54e1b5fae" dependencies = [ "clap_builder", "clap_derive", @@ -194,9 +204,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.46" +version = "4.5.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fecb53a0e6fcfb055f686001bc2e2592fa527efaf38dbe81a6a9563562e57d41" +checksum = "c2ba64afa3c0a6df7fa517765e31314e983f51dda798ffba27b988194fb65dc9" dependencies = [ "anstream", "anstyle", @@ -207,14 +217,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.45" +version = "4.5.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14cb31bb0a7d536caef2639baa7fad459e15c3144efefa6dbd1c84562c4739f6" +checksum = "bbfd7eae0b0f1a6e63d4b13c9c478de77c2eb546fba158ad50b4203dc24b9f9c" dependencies = [ "heck", "proc-macro2", "quote", - "syn", + "syn 2.0.106", ] [[package]] @@ -300,9 +310,9 @@ checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "deranged" -version = "0.4.0" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c9e6a11ca8224451684bc0d7d5a7adbf8f2fd6887261a1cfc3c0432f9d4068e" +checksum = "a41953f86f8a05768a6cda24def994fd2f424b04ec5c719cf89989779f199071" dependencies = [ "powerfmt", ] @@ -313,50 +323,69 @@ version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + [[package]] name = "dnst" -version = "0.1.1-dev" +version = "0.1.0-alpha" dependencies = [ "bytes", "chrono", "clap", "const_format", "domain", + "futures", + "indenter", "jiff", "lazy_static", "lexopt", "octseq", "openssl", "pretty_assertions", + "rand 0.9.2", "rayon", "regex", "ring", + "serde", + "serde_json", "tempfile", "test_bin", "tokio", "tracing", "tracing-subscriber", + "url", ] [[package]] name = "domain" version = "0.11.1-dev" -source = "git+https://github.com/NLnetLabs/domain.git?rev=17fb0e38120c9939ca28462af082d88ae8bc8b1d#17fb0e38120c9939ca28462af082d88ae8bc8b1d" +source = "git+https://github.com/NLnetLabs/domain.git?branch=patches-for-nameshed-prototype#1753a62e1e4290181153d401ab2bcc14843bccbd" dependencies = [ "arc-swap", + "bcder", "bumpalo", "bytes", "chrono", "domain-macros", "futures-util", "hashbrown", + "kmip-protocol", "libc", "log", "moka", "octseq", "openssl", "parking_lot", - "rand", + "rand 0.8.5", "ring", "rustversion", "secrecy", @@ -368,16 +397,18 @@ dependencies = [ "tokio-stream", "tracing", "tracing-subscriber", + "url", + "uuid", ] [[package]] name = "domain-macros" version = "0.11.1-dev" -source = "git+https://github.com/NLnetLabs/domain.git?rev=17fb0e38120c9939ca28462af082d88ae8bc8b1d#17fb0e38120c9939ca28462af082d88ae8bc8b1d" +source = "git+https://github.com/NLnetLabs/domain.git?branch=patches-for-nameshed-prototype#1753a62e1e4290181153d401ab2bcc14843bccbd" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.106", ] [[package]] @@ -386,14 +417,42 @@ version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" +[[package]] +name = "enum-display-derive" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f16ef37b2a9b242295d61a154ee91ae884afff6b8b933b486b12481cc58310ca" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "enum-flags" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3682d2328e61f5529088a02cd20bb0a9aeaeeeb2f26597436dd7d75d1340f8f5" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + [[package]] name = "errno" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.60.2", + "windows-sys 0.61.1", ] [[package]] @@ -423,6 +482,12 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" +[[package]] +name = "find-msvc-tools" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0399f9d26e5191ce32c498bebd31e7a3ceabc2745f0ac54af3f335126c3f24b3" + [[package]] name = "foreign-types" version = "0.3.2" @@ -438,12 +503,63 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" +[[package]] +name = "form_urlencoded" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "futures" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +dependencies = [ + "futures-core", + "futures-sink", +] + [[package]] name = "futures-core" version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" +[[package]] +name = "futures-executor" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-io" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" + [[package]] name = "futures-macro" version = "0.3.31" @@ -452,9 +568,15 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.106", ] +[[package]] +name = "futures-sink" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" + [[package]] name = "futures-task" version = "0.3.31" @@ -467,28 +589,18 @@ version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ + "futures-channel", "futures-core", + "futures-io", "futures-macro", + "futures-sink", "futures-task", + "memchr", "pin-project-lite", "pin-utils", "slab", ] -[[package]] -name = "generator" -version = "0.8.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "605183a538e3e2a9c1038635cc5c2d194e2ee8fd0d1b66b8349fad7dbacce5a2" -dependencies = [ - "cc", - "cfg-if", - "libc", - "log", - "rustversion", - "windows", -] - [[package]] name = "getrandom" version = "0.2.16" @@ -509,14 +621,14 @@ dependencies = [ "cfg-if", "libc", "r-efi", - "wasi 0.14.3+wasi-0.2.4", + "wasi 0.14.7+wasi-0.2.4", ] [[package]] name = "gimli" -version = "0.31.1" +version = "0.32.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" +checksum = "e629b9b98ef3dd8afe6ca2bd0f89306cec16d43d907889945bc5d6687f2f13c7" [[package]] name = "hashbrown" @@ -533,11 +645,17 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + [[package]] name = "iana-time-zone" -version = "0.1.63" +version = "0.1.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0c919e5debc312ad217002b8048a17b7d83f80703865bbfcfebb0458b0b27d8" +checksum = "33e57f83510bb73707521ebaffa789ec8caf86f9657cad665b092b581d40e9fb" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -557,6 +675,119 @@ dependencies = [ "cc", ] +[[package]] +name = "icu_collections" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47" +dependencies = [ + "displaydoc", + "potential_utf", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locale_core" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_normalizer" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3" + +[[package]] +name = "icu_properties" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "016c619c1eeb94efb86809b015c58f479963de65bdb6253345c1a1276f22e32b" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_locale_core", + "icu_properties_data", + "icu_provider", + "potential_utf", + "zerotrie", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632" + +[[package]] +name = "icu_provider" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03c80da27b5f4187909049ee2d72f276f0d9f99a42c306bd0131ecfe04d8e5af" +dependencies = [ + "displaydoc", + "icu_locale_core", + "stable_deref_trait", + "tinystr", + "writeable", + "yoke", + "zerofrom", + "zerotrie", + "zerovec", +] + +[[package]] +name = "idna" +version 
= "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" +dependencies = [ + "icu_normalizer", + "icu_properties", +] + +[[package]] +name = "indenter" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "964de6e86d545b246d84badc0fef527924ace5134f30641c203ef52ba83f58d5" + [[package]] name = "io-uring" version = "0.7.10" @@ -601,19 +832,57 @@ checksum = "03343451ff899767262ec32146f6d559dd759fdadf42ff0e227c7c48f72594b4" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.106", ] [[package]] name = "js-sys" -version = "0.3.77" +version = "0.3.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +checksum = "ec48937a97411dcb524a265206ccd4c90bb711fca92b2792c407f268825b9305" dependencies = [ "once_cell", "wasm-bindgen", ] +[[package]] +name = "kmip-protocol" +version = "0.5.0" +source = "git+https://github.com/NLnetLabs/kmip-protocol?branch=next#ad08d63ffd9bbb96ec29d1272d08244e86ed74e6" +dependencies = [ + "cfg-if", + "enum-display-derive", + "enum-flags", + "hex", + "kmip-ttlv", + "log", + "maybe-async", + "r2d2", + "rustc_version", + "rustls", + "rustls-pemfile", + "serde", + "serde_bytes", + "serde_derive", + "tracing", + "trait-set", + "webpki-roots", +] + +[[package]] +name = "kmip-ttlv" +version = "0.4.0" +source = "git+https://github.com/NLnetLabs/kmip-ttlv?branch=next#4ca144e19e69375a6ccd63cf40b0e61f89462f97" +dependencies = [ + "cfg-if", + "hex", + "maybe-async", + "rustc_version", + "serde", + "tracing", + "trait-set", +] + [[package]] name = 
"lazy_static" version = "1.5.0" @@ -628,15 +897,21 @@ checksum = "9fa0e2a1fcbe2f6be6c42e342259976206b383122fc152e872795338b5a3f3a7" [[package]] name = "libc" -version = "0.2.175" +version = "0.2.176" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543" +checksum = "58f929b4d672ea937a23a1ab494143d968337a5f47e56d0815df1e0890ddf174" [[package]] name = "linux-raw-sys" -version = "0.9.4" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" + +[[package]] +name = "litemap" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" +checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" [[package]] name = "lock_api" @@ -650,37 +925,35 @@ dependencies = [ [[package]] name = "log" -version = "0.4.27" +version = "0.4.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" +checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" [[package]] -name = "loom" -version = "0.7.2" +name = "matchers" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "419e0dc8046cb947daa77eb95ae174acfbddb7673b4151f56d1eed8e93fbfaca" +checksum = "d1525a2a28c7f4fa0fc98bb91ae755d1e2d1505079e05539e35bc876b5d65ae9" dependencies = [ - "cfg-if", - "generator", - "scoped-tls", - "tracing", - "tracing-subscriber", + "regex-automata", ] [[package]] -name = "matchers" -version = "0.1.0" +name = "maybe-async" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +checksum = 
"5cf92c10c7e361d6b99666ec1c6f9805b0bea2c3bd8c78dc6fe98ac5bd78db11" dependencies = [ - "regex-automata 0.1.10", + "proc-macro2", + "quote", + "syn 2.0.106", ] [[package]] name = "memchr" -version = "2.7.5" +version = "2.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" +checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" [[package]] name = "miniz_oxide" @@ -704,34 +977,32 @@ dependencies = [ [[package]] name = "moka" -version = "0.12.10" +version = "0.12.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9321642ca94a4282428e6ea4af8cc2ca4eac48ac7a6a4ea8f33f76d0ce70926" +checksum = "8261cd88c312e0004c1d51baad2980c66528dfdb2bee62003e643a4d8f86b077" dependencies = [ "async-lock", "crossbeam-channel", "crossbeam-epoch", "crossbeam-utils", + "equivalent", "event-listener", "futures-util", - "loom", "parking_lot", "portable-atomic", "rustc_version", "smallvec", "tagptr", - "thiserror", "uuid", ] [[package]] name = "nu-ansi-term" -version = "0.46.0" +version = "0.50.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +checksum = "d4a28e057d01f97e61255210fcff094d74ed0466038633e95017f5beb68e4399" dependencies = [ - "overload", - "winapi", + "windows-sys 0.52.0", ] [[package]] @@ -751,9 +1022,9 @@ dependencies = [ [[package]] name = "object" -version = "0.36.7" +version = "0.37.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" +checksum = "ff76201f031d8863c38aa7f905eca4f53abbfa15f609db4277d44cd8938f33fe" dependencies = [ "memchr", ] @@ -804,14 +1075,14 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.106", ] [[package]] name = "openssl-src" 
-version = "300.5.2+3.5.2" +version = "300.5.3+3.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d270b79e2926f5150189d475bc7e9d2c69f9c4697b185fa917d5a32b792d21b4" +checksum = "dc6bad8cd0233b63971e232cc9c5e83039375b8586d2312f31fda85db8f888c2" dependencies = [ "cc", ] @@ -829,12 +1100,6 @@ dependencies = [ "vcpkg", ] -[[package]] -name = "overload" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" - [[package]] name = "parking" version = "2.2.1" @@ -864,6 +1129,12 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "percent-encoding" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" + [[package]] name = "pin-project-lite" version = "0.2.16" @@ -897,6 +1168,15 @@ dependencies = [ "portable-atomic", ] +[[package]] +name = "potential_utf" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84df19adbe5b5a0782edcab45899906947ab039ccf4573713735ee7de1e6b08a" +dependencies = [ + "zerovec", +] + [[package]] name = "powerfmt" version = "0.2.0" @@ -933,9 +1213,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.40" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +checksum = "ce25767e7b499d1b604768e7cde645d14cc8584231ea6b295e9c9eb22c02e1d1" dependencies = [ "proc-macro2", ] @@ -946,6 +1226,17 @@ version = "5.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" +[[package]] +name = "r2d2" +version = "0.8.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"51de85fb3fb6524929c8a2eb85e6b6d363de4e8c48f9e2c2eac4944abc181c93" +dependencies = [ + "log", + "parking_lot", + "scheduled-thread-pool", +] + [[package]] name = "rand" version = "0.8.5" @@ -953,8 +1244,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha", - "rand_core", + "rand_chacha 0.3.1", + "rand_core 0.6.4", +] + +[[package]] +name = "rand" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" +dependencies = [ + "rand_chacha 0.9.0", + "rand_core 0.9.3", ] [[package]] @@ -964,7 +1265,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core 0.9.3", ] [[package]] @@ -976,6 +1287,15 @@ dependencies = [ "getrandom 0.2.16", ] +[[package]] +name = "rand_core" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" +dependencies = [ + "getrandom 0.3.3", +] + [[package]] name = "rayon" version = "1.11.0" @@ -1007,42 +1327,27 @@ dependencies = [ [[package]] name = "regex" -version = "1.11.2" +version = "1.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23d7fd106d8c02486a8d64e778353d1cffe08ce79ac2e82f540c86d0facf6912" +checksum = "8b5288124840bee7b386bc413c487869b360b2b4ec421ea56425128692f2a82c" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.10", - "regex-syntax 
0.8.6", + "regex-automata", + "regex-syntax", ] [[package]] name = "regex-automata" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" -dependencies = [ - "regex-syntax 0.6.29", -] - -[[package]] -name = "regex-automata" -version = "0.4.10" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b9458fa0bfeeac22b5ca447c63aaf45f28439a709ccd244698632f9aa6394d6" +checksum = "833eb9ce86d40ef33cb1306d8accf7bc8ec2bfea4355cbdebb3df68b40925cad" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.6", + "regex-syntax", ] -[[package]] -name = "regex-syntax" -version = "0.6.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" - [[package]] name = "regex-syntax" version = "0.8.6" @@ -1080,15 +1385,59 @@ dependencies = [ [[package]] name = "rustix" -version = "1.0.8" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11181fbabf243db407ef8df94a6ce0b2f9a733bd8be4ad02b4eda9602296cac8" +checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e" dependencies = [ "bitflags", "errno", "libc", "linux-raw-sys", - "windows-sys 0.60.2", + "windows-sys 0.61.1", +] + +[[package]] +name = "rustls" +version = "0.23.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd3c25631629d034ce7cd9940adc9d45762d46de2b0f57193c4443b92c6d4d40" +dependencies = [ + "log", + "once_cell", + "ring", + "rustls-pki-types", + "rustls-webpki", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls-pemfile" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5eebeaeb360c87bfb72e84abdb3447159c0eaececf1bef2aecd65a8be949d1c9" +dependencies = [ + "base64", +] + +[[package]] +name = "rustls-pki-types" +version = 
"1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79" +dependencies = [ + "zeroize", +] + +[[package]] +name = "rustls-webpki" +version = "0.103.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e10b3f4191e8a80e6b43eebabfac91e5dcecebb27a71f04e820c47ec41d314bf" +dependencies = [ + "ring", + "rustls-pki-types", + "untrusted", ] [[package]] @@ -1098,10 +1447,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" [[package]] -name = "scoped-tls" -version = "1.0.1" +name = "ryu" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" + +[[package]] +name = "scheduled-thread-pool" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19" +dependencies = [ + "parking_lot", +] [[package]] name = "scopeguard" @@ -1120,28 +1478,61 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.26" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" +checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" [[package]] name = "serde" -version = "1.0.219" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" +dependencies = [ + "serde_core", + "serde_derive", +] + +[[package]] +name = "serde_bytes" +version = "0.11.19" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5d440709e79d88e51ac01c4b72fc6cb7314017bb7da9eeff678aa94c10e3ea8" +dependencies = [ + "serde", + "serde_core", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.219" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.106", +] + +[[package]] +name = "serde_json" +version = "1.0.145" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", + "serde_core", ] [[package]] @@ -1187,12 +1578,35 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + [[package]] name = "strsim" version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", 
+] + [[package]] name = "syn" version = "2.0.106" @@ -1204,6 +1618,17 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "synstructure" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + [[package]] name = "tagptr" version = "0.2.0" @@ -1212,15 +1637,15 @@ checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" [[package]] name = "tempfile" -version = "3.21.0" +version = "3.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15b61f8f20e3a6f7e0649d825294eaf317edce30f82cf6026e7e4cb9222a7d1e" +checksum = "2d31c77bdf42a745371d260a26ca7163f1e0924b64afa0b688e61b5a9fa02f16" dependencies = [ "fastrand", "getrandom 0.3.3", "once_cell", "rustix", - "windows-sys 0.60.2", + "windows-sys 0.61.1", ] [[package]] @@ -1239,26 +1664,6 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e7a7de15468c6e65dd7db81cf3822c1ec94c71b2a3c1a976ea8e4696c91115c" -[[package]] -name = "thiserror" -version = "1.0.69" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" -dependencies = [ - "thiserror-impl", -] - -[[package]] -name = "thiserror-impl" -version = "1.0.69" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "thread_local" version = "1.1.9" @@ -1270,9 +1675,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.41" +version = "0.3.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a7619e19bc266e0f9c5e6686659d394bc57973859340060a69221e57dbc0c40" +checksum = 
"91e7d9e3bb61134e77bde20dd4825b97c010155709965fedf0f49bb138e52a9d" dependencies = [ "deranged", "itoa", @@ -1285,20 +1690,30 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.4" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9e9a38711f559d9e3ce1cdb06dd7c5b8ea546bc90052da6d06bb76da74bb07c" +checksum = "40868e7c1d2f0b8d73e4a8c7f0ff63af4f6d19be117e90bd73eb1d62cf831c6b" [[package]] name = "time-macros" -version = "0.2.22" +version = "0.2.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3526739392ec93fd8b359c8e98514cb3e8e021beb4e5f597b00a0221f8ed8a49" +checksum = "30cfb0125f12d9c277f35663a0a33f8c30190f4e4574868a330595412d34ebf3" dependencies = [ "num-conv", "time-core", ] +[[package]] +name = "tinystr" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b" +dependencies = [ + "displaydoc", + "zerovec", +] + [[package]] name = "tokio" version = "1.47.1" @@ -1325,7 +1740,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.106", ] [[package]] @@ -1359,7 +1774,7 @@ checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.106", ] [[package]] @@ -1385,14 +1800,14 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.19" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" +checksum = "2054a14f5307d601f88daf0553e1cbf472acc4f2c51afab632431cdcd72124d5" dependencies = [ "matchers", "nu-ansi-term", "once_cell", - "regex", + "regex-automata", "sharded-slab", "smallvec", "thread_local", @@ -1401,11 +1816,22 @@ dependencies = [ "tracing-log", ] +[[package]] +name = "trait-set" 
+version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "875c4c873cc824e362fa9a9419ffa59807244824275a44ad06fec9684fff08f2" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "unicode-ident" -version = "1.0.18" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" +checksum = "f63a545481291138910575129486daeaf8ac54aee4387fe7906919f7830c7d9d" [[package]] name = "unicode-xid" @@ -1419,6 +1845,24 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" +[[package]] +name = "url" +version = "2.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08bc136a29a3d1758e07a9cca267be308aeebf5cfd5a10f3f67ab2097683ef5b" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", + "serde", +] + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + [[package]] name = "utf8parse" version = "0.2.2" @@ -1427,9 +1871,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.18.0" +version = "1.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f33196643e165781c20a5ead5582283a7dacbb87855d867fbc2df3f81eddc1be" +checksum = "2f87b8aa10b915a06587d0dec516c282ff295b475d94abf425d62b57710070a2" dependencies = [ "getrandom 0.3.3", "js-sys", @@ -1456,44 +1900,54 @@ checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] name = "wasi" -version = "0.14.3+wasi-0.2.4" +version = "0.14.7+wasi-0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6a51ae83037bdd272a9e28ce236db8c07016dd0d50c27038b3f407533c030c95" +checksum = "883478de20367e224c0090af9cf5f9fa85bed63a95c1abf3afc5c083ebc06e8c" +dependencies = [ + "wasip2", +] + +[[package]] +name = "wasip2" +version = "1.0.1+wasi-0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" dependencies = [ "wit-bindgen", ] [[package]] name = "wasm-bindgen" -version = "0.2.100" +version = "0.2.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +checksum = "c1da10c01ae9f1ae40cbfac0bac3b1e724b320abfcf52229f80b547c0d250e2d" dependencies = [ "cfg-if", "once_cell", "rustversion", "wasm-bindgen-macro", + "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.100" +version = "0.2.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" +checksum = "671c9a5a66f49d8a47345ab942e2cb93c7d1d0339065d4f8139c486121b43b19" dependencies = [ "bumpalo", "log", "proc-macro2", "quote", - "syn", + "syn 2.0.106", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.100" +version = "0.2.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +checksum = "7ca60477e4c59f5f2986c50191cd972e3a50d8a95603bc9434501cf156a9a119" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -1501,75 +1955,40 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.100" +version = "0.2.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +checksum = "9f07d2f20d4da7b26400c9f4a0511e6e0345b040694e8a75bd41d578fa4421d7" dependencies = [ "proc-macro2", "quote", - 
"syn", + "syn 2.0.106", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.100" +version = "0.2.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +checksum = "bad67dc8b2a1a6e5448428adec4c3e84c43e561d8c9ee8a9e5aabeb193ec41d1" dependencies = [ "unicode-ident", ] [[package]] -name = "winapi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - -[[package]] -name = "windows" -version = "0.61.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9babd3a767a4c1aef6900409f85f5d53ce2544ccdfaa86dad48c91782c6d6893" -dependencies = [ - "windows-collections", - "windows-core", - "windows-future", - "windows-link", - "windows-numerics", -] - -[[package]] -name = "windows-collections" -version = "0.2.0" +name = "webpki-roots" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3beeceb5e5cfd9eb1d76b381630e82c4241ccd0d27f1a39ed41b2760b255c5e8" +checksum = "7e8983c3ab33d6fb807cfcdad2491c4ea8cbc8ed839181c7dfd9c67c83e261b2" dependencies = [ - "windows-core", + "rustls-pki-types", ] [[package]] name = "windows-core" -version = "0.61.2" +version = "0.62.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" +checksum = "6844ee5416b285084d3d3fffd743b925a6c9385455f64f6d4fa3031c4c2749a9" dependencies = [ "windows-implement", "windows-interface", @@ -1578,69 +1997,48 @@ dependencies = [ "windows-strings", ] -[[package]] -name = "windows-future" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc6a41e98427b19fe4b73c550f060b59fa592d7d686537eebf9385621bfbad8e" -dependencies = [ - "windows-core", - "windows-link", - "windows-threading", -] - [[package]] name = "windows-implement" -version = "0.60.0" +version = "0.60.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" +checksum = "edb307e42a74fb6de9bf3a02d9712678b22399c87e6fa869d6dfcd8c1b7754e0" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.106", ] [[package]] name = "windows-interface" -version = "0.59.1" +version = "0.59.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" +checksum = "c0abd1ddbc6964ac14db11c7213d6532ef34bd9aa042c2e5935f59d7908b46a5" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.106", ] [[package]] name = "windows-link" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" - -[[package]] -name = "windows-numerics" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9150af68066c4c5c07ddc0ce30421554771e528bde427614c61038bc2c92c2b1" -dependencies = [ - "windows-core", - "windows-link", -] +checksum = "45e46c0661abb7180e7b9c281db115305d49ca1709ab8242adf09666d2173c65" [[package]] name = "windows-result" -version = "0.3.4" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" +checksum = "7084dcc306f89883455a206237404d3eaf961e5bd7e0f312f7c91f57eb44167f" dependencies = [ "windows-link", ] [[package]] name = "windows-strings" -version = "0.4.2" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" +checksum = "7218c655a553b0bed4426cf54b20d7ba363ef543b52d515b3e48d7fd55318dda" dependencies = [ "windows-link", ] @@ -1669,7 +2067,16 @@ version = "0.60.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" dependencies = [ - "windows-targets 0.53.3", + "windows-targets 0.53.4", +] + +[[package]] +name = "windows-sys" +version = "0.61.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f109e41dd4a3c848907eb83d5a42ea98b3769495597450cf6d153507b166f0f" +dependencies = [ + "windows-link", ] [[package]] @@ -1690,9 +2097,9 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.53.3" +version = "0.53.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5fe6031c4041849d7c496a8ded650796e7b6ecc19df1a431c1a363342e5dc91" +checksum = "2d42b7b7f66d2a06854650af09cfdf8713e427a439c97ad65a6375318033ac4b" dependencies = [ "windows-link", "windows_aarch64_gnullvm 0.53.0", @@ -1705,15 +2112,6 @@ dependencies = [ "windows_x86_64_msvc 0.53.0", ] -[[package]] -name = "windows-threading" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b66463ad2e0ea3bbf808b7f1d371311c80e115c0b71d60efc142cafbcfb057a6" -dependencies = [ - "windows-link", -] - [[package]] name = "windows_aarch64_gnullvm" version = "0.52.6" @@ -1812,9 +2210,15 @@ checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" [[package]] name = "wit-bindgen" -version = "0.45.0" +version = "0.46.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "052283831dbae3d879dc7f51f3d92703a316ca49f91540417d38591826127814" +checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" + +[[package]] +name = "writeable" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" [[package]] name = "yansi" @@ -1822,28 +2226,106 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" +[[package]] +name = "yoke" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc" +dependencies = [ + "serde", + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", + "synstructure", +] + [[package]] name = "zerocopy" -version = "0.8.26" +version = "0.8.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1039dd0d3c310cf05de012d8a39ff557cb0d23087fd44cad61df08fc31907a2f" +checksum = "0894878a5fa3edfd6da3f88c4805f4c8558e2b996227a3d864f47fe11e38282c" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.26" +version = "0.8.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "zerofrom" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181" +checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.106", + "synstructure", ] [[package]] name = "zeroize" -version = "1.8.1" +version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" + +[[package]] +name = "zerotrie" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", +] + +[[package]] +name = "zerovec" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7aa2bd55086f1ab526693ecbe444205da57e25f4489879da80635a46d90e73b" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] diff --git a/Cargo.toml b/Cargo.toml index 74692c3a..a12c1d07 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "dnst" -version = "0.1.1-dev" +version = "0.1.0-alpha" edition = "2021" default-run = "dnst" readme = "README.md" @@ -10,16 +10,17 @@ description = "Rust reimplementation of important ldns programs." 
categories = ["command-line-utilities"] license = "BSD-3-Clause" keywords = ["DNS", "domain", "ldns"] -rust-version = "1.80" +rust-version = "1.82" [[bin]] name = "ldns" path = "src/bin/ldns.rs" [features] -default = ["openssl", "ring"] +default = ["kmip", "openssl", "ring"] # Cryptographic backends +kmip = ["domain/kmip", "dep:indenter", "dep:rand"] openssl = ["domain/openssl"] ring = ["domain/ring"] @@ -31,10 +32,7 @@ static-openssl = ["openssl/vendored"] bytes = "1.8.0" chrono = "0.4.38" clap = { version = "4.3.4", features = ["cargo", "derive", "wrap_help"] } -# Until a release of domain is made that includes the features we need, pin -# to a specific commit of the domain main branch to ensure that the dnst main -# branch does not get broken if domain main is not stable. -domain = { git = "https://github.com/NLnetLabs/domain.git", rev = "17fb0e38120c9939ca28462af082d88ae8bc8b1d", features = [ +domain = { git = "https://github.com/NLnetLabs/domain.git", branch = "patches-for-nameshed-prototype", features = [ "bytes", "net", "resolv", @@ -45,9 +43,11 @@ domain = { git = "https://github.com/NLnetLabs/domain.git", rev = "17fb0e38120c9 "unstable-validator", "unstable-zonetree" ] } +indenter = { version = "0.3.4", optional = true } lexopt = "0.3.0" rayon = "1.10.0" octseq = "0.5.2" +rand = { version = "0.9.2", optional = true } ring = "0.17.8" tokio = "1.40.0" openssl = { version = "*", features = ["vendored"], optional = true } @@ -60,73 +60,93 @@ jiff = { version = "0.2", default-features = false, features = ["alloc", "std"] # still uses it. And sharded-slab is used by tracing-subscriber, which is # used by domain, which is used by us. 
_unused_lazy_static = { package = "lazy_static", version = "1.0.2" } +serde_json = "1.0.137" +serde = "1.0.217" tracing = "0.1.41" tracing-subscriber = "0.3.19" +url = "2.5.4" +futures = "0.3.31" [dev-dependencies] const_format = " 0.2.33" test_bin = "0.4.0" tempfile = "3.20.0" regex = "1.11.1" -domain = { git = "https://github.com/NLnetLabs/domain.git", rev = "17fb0e38120c9939ca28462af082d88ae8bc8b1d", features = [ +domain = { git = "https://github.com/NLnetLabs/domain.git", branch = "patches-for-nameshed-prototype", features = [ "unstable-stelline", ] } pretty_assertions = "1.4.1" # Related reading: https://wiki.debian.org/Teams/RustPackaging/Policy [package.metadata.deb] +# Package a Cascade specific variant of dnst because Cascade depends on dnst +# and dnst replaces ldns-utils, so uses of Cascade would have their ldns-utils +# uninstalled but may want to use ldns-verify-zone for example with Cascade. +# So we make a separate cascade-dnst package that doesn't uninstall ldns-utils, +# for now at least. Another reason for a separate package is at present this +# keyset branch of dnst depends on as yet unreleased domain crate patches which +# is fine for the alpha of Cascade but not for a normal dnst release. +name = "cascade-dnst" depends = "$auto" section = "net" priority = "optional" assets = [ - ["target/release/dnst", "usr/bin/", "755"], - ["README.md", "usr/share/doc/dnst/", "644"], +# ["target/release/dnst", "usr/bin/", "755"], + # Install into the Cascade directory, and name separately to dnst, to + # avoid collisions with the real dnst package and binary. + ["target/release/dnst", "usr/libexec/cascade/cascade-dnst", "755"], + ["README.md", "usr/share/doc/cascade-dnst/", "644"], # TODO: Extend Ploutos to generate the man pages from sources. 
- ["doc/manual/build/man/dnst.1", "/usr/share/man/man1/dnst.1", "644"], - ["doc/manual/build/man/dnst-key2ds.1", "/usr/share/man/man1/dnst-key2ds.1", "644"], - ["doc/manual/build/man/dnst-keygen.1", "/usr/share/man/man1/dnst-keygen.1", "644"], - ["doc/manual/build/man/dnst-notify.1", "/usr/share/man/man1/dnst-notify.1", "644"], - ["doc/manual/build/man/dnst-nsec3-hash.1", "/usr/share/man/man1/dnst-nsec3-hash.1", "644"], - ["doc/manual/build/man/dnst-signzone.1", "/usr/share/man/man1/dnst-signzone.1", "644"], - ["doc/manual/build/man/dnst-update.1", "/usr/share/man/man1/dnst-update.1", "644"], - ["doc/manual/build/man/ldns-key2ds.1", "/usr/share/man/man1/ldns-key2ds.1", "644"], - ["doc/manual/build/man/ldns-keygen.1", "/usr/share/man/man1/ldns-keygen.1", "644"], - ["doc/manual/build/man/ldns-notify.1", "/usr/share/man/man1/ldns-notify.1", "644"], - ["doc/manual/build/man/ldns-nsec3-hash.1", "/usr/share/man/man1/ldns-nsec3-hash.1", "644"], - ["doc/manual/build/man/ldns-signzone.1", "/usr/share/man/man1/ldns-signzone.1", "644"], - ["doc/manual/build/man/ldns-update.1", "/usr/share/man/man1/ldns-update.1", "644"], + # Don't install the normal dnst man pages in case the user actually does + # have the real dnst package installed. 
+ ["doc/manual/build/man/dnst.1", "/usr/share/man/man1/cascade-dnst.1", "644"], + #["doc/manual/build/man/dnst-key2ds.1", "/usr/share/man/man1/dnst-key2ds.1", "644"], + #["doc/manual/build/man/dnst-keygen.1", "/usr/share/man/man1/dnst-keygen.1", "644"], + ["doc/manual/build/man/dnst-keyset.1", "/usr/share/man/man1/cascade-dnst-keyset.1", "644"], + #["doc/manual/build/man/dnst-notify.1", "/usr/share/man/man1/dnst-notify.1", "644"], + #["doc/manual/build/man/dnst-nsec3-hash.1", "/usr/share/man/man1/dnst-nsec3-hash.1", "644"], + #["doc/manual/build/man/dnst-signzone.1", "/usr/share/man/man1/dnst-signzone.1", "644"], + #["doc/manual/build/man/dnst-update.1", "/usr/share/man/man1/dnst-update.1", "644"], + #["doc/manual/build/man/ldns-key2ds.1", "/usr/share/man/man1/ldns-key2ds.1", "644"], + #["doc/manual/build/man/ldns-keygen.1", "/usr/share/man/man1/ldns-keygen.1", "644"], + #["doc/manual/build/man/ldns-notify.1", "/usr/share/man/man1/ldns-notify.1", "644"], + #["doc/manual/build/man/ldns-nsec3-hash.1", "/usr/share/man/man1/ldns-nsec3-hash.1", "644"], + #["doc/manual/build/man/ldns-signzone.1", "/usr/share/man/man1/ldns-signzone.1", "644"], + #["doc/manual/build/man/ldns-update.1", "/usr/share/man/man1/ldns-update.1", "644"], ] changelog = "target/debian/changelog" # this will be generated by the pkg workflow copyright = "Copyright (c) 2024, NLnet Labs. All rights reserved." maintainer-scripts = "pkg/debian" -# See: https://www.debian.org/doc/debian-policy/ch-relationships.html#replacing-whole-packages-forcing-their-removal -conflicts = "ldnsutils" -replaces = "ldnsutils" +# # See: https://www.debian.org/doc/debian-policy/ch-relationships.html#replacing-whole-packages-forcing-their-removal +# conflicts = "ldnsutils" +# replaces = "ldnsutils" # Related reading: https://docs.fedoraproject.org/en-US/packaging-guidelines/Rust/ [package.metadata.generate-rpm] +name = "cascade-dnst" # see explanation above in the cargo-deb section. 
assets = [ - { source = "target/release/dnst", dest = "/usr/bin/dnst", mode = "755" }, + { source = "target/release/dnst", dest = "/usr/libexec/cascade/cascade-dnst", mode = "755" }, # TODO: Extend Ploutos to generate the man pages from sources. - { source = "doc/manual/build/man/dnst.1", dest = "/usr/share/man/man1/dnst.1", mode = "644", doc = true }, - { source = "doc/manual/build/man/dnst-key2ds.1", dest = "/usr/share/man/man1/dnst-key2ds.1", mode = "644", doc = true }, - { source = "doc/manual/build/man/dnst-keygen.1", dest = "/usr/share/man/man1/dnst-keygen.1", mode = "644", doc = true }, - { source = "doc/manual/build/man/dnst-notify.1", dest = "/usr/share/man/man1/dnst-notify.1", mode = "644", doc = true }, - { source = "doc/manual/build/man/dnst-nsec3-hash.1", dest = "/usr/share/man/man1/dnst-nsec3-hash.1", mode = "644", doc = true }, - { source = "doc/manual/build/man/dnst-signzone.1", dest = "/usr/share/man/man1/dnst-signzone.1", mode = "644", doc = true }, - { source = "doc/manual/build/man/dnst-update.1", dest = "/usr/share/man/man1/dnst-update.1", mode = "644", doc = true }, - { source = "doc/manual/build/man/ldns-key2ds.1", dest = "/usr/share/man/man1/ldns-key2ds.1", mode = "644", doc = true }, - { source = "doc/manual/build/man/ldns-keygen.1", dest = "/usr/share/man/man1/ldns-keygen.1", mode = "644", doc = true }, - { source = "doc/manual/build/man/ldns-notify.1", dest = "/usr/share/man/man1/ldns-notify.1", mode = "644", doc = true }, - { source = "doc/manual/build/man/ldns-nsec3-hash.1", dest = "/usr/share/man/man1/ldns-nsec3-hash.1", mode = "644", doc = true }, - { source = "doc/manual/build/man/ldns-signzone.1", dest = "/usr/share/man/man1/ldns-signzone.1", mode = "644", doc = true }, - { source = "doc/manual/build/man/ldns-update.1", dest = "/usr/share/man/man1/ldns-update.1", mode = "644", doc = true }, + { source = "doc/manual/build/man/dnst.1", dest = "/usr/share/man/man1/cascade-dnst.1", mode = "644", doc = true }, + #{ source = 
"doc/manual/build/man/dnst-key2ds.1", dest = "/usr/share/man/man1/dnst-key2ds.1", mode = "644", doc = true }, + #{ source = "doc/manual/build/man/dnst-keygen.1", dest = "/usr/share/man/man1/dnst-keygen.1", mode = "644", doc = true }, + { source = "doc/manual/build/man/dnst-keyset.1", dest = "/usr/share/man/man1/cascade-dnst-keyset.1", mode = "644", doc = true }, + #{ source = "doc/manual/build/man/dnst-notify.1", dest = "/usr/share/man/man1/dnst-notify.1", mode = "644", doc = true }, + #{ source = "doc/manual/build/man/dnst-nsec3-hash.1", dest = "/usr/share/man/man1/dnst-nsec3-hash.1", mode = "644", doc = true }, + #{ source = "doc/manual/build/man/dnst-signzone.1", dest = "/usr/share/man/man1/dnst-signzone.1", mode = "644", doc = true }, + #{ source = "doc/manual/build/man/dnst-update.1", dest = "/usr/share/man/man1/dnst-update.1", mode = "644", doc = true }, + #{ source = "doc/manual/build/man/ldns-key2ds.1", dest = "/usr/share/man/man1/ldns-key2ds.1", mode = "644", doc = true }, + #{ source = "doc/manual/build/man/ldns-keygen.1", dest = "/usr/share/man/man1/ldns-keygen.1", mode = "644", doc = true }, + #{ source = "doc/manual/build/man/ldns-notify.1", dest = "/usr/share/man/man1/ldns-notify.1", mode = "644", doc = true }, + #{ source = "doc/manual/build/man/ldns-nsec3-hash.1", dest = "/usr/share/man/man1/ldns-nsec3-hash.1", mode = "644", doc = true }, + #{ source = "doc/manual/build/man/ldns-signzone.1", dest = "/usr/share/man/man1/ldns-signzone.1", mode = "644", doc = true }, + #{ source = "doc/manual/build/man/ldns-update.1", dest = "/usr/share/man/man1/ldns-update.1", mode = "644", doc = true }, ] # These get set using cargo-generate-rpm --set-metadata at package build time. #post_trans_script = ... #post_uninstall_script = ... -# Set Obsoletes per https://docs.fedoraproject.org/en-US/packaging-guidelines/#renaming-or-replacing-existing-packages. 
-[package.metadata.generate-rpm.obsoletes] -ldns-utils = "< 0:1.8.4-2" +# # Set Obsoletes per https://docs.fedoraproject.org/en-US/packaging-guidelines/#renaming-or-replacing-existing-packages. +# [package.metadata.generate-rpm.obsoletes] +# ldns-utils = "< 0:1.8.4-2" diff --git a/doc/manual/build/man/dnst-key2ds.1 b/doc/manual/build/man/dnst-key2ds.1 index 64de0f6b..4d11e5c8 100644 --- a/doc/manual/build/man/dnst-key2ds.1 +++ b/doc/manual/build/man/dnst-key2ds.1 @@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. -.TH "DNST-KEY2DS" "1" "Jun 02, 2025" "0.1.0-rc1" "dnst" +.TH "DNST-KEY2DS" "1" "Oct 03, 2025" "0.1.0-rc2" "dnst" .SH NAME dnst-key2ds \- Generate DS RRs from the DNSKEYs in a keyfile .SH SYNOPSIS @@ -39,6 +39,13 @@ dnst-key2ds \- Generate DS RRs from the DNSKEYs in a keyfile .sp The following file will be created for each key: \fBK++.ds\fP\&. The base name \fBK++\fP will be printed to stdout. +.SH ARGUMENTS +.INDENT 0.0 +.TP +.B +\fB\fP must be a file containing one or more RFC 4034 \fBDNSKEY\fP +resource records in presentation format. +.UNINDENT .SH OPTIONS .INDENT 0.0 .TP @@ -67,11 +74,6 @@ Write the generated DS records to stdout instead of a file. Print the help text (short summary with \fB\-h\fP, long help with \fB\-\-help\fP). .UNINDENT -.INDENT 0.0 -.TP -.B \-V, \-\-version -Print the version. -.UNINDENT .SH AUTHOR NLnet Labs .SH COPYRIGHT diff --git a/doc/manual/build/man/dnst-keygen.1 b/doc/manual/build/man/dnst-keygen.1 index 6453752b..c51b2362 100644 --- a/doc/manual/build/man/dnst-keygen.1 +++ b/doc/manual/build/man/dnst-keygen.1 @@ -1,3 +1,4 @@ +'\" t .\" Man page generated from reStructuredText. . . @@ -27,7 +28,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. 
-.TH "DNST-KEYGEN" "1" "Jun 02, 2025" "0.1.0-rc1" "dnst" +.TH "DNST-KEYGEN" "1" "Oct 03, 2025" "0.1.0-rc2" "dnst" .SH NAME dnst-keygen \- Generate a new key pair for a domain name .SH SYNOPSIS @@ -57,17 +58,23 @@ in zone file format. It is only created for key signing keys. .sp .sp Upon completion, \fBK++\fP will be printed. +.SH ARGUMENTS +.INDENT 0.0 +.TP +.B +The owner name of the apex of the zone which the generated key is +intended to sign. +.UNINDENT .SH OPTIONS .INDENT 0.0 .TP .B \-a -Use the given signing algorithm. +Use the given signing algorithm. Mandatory. .sp Possible values are: .TS -center; -|l|l|l|. -_ +box center; +l|l|l. T{ \fBMnemonic\fP T} T{ @@ -122,7 +129,6 @@ T} T{ T} T{ ED448 T} -_ .TE .UNINDENT .INDENT 0.0 diff --git a/doc/manual/build/man/dnst-keyset.1 b/doc/manual/build/man/dnst-keyset.1 new file mode 100644 index 00000000..98aba2da --- /dev/null +++ b/doc/manual/build/man/dnst-keyset.1 @@ -0,0 +1,947 @@ +.\" Man page generated from reStructuredText. +. +. +.nr rst2man-indent-level 0 +. +.de1 rstReportMargin +\\$1 \\n[an-margin] +level \\n[rst2man-indent-level] +level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] +- +\\n[rst2man-indent0] +\\n[rst2man-indent1] +\\n[rst2man-indent2] +.. +.de1 INDENT +.\" .rstReportMargin pre: +. RS \\$1 +. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] +. nr rst2man-indent-level +1 +.\" .rstReportMargin post: +.. +.de UNINDENT +. RE +.\" indent \\n[an-margin] +.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] +.nr rst2man-indent-level -1 +.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] +.in \\n[rst2man-indent\\n[rst2man-indent-level]]u +.. 
+.TH "DNST-KEYSET" "1" "Oct 03, 2025" "0.1.0-rc2" "dnst" +.SH NAME +dnst-keyset \- Manage DNSSEC signing keys for a domain +.SH SYNOPSIS +.sp +\fBdnst keyset\fP \fB\-c \fP \fB[OPTIONS]\fP \fB\fP \fB[ARGS]\fP +.SH DESCRIPTION +.sp +The \fBkeyset\fP subcommand manages a set of DNSSEC (\X'tty: link https://www.rfc-editor.org/rfc/rfc9364'\fI\%RFC 9364\fP\X'tty: link') signing keys. +This subcommand is meant to be part of a DNSSEC signing solution. +The \fBkeyset\fP subcommand manages signing keys and generates a signed DNSKEY RRset. +A separate zone signer (not part of dnst) is expected to use the zone +signing keys in the key set, +sign the zone and include the DNSKEY RRset (as well as the CDS and CDNSKEY +RRsets). +The keyset subcommand supports keys stored in files and, when the dnst +program is built with the kmip feature flag, keys stored in a +Hardware Security Module (HSM) that can be accessed using the +Key Management Interoperability Protocol (KMIP). +.sp +The keyset subcommand operates on one zone at a time. +For each zone, keyset +maintains a configuration file that stores configuration parameters for +key generation (which algorithm to use, whether to use a CSK or a +KSK and ZSK pair), parameters for key rolls (whether key rolls are automatic +or not), the lifetimes of keys and signatures, etc. +The keyset subcommand also maintains a state file for each zone. +The state file lists the keys in the key set, the current key roll state, +and has the DNSKEY, CDS, and CDNSKEY RRsets. +key generation (which algorithm to use, whether to use a CSK and a +KSK and a ZSK), parameters for key rolls (whether key rolls are automatic +or not), the lifetimes of keys and signatures, etc. +The keyset subcommand also maintains state file for each zone. +The state file lists the keys in the key set, the current key roll state, +and has the DNSKEY, CDS, and CDNSKEY RRsets. 
+.sp
+In addition to the configuration and state files, keyset maintains files for
+keys that are stored in the filesystem.
+Additionally, keyset can optionally maintain a credentials file that
+contains user names and passwords for the KMIP connections.
+.sp
+The keyset subcommand uses the Keyset type from the Rust Domain crate to store
+the set of keys together with their properties such as whether a key
+should sign the zone, timestamps when keys are created or become stale.
+The Keyset data type also implements the basic logic of key rolls.
+.sp
+The keyset subcommand supports importing existing keys: both standalone
+public keys as well as public/private key pairs can be imported.
+A standalone public key can only be imported from a file whereas public/private
+key pairs can be either files or references to keys stored in an HSM.
+Note that the public and private key either need to be both files or both
+stored in an HSM.
+.SS Signer
+.sp
+The zone signer is expected to read the state file that is maintained by
+keyset to find the current zone signing keys, to find the signed
+DNSKEY/CDS/CDNSKEY RRset and to find the KMIP configuration.
+.sp
+See below for a description of the state file.
+.sp
+The signer needs to poll the state file for changes.
+If the signer is in full control of running keyset, then the state file needs
+to be checked for changes after running keyset with commands that can
+potentially change the state file (status subcommands, etc. do not change
+the state file).
+If however keyset can be invoked independently of the signer then the signer needs
+to periodically check for changes, for example, at least every hour.
+.SS Cron
+.sp
+The signatures of the DNSKEY, CDS and CDNSKEY RRsets need to be updated
+periodically.
+In addition, key roll automation requires periodic invocation of keyset
+to start new key rolls and to make progress on ones that are currently
+executing.
+.sp
+For this purpose, keyset has a cron subcommand.
+This subcommand handles any house keeping work that needs to be done.
+The cron subcommand can be executed at regular times, for example,
+once an hour from the cron(1) utility.
+.sp
+However, keyset also maintains a field in the state file, called
+\fBcron\-next\fP, that specifies when the cron subcommand should be run next.
+Running the cron subcommand early is fine, the current time is compared
+against the \fBcron\-next\fP field and the subcommand exits early if
+\fBcron\-next\fP is in the future.
+Running the cron subcommand late may cause signatures to expire.
+.SS Create / Init
+.sp
+The initialisation of a key set for a zone consists of two steps.
+First the create subcommand creates a configuration file with mostly default
+values and a state file without any keys.
+The init subcommand finishes the initialisation.
+.sp
+This two step procedure allows configuration parameters to be set between
+the create and the init subcommand, for example, the algorithm to use.
+It also allows existing public/private key pairs to be imported.
+.sp
+The init subcommand checks if any public/private key pairs have been imported.
+If so, init checks if both roles (KSK and ZSK) are present.
+A single CSK combines both roles.
+Absent a CSK, both a KSK and a ZSK need to be present otherwise the init command
+fails.
+Any imported public keys are ignored by init.
+.sp
+If no public/private key pairs have been imported then the init subcommand
+will start an algorithm roll.
+The algorithm roll will create new keys based on the current configuration:
+either as files or in an HSM and either a CSK or a pair of KSK and ZSK.
+.SS Key Rolls
+.sp
+The keyset subcommand can perform four different types of key rolls:
+KSK rolls, ZSK rolls, CSK rolls and algorithm rolls.
+A KSK roll replaces one KSK with a new KSK.
+Similarly, a ZSK roll replaces one ZSK with a new ZSK.
+A CSK roll also replaces a CSK with a new CSK but the roll also treats a
+pair of KSK and ZSK keys as equivalent to a CSK.
+So a CSK roll can also roll from KSK plus ZSK to a new CSK or from a CSK
+to a new KSK and ZSK pair.
+Note that a roll from KSK plus ZSK to a new KSK plus ZSK pair
+is also supported.
+Finally, an algorithm roll is similar to a CSK roll, but designed in
+a specific way to handle the case where the new key or keys have an algorithm
+that is different from the one used by the current signing keys.
+.sp
+The KSK and ZSK rolls are completely independent and can run in parallel.
+Consistency checks are performed at the start of a key roll.
+For example, a KSK key roll cannot start when another KSK roll is in progress or
+when a CSK or algorithm roll is in progress.
+A KSK roll cannot start either when the current signing key is a CSK or
+when the configuration specifies that the new signing key has to be a CSK.
+Finally, KSK rolls are also prevented when the algorithm for new keys is
+different from the one used by the current key.
+Similar limitations apply to the other roll types. Note however that an
+algorithm roll can be started even when it is not needed.
+.sp
+A key roll consists of six steps: \fBstart\-roll\fP, \fBpropagation1\-complete\fP,
+\fBcache\-expired1\fP, \fBpropagation2\-complete\fP, \fBcache\-expired2\fP, and
+\fBroll\-done\fP\&.
+For each key roll these six steps follow in the same order.
+Associated with each step is a (possibly empty) list of actions.
+Actions fall in three categories.
+The first category consists of actions that require updating the zone or the
+parent zone.
+The second category consists of actions that require checking if changes
+have propagated to all nameservers and require reporting of the
+TTLs of the changed RRset as seen at the nameservers.
+Finally, the last category requires waiting for changes to propagate to
+all nameservers but there is no need to report the TTL.
+.sp
+Typically, in a list of actions, an action of the first category is paired
+with one from the second or third category.
+For example, \fBUpdateDnskeyRrset\fP is paired with either
+\fBReportDnskeyPropagated\fP or \fBWaitDnskeyPropagated\fP\&.
+.sp
+A key roll starts with the \fBstart\-roll\fP step, which creates new keys.
+The next step, \fBpropagation1\-complete\fP has a TTL argument which is the
+maximum of the TTLs of the Report actions.
+The \fBcache\-expired1\fP and \fBcache\-expired2\fP have no associated actions.
+They simply require waiting for the TTL (in seconds) reported by the
+previous \fBpropagation1\-complete\fP or \fBpropagation2\-complete\fP\&.
+The \fBpropagation2\-complete\fP step is similar to the \fBpropagation1\-complete\fP step.
+Finally, the \fBroll\-done\fP step typically has associated Wait actions.
+These actions are cleanup actions and are harmless but confusing if they
+are skipped.
+.sp
+The keyset subcommand provides fine grained control over automation.
+Automation is configured separately for each of the four roll types.
+For each roll type, there are four booleans called \fBstart\fP, \fBreport\fP,
+\fBexpire\fP and \fBdone\fP\&.
+.sp
+When set, the \fBstart\fP boolean directs the cron subcommand to start a key roll
+when a relevant key has expired.
+A KSK or a ZSK key roll can start automatically if respectively a KSK or a ZSK
+has expired.
+A CSK roll can start automatically when a CSK has expired but also when a KSK or
+ZSK has expired and the new key will be a CSK.
+Finally, an algorithm roll can start automatically when the new algorithm is
+different from the one used by the existing keys and any key has expired.
+.sp
+The \fBreport\fP flags control the automation of the \fBpropagation1\-complete\fP
+and \fBpropagation2\-complete\fP steps.
+When enabled, the cron subcommand contacts the nameservers of the zone or
+(in the case of \fBReportDsPropagated\fP, the nameservers of the parent zone)
+to check if changes have propagated to all nameservers.
+The check obtains the list of nameservers from the apex of the (parent) zone
+and collects all IPv4 and IPv6 addresses.
+For the \fBReportDnskeyPropagated\fP and \fBReportDsPropagated\fP actions, each address is
+then queried to see if the DNSKEY RRset or DS RRset match
+the KSKs.
+The \fBReportRrsigPropagated\fP action is more complex.
+First the entire zone is transferred from the primary nameserver listed in the
+SOA record.
+Then all relevant signatures are checked if they have the expected key tags.
+The maximum TTL in the zone is recorded to be reported.
+Finally, all addresses of listed nameservers are checked to see if they
+have a SOA serial that is greater than or equal to the one that was checked.
+.sp
+Automation of \fBcache\-expired1\fP and \fBcache\-expired2\fP is enabled by the
+\fBexpire\fP boolean.
+When enabled, the cron subcommand simply checks if enough time has passed
+to invoke \fBcache\-expired1\fP or \fBcache\-expired2\fP\&.
+.sp
+Finally the \fBdone\fP boolean enables automation of the \fBroll\-done\fP step.
+This automation is very similar to the \fBreport\fP automation.
+The only difference is that the Wait actions are automated so propagation
+is tracked but no TTL is reported.
+.sp
+Fine grained control over automation makes it possible to automate
+KSK or algorithm rolls without starting them automatically.
+Or let a key roll progress automatically except that the \fBcache\-expired\fP
+steps must be done manually in order to be able to insert extra manual steps.
+.sp
+The \fBreport\fP and \fBdone\fP automations require that keyset has network access
+to all nameservers of the zone and all nameservers of the parent.
+.SS HSM Support (KMIP)
+.sp
+The keyset subcommand supports keys in Hardware Security Modules (HSM) through
+the KMIP protocol.
+The most common way to access keys in HSMs is through the PKCS #11 interface.
+The PKCS #11 interface involves loading a shared library into the process
+that needs to access the HSM.
+This is unattractive for two reasons:
+.INDENT 0.0
+.IP 1. 3
+Loading an arbitrary (binary) shared library negates the memory security
+features of an application written in Rust. A mistake in the shared library
+could corrupt memory that is used by the application. For this reason it is
+attractive to load the shared library into a separate process.
+.IP 2. 3
+Setting up the run\-time environment of the shared library is often complex.
+The library may require specific environment variables or access to specific
+files or devices. This complexity impacts every application that wants
+to use the shared library.
+.UNINDENT
+.sp
+For these reasons it was decided to write a separate program, called
+kmip2pkcs11, that uses the PKCS #11 standard to have access to an HSM and
+provides a KMIP server interface. This makes it possible to contain both
+the configuration complexity and the possibility of memory corruption in
+a single program.
+Other programs, such as the keyset subcommand then use the KMIP protocol to
+indirectly access the HSM via the kmip2pkcs11 program.
+Support for the KMIP protocol also makes it possible to directly connect to
+KMIP capable HSMs.
+.sp
+The keyset subcommand stores two pieces of KMIP configuration.
+The first is a list of KMIP servers.
+Each KMIP server has a \fBserver ID\fP that is used in key references to specify
+in which server the key is stored.
+A server also has a DNS name or IP address and a port to connect to the server.
+The second piece of configuration is the ID of the server to be used for
+creating new keys.
+It is possible to specify that no server is to be used for new keys, in that
+case new keys will be created by keyset and stored as files.
+.sp
+Authentication can be done either with a user name and password or with
+a client\-side certificate.
+The user name and password are KMIP concepts that are mapped by the kmip2pkcs11
+server to a PKCS #11 slot or token name and the PIN.
+With this approach the kmip2pkcs11 server does not have to store secrets
+that provide access to the HSM.
+User names and passwords are stored in a separate file to avoid storing
+secrets in the keyset configuration or state files.
+.sp
+Unlike other configuration, the list of KMIP servers is stored in the state
+file.
+The reason for doing that is that signers also need the same KMIP server list
+to be able to sign a zone.
+By storing the server list in the state file, a signer has to read only the
+state file to be able to use KMIP keys.
+.sp
+Options that can be configured for a server include not checking the
+server\(aqs certificate, specifying the server\(aqs certificate or certificate
+authority, various connection parameters such as connect timeout, read
+timeout, write timeout and maximum response size.
+.sp
+When generating new keys, the label of the key can have a user supplied prefix.
+This can be used, for example, to show that a key is for
+development or testing.
+Finally, some HSMs allow longer labels than others.
+On HSMs that allow longer labels than the 32 character default, raising the
+maximum label length can avoid truncation for longer domain names.
+On HSMs that have a limit that is lower than the default, setting the correct
+length avoids errors when creating keys.
+.SS Importing Keys
+.sp
+There are three basic ways to import existing keys: public\-key,
+a public/private key pair from files or a public/private key pair in an HSM.
+.sp
+A public key can only be imported from a file.
+When the key is imported the name of the file is converted to a URL and stored in the key set and
+the key will be included in the DNSKEY RRset.
+This is useful for certain migrations and to manually implement a
+multi\-signer DNSSEC signing setup.
+Note that automation does not work for this case.
+.sp
+A public/private key pair can be imported from files.
+It is sufficient to give the name of the file that holds the public key if
+the filename ends in \fB\&.key\fP and the filename of the private key is the
+same except that it ends in \fB\&.private\fP\&.
+If this is not the case then the private key filename must be specified
+separately.
+.sp
+In order to use keys stored in an HSM the \fBdnst keyset kmip add\-server\fP subcommand must first be used to associate the KMIP server connection settings with a user defined server ID.
+.sp
+The first server defined becomes the default. If a default KMIP server has been defined it will be used to generate all future keys, unless the \fBdnst keyset kmip disable\fP command is issued. If more than one KMIP server is defined, only one can be the default server at any time. Use the \fBdnst keyset kmip set\-default\-server\fP command to change which KMIP server will be used to generate future keys. Note that like all \fBdnst keyset\fP subcommands, the KMIP subcommands set behaviour for a single zone. Additionally there are \fBlist\-servers\fP, \fBget\-server\fP, \fBmodify\-server\fP and \fBremove\-server\fP subcommands for inspecting and altering the configured KMIP server settings.
+.sp
+Importing a public/private key stored in an HSM requires specifying the KMIP
+server ID, the ID of the public key, the ID of the private key, the
+DNSSEC algorithm of the key and the flags (typically 256 for a ZSK and
+257 for a KSK).
+.sp
+Normally, keyset assumes ownership of any keys it holds.
+This means that when a key is deleted from the key set, the keyset subcommand
+will also delete the files that hold the public and private keys or delete the
+keys from the HSM that was used to create them.
+.sp
+For an imported public/private key pair this is considered too dangerous
+because another signer may need the keys.
+For this reason keys are imported in so\-called \fBdecoupled\fP state.
+When a decoupled key is deleted, only the reference to the key is deleted
+from the key set, the underlying keys are left untouched.
+There is a \fB\-\-coupled\fP option to tell keyset to take ownership of the key.
+.SS Migration
+.sp
+The keyset subcommand has no direct support for migration.
+Migration has to be done manually using the import commands.
+The semantics of the import commands are described in the previous section.
+This section focuses on how the import command can be used to perform a
+migration.
+.sp
+There are three migration strategies: 1) importing the existing signer\(aqs
+(private) signing keys, 2) a full multi\-signer migration and 3)
+a partial multi\-signer migration.
+.SS Importing the existing signer\(aqs signing keys
+.sp
+Importing the existing signer\(aqs public/private key pairs is the easiest
+migration mechanism.
+The basic process is the following:
+.INDENT 0.0
+.IP \(bu 2
+Disable (automatic) key rolls on the existing signer.
+.IP \(bu 2
+Disable automatic key rolls before executing the create command.
+For example by setting the KSK, ZSK, and CSK validities to \fBoff\fP\&.
+.IP \(bu 2
+Import the KSK and ZSK (or CSK) as files or using KMIP between the
+create and init commands.
+.IP \(bu 2
+Check with tools such as ldns\-verify\-zone that the new zone is secure with
+the existing DS record at the parent.
+.IP \(bu 2
+Switch the downstream secondaries that serve the zone to receive the
+signed zone from the new signer.
+.IP \(bu 2
+Perform key rolls for the KSK and ZSK (or the CSK).
+.IP \(bu 2
+(If wanted) enable automatic key rolls.
+.IP \(bu 2
+Remove the zone from the old signer.
+.UNINDENT
+.sp
+Note that after the key roll, the signer has to make sure that it
+keeps access to signing keys.
+In case of KMIP keys, the old signer can also delete the keys from the HSM.
+For this reason it is best to perform key rolls of all keys before removing
+the zone from the old signer.
+.sp
+This document describes key management. Care should be taken that other
+parameters, such as the use of NSEC or NSEC3, are
+the same (to avoid confusion) and that the SOA serial policy is the same
+(to avoid problems with zone transfers).
+.SS Full multi\-signer migration
+.sp
+The basic idea is to execute the following steps:
+.INDENT 0.0
+.IP \(bu 2
+Disable (automatic) key rolls on the existing signer.
+.IP \(bu 2
+If the parent supports automatic updating of the DS record using CDS/CDNSKEY
+(RFC 8078) then disable the generation of CDS/CDNSKEY records on the
+existing signer or disable CDS/CDNSKEY processing for this zone at the parent.
+.IP \(bu 2
+Issue the create command.
+.IP \(bu 2
+Disable automatic key rolls.
+.IP \(bu 2
+(Disable CDS/CDNSKEY generation. Keyset cannot disable CDS/CDNSKEY generation at the moment)
+.IP \(bu 2
+Import the public key of the existing signer\(aqs ZSK (or CSK) using the
+\fBkeyset import public\-key\fP subcommand.
+.IP \(bu 2
+Issue the init command.
+.IP \(bu 2
+Make sure in the next step to only add a DS record at the parent, not
+delete the existing one.
+.IP \(bu 2
+Complete the initial algorithm roll.
+.IP \(bu 2
+Verify using tools such as ldns\-verify\-zone that the zone is correctly
+signed.
+.IP \(bu 2
+Import the public key of the new ZSK (or CSK) in the existing signer.
+.IP \(bu 2
+Verify that all nameservers that serve the zone have the new ZSK in the
+DNSKEY RRset of the existing signer.
+.IP \(bu 2
+Transition the nameservers from the existing signer to the new signer.
+.IP \(bu 2
+Let caches expire for the DNSKEY RRset of the old signer and the
+zone RRSIGs of the old signer.
+.IP \(bu 2
+Remove the DS record for the old signer from the parent.
+.IP \(bu 2
+Remove the imported public key.
+.IP \(bu 2
+(If wanted) enable automatic key rolls and generation of CDS/CDNSKEY
+records.
+.UNINDENT
+.SS Partial multi\-signer migration
+.sp
+A partial multi\-signer migration is the right approach when the existing
+signer cannot import the new signer\(aqs ZSK.
+A requirement is that the new signer can transfer the signed zone from the
+existing signer and that the new signer supports so\-called \(dqpass\-through\(dq
+mode.
+In pass\-through mode a signer leaves signatures for zone records unchanged
+but does replace the DNSKEY, CDS and CDNSKEY RRsets with the ones from
+this subcommand.
+.sp
+The basic idea is to execute the following steps:
+.INDENT 0.0
+.IP \(bu 2
+Disable (automatic) key rolls on the existing signer.
+.IP \(bu 2
+If the parent supports automatic updating of the DS record using CDS/CDNSKEY
+(RFC 8078) then disable the generation of CDS/CDNSKEY records in the
+existing signer or disable CDS/CDNSKEY processing for this zone at the parent.
+.IP \(bu 2
+Issue the create command.
+.IP \(bu 2
+Disable automatic key rolls.
+.IP \(bu 2
+(Disable CDS/CDNSKEY generation. Keyset cannot disable CDS/CDNSKEY generation at the moment)
+.IP \(bu 2
+Import the public key of the existing signer\(aqs ZSK (or CSK).
+.IP \(bu 2
+Issue the init command.
+.IP \(bu 2
+Switch the new signer to pass\-through mode. The signer has to transfer the
+signed zone from the existing signer.
+.IP \(bu 2
+Make sure in the next step to only add a DS record at the parent, not
+delete the existing one.
+.IP \(bu 2
+Complete the initial algorithm roll.
+.IP \(bu 2
+Verify using tools such as ldns\-verify\-zone that the zone is correctly
+signed.
+.IP \(bu 2
+Transition the nameservers from the existing signer to the new signer.
+.IP \(bu 2
+Let caches expire for the DNSKEY RRset of the old signer.
+.IP \(bu 2
+Remove the DS record for the old signer from the parent.
+.IP \(bu 2
+Switch off pass\-through mode.
+.IP \(bu 2
+Let caches expire for the zone RRSIGs of the old signer.
+.IP \(bu 2
+Remove the imported public key.
+.IP \(bu 2
+(If wanted) enable automatic key rolls and generation of CDS/CDNSKEY
+records.
+.UNINDENT
+.SH OPTIONS
+.INDENT 0.0
+.TP
+.B \-c
+Configuration file.
+.UNINDENT
+.INDENT 0.0
+.TP
+.B \-h, \-\-help
+Print the help text (short summary with \fB\-h\fP, long help with
+\fB\-\-help\fP).
+.UNINDENT
+.SH COMMANDS
+.sp
+The keyset subcommand provides the following commands:
+.INDENT 0.0
+.IP \(bu 2
+create
+.sp
+Create empty configuration and state files for a domain.
+.INDENT 2.0
+.TP
+.B \-n
+The name of the domain.
+.UNINDENT
+.INDENT 2.0
+.TP
+.B \-s
+The name of the state file.
+.UNINDENT
+.IP \(bu 2
+init
+.sp
+Initialize the keyset.
+If a KSK and ZSK (or a CSK) have been imported then the DNSKEY RRset will
+be created and signed.
+If there are no keys, then a KSK and a ZSK will be created (unless the
+use\-csk option is set to true) and an algorithm roll will be started.
+The init command will fail if the keyset has been initialized already.
+.IP \(bu 2
+ksk, zsk, csk, and algorithm
+.sp
+The ksk, zsk, csk, and algorithm commands perform manual key roll steps.
+These commands have the following subcommands:
+.INDENT 2.0
+.IP \(bu 2
+start\-roll
+.sp
+Start a key roll of the type specified by the command.
+.IP \(bu 2
+propagation1\-complete
+.sp
+Inform keyset that the changed RRsets and signatures have propagated.
+Report the maximum TTL of the report actions.
+.IP \(bu 2
+cache\-expired1
+.sp
+Inform keyset that enough time has passed that caches should have expired.
+Note that this command will fail if invoked too early.
+.IP \(bu 2
+propagation2\-complete
+.sp
+This command is similar to propagation1\-complete.
+.IP \(bu 2
+cache\-expired2
+.sp
+This command is similar to cache\-expired1.
+.IP \(bu 2
+roll\-done
+.sp
+Inform keyset that the changed RRsets and signatures have propagated
+and that any wait actions have been executed successfully.
+.UNINDENT
+.IP \(bu 2
+import
+.sp
+The import command can either import a public key in a file or a
+public/private key pair in either files or as KMIP references.
+.INDENT 2.0
+.IP \(bu 2
+public\-key
+.sp
+A reference to a public key in a file is added to the keyset.
+Imported public keys are added to the DNSKEY RRset.
+.IP \(bu 2
+ksk, zsk, csk
+.sp
+A key pair is imported as a KSK, ZSK, or CSK.
+When a key is imported, there is the question of what to do when the
+imported key is later deleted.
+By default, keyset imports keys in \fBdecoupled\fP state.
+When a decoupled key is later removed, only the reference is deleted from
+the key set.
+The file that contains the key is not deleted and the key is not deleted
+from an HSM.
+Passing the option \fB\-\-coupled\fP when importing a key directs keyset to
+take ownership of the key.
+.sp
+The key pair can be imported in two ways:
+.INDENT 2.0
+.IP \(bu 2
+file
+.sp
+The argument refers to the public key. The filename of the
+private key is derived from the public key unless the \fB\-\-private\-key\fP
+option is used to specify the filename that holds the private key.
+.INDENT 2.0
+.TP
+.B \-\-coupled
+Take ownership of the imported keys.
+.UNINDENT
+.INDENT 2.0
+.TP
+.B \-\-private\-key
+Explicitly pass the name of the file that holds the private key.
+.UNINDENT
+.IP \(bu 2
+kmip
+.sp
+The argument specifies one of the KMIP servers that has been
+configured using the \fBkmip add\-server\fP command.
+The and arguments are the KMIP identifiers of
+respectively the public key and the private key.
+The DNSSEC algorithm is specified using the argument and
+finally the argument (usually 256 or 257) is the value of the
+flags field in the DNSKEY record for the public key.
+.INDENT 2.0
+.TP
+.B \-\-coupled
+Take ownership of the imported keys.
+.UNINDENT
+.UNINDENT
+.UNINDENT
+.IP \(bu 2
+remove\-key
+.sp
+Remove a key or key pair from the key set.
+The argument is the URL of the public key.
+If the key is \fBcoupled\fP then the files that hold the keys are also removed
+or, in case of KMIP keys, the keys are removed from the HSM.
+Normally, keys are only removed when they are stale.
+.INDENT 2.0
+.TP
+.B \-\-force
+Force a key to be removed even if the key is not stale.
+.UNINDENT
+.INDENT 2.0
+.TP
+.B \-\-continue
+Continue when removing a key file fails or when a key cannot be removed
+from an HSM.
+.UNINDENT
+.IP \(bu 2
+status
+.sp
+Provide status information about key rolls, key expiration and signature
+expiration.
+.INDENT 2.0
+.TP
+.B \-v, \-\-verbose
+Make status verbose.
+.UNINDENT
+.IP \(bu 2
+actions
+.sp
+Show the actions that have to be executed for any key rolls.
+.IP \(bu 2
+keys
+.sp
+Give detailed information about all keys in the key set.
+.IP \(bu 2
+get
+.sp
+Get the values of the following configuration variables: use\-csk,
+autoremove, algorithm, ds\-algorithm, dnskey\-lifetime, cds\-lifetime.
+This is a subset of all configuration variables.
+.sp
+Additionally, the dnskey argument returns the current DNSKEY RRset plus
+signatures, cds returns the CDS and CDNSKEY RRsets plus signatures and
+ds returns DS records that should be added to the parent zone.
+.IP \(bu 2
+set
+.sp
+Set configuration variables.
+Note that setting configuration variables after the create command
+but before the init command can be used to affect the initial key creation.
+.INDENT 2.0
+.IP \(bu 2
+use\-csk
+.sp
+When true, new keys will be created as CSK otherwise a KSK and a ZSK
+will be created.
+.IP \(bu 2
+autoremove
+.sp
+When true, keys that are stale will be removed automatically.
+Currently there is no delay in removing keys.
+.IP \(bu 2
+algorithm
+.sp
+Set the algorithm to be used when creating new keys. Supported values
+are RSASHA256, RSASHA512, ECDSAP256SHA256, ECDSAP384SHA384, ED25519,
+and ED448.
+Not all values are supported for KMIP keys.
+.INDENT 2.0
+.TP
+.B \-b
+For RSA keys, the length of the key in bits.
+.UNINDENT
+.IP \(bu 2
+auto\-ksk, auto\-zsk, auto\-csk, auto\-algorithm
+.sp
+These commands take four boolean arguments: start, report, expire, and done.
+When set to true, the corresponding step or steps of the key roll specified
+by the command is executed automatically.
+.sp
+For example, \fBauto\-csk true false true false\fP means that
+CSK rolls will start automatically, that the propagation1\-complete,
+propagation2\-complete, and roll\-done need to be executed manually.
+The cache\-expired1 and cache\-expired2 steps are executed automatically.
+.IP \(bu 2
+ds\-algorithm
+.sp
+Set the hash algorithm to be used for generating DS records.
+Possible values are \fBSHA\-256\fP and \fBSHA\-384\fP\&.
+.IP \(bu 2
+dnskey\-lifetime , cds\-lifetime
+.sp
+When a DNSKEY RRset is signed (dnskey\-lifetime) or when CDS or CDNSKEY
+RRsets are signed (cds\-lifetime), how far in the future are the signatures
+set to expire.
+The duration is an integer followed by a suffix, \fBs\fP or \fBsecs\fP for
+seconds, \fBm\fP or \fBmins\fP for minutes, \fBh\fP or \fBhours\fP, \fBd\fP or \fBdays\fP, \fBw\fP or \fBweeks\fP\&.
+.IP \(bu 2
+dnskey\-remain\-time , cds\-remain\-time
+.sp
+The minimum amount of remaining time that signatures for the DNSKEY RRset
+(dnskey\-remain\-time) or the CDS or CDNSKEY RRsets (cds\-remain\-time) have
+to be valid.
+New signatures are generated when the remaining time drops below the
+specified duration.
+For the syntax of the duration see \fBdnskey\-lifetime\fP\&.
+.IP \(bu 2
+dnskey\-inception\-offset , cds\-inception\-offset
+.sp
+When generating signatures for the DNSKEY RRset (dnskey\-inception\-offset)
+or the CDS and CDNSKEY RRsets (cds\-inception\-offset), set the inception
+timestamp this amount in the past to compensate for clocks that are a
+bit off or in the wrong time zone.
+For the syntax of the duration see \fBdnskey\-lifetime\fP\&.
+.IP \(bu 2
+ksk\-validity | \fBoff\fP, zsk\-validity | \fBoff\fP, csk\-validity | \fBoff\fP
+.sp
+Set how long a KSK, ZSK, or CSK is considered valid.
+The special value \fBoff\fP means that no limit has been set.
+For the syntax of the duration see \fBdnskey\-lifetime\fP\&.
+.sp
+When a key is no longer considered valid and automatic starting of the
+appropriate key roll has been enabled then a key roll will start at the
+next invocation of the cron command.
+.sp
+The status command shows which keys are no longer valid or when their
+validity will end.
+.IP \(bu 2
+update\-ds\-command
+.sp
+Set a command to run when the DS records in the parent zone need
+to be updated.
+This command can, for example, alert the operator or use an API provided
+by the parent zone to update the DS records automatically.
+.UNINDENT
+.IP \(bu 2
+show
+.sp
+Show all configuration variables.
+.IP \(bu 2
+cron
+.sp
+Execute any automatic steps such as refreshing signatures or automatic steps
+in key rolls.
+.IP \(bu 2
+kmip
+.sp
+The kmip command manages the list of configured KMIP servers and the
+default server to use for generating new keys.
+The kmip command has the following subcommands:
+.INDENT 2.0
+.IP \(bu 2
+disable
+.sp
+Disable use of KMIP for generating new keys.
+.IP \(bu 2
+add\-server
+.sp
+Add a KMIP server with name and DNS name or IP address
+.
+The name of the server is used in a key reference to identify which KMIP
+server holds the key.
+.INDENT 2.0
+.TP
+.B \-\-port
+TCP port to connect to the KMIP server on. The default port is 5696.
+.UNINDENT +.INDENT 2.0 +.TP +.B \-\-pending +Add the server but don\(aqt make it the default. +.UNINDENT +.INDENT 2.0 +.TP +.B \-\-credential\-store +Optional path to a JSON file to read/write username/password +credentials from/to. +.UNINDENT +.INDENT 2.0 +.TP +.B \-\-username +Optional username to authenticate to the KMIP server as. +.UNINDENT +.INDENT 2.0 +.TP +.B \-\-password +Optional password to authenticate to the KMIP server with. +.UNINDENT +.INDENT 2.0 +.TP +.B \-\-client\-cert +.INDENT 7.0 +.TP +.B Optional path to a TLS certificate to authenticate to the KMIP server +with. +.UNINDENT +.UNINDENT +.INDENT 2.0 +.TP +.B \-\-client\-key +Optional path to a private key for client certificate authentication. +.UNINDENT +.INDENT 2.0 +.TP +.B \-\-insecure +Whether or not to accept the KMIP server TLS certificate without +verifying it. +.UNINDENT +.INDENT 2.0 +.TP +.B \-\-server\-cert +Optional path to a TLS PEM certificate for the server. +.UNINDENT +.INDENT 2.0 +.TP +.B \-\-ca\-cert +Optional path to a TLS PEM certificate for a Certificate Authority. +.UNINDENT +.INDENT 2.0 +.TP +.B \-\-connect\-timeout +TCP connect timeout. Default 3 seconds. +.UNINDENT +.INDENT 2.0 +.TP +.B \-\-read\-timeout +TCP response read timeout. Default 30 seconds. +.UNINDENT +.INDENT 2.0 +.TP +.B \-\-write\-timeout +TCP request write timeout. Default 3 seconds. +.UNINDENT +.INDENT 2.0 +.TP +.B \-\-max\-response\-bytes +Maximum KMIP response size to accept (in bytes). Default 8192 bytes. +.UNINDENT +.INDENT 2.0 +.TP +.B \-\-key\-label\-prefix +Can be used to denote the s/w that created the key, and/or to indicate +which installation/environment it belongs to, e.g. dev, test, prod, +etc. +.UNINDENT +.INDENT 2.0 +.TP +.B \-\-key\-label\-max\-bytes +Maximum label length (in bytes) permitted by the HSM. Default 32 bytes. +.UNINDENT +.IP \(bu 2 +modify\-server +.sp +Modify the settings of the server with ID . 
This subcommand +takes most of the options documented at \fBkmip add\-server\fP\&. +Some options have the same name but are slightly different. +There are also a few additional options. +The new and modified options are listed below. +.INDENT 2.0 +.TP +.B \-\-address +Modify the hostname or IP address of the KMIP server. +.UNINDENT +.INDENT 2.0 +.TP +.B \-\-no\-credentials +Disable use of username / password authentication. +Note: This will remove any credentials from the credential\-store for +this server id. +.UNINDENT +.INDENT 2.0 +.TP +.B \-\-no\-client\-auth +Disable use of TLS client certificate authentication. +.UNINDENT +.INDENT 2.0 +.TP +.B \-\-insecure +Modify whether or not to accept the KMIP server TLS certificate +without verifying it. +.UNINDENT +.IP \(bu 2 +remove\-server +.sp +Remove an existing non\-default KMIP server. +To remove the default KMIP server use \fIkmip disable\fP first. +A server cannot be removed if there are keys that reference it. +.IP \(bu 2 +set\-default\-server +.sp +Set the default KMIP server to use for key generation. +.IP \(bu 2 +get\-server +.sp +Get the details of an existing KMIP server. +.IP \(bu 2 +list\-servers +.sp +List all configured KMIP servers. +.UNINDENT +.UNINDENT +.SH AUTHOR +NLnet Labs +.SH COPYRIGHT +2024–2025, NLnet Labs +.\" Generated by docutils manpage writer. +. diff --git a/doc/manual/build/man/dnst-notify.1 b/doc/manual/build/man/dnst-notify.1 index a4b0976a..d2d9d1cc 100644 --- a/doc/manual/build/man/dnst-notify.1 +++ b/doc/manual/build/man/dnst-notify.1 @@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. 
-.TH "DNST-NOTIFY" "1" "Jun 02, 2025" "0.1.0-rc1" "dnst" +.TH "DNST-NOTIFY" "1" "Oct 03, 2025" "0.1.0-rc2" "dnst" .SH NAME dnst-notify \- Send a NOTIFY message to a list of name servers .SH SYNOPSIS @@ -35,17 +35,30 @@ dnst-notify \- Send a NOTIFY message to a list of name servers \fBdnst notify\fP \fB[OPTIONS]\fP \fB\-z \fP \fB...\fP .SH DESCRIPTION .sp -\fBdnst notify\fP sends a NOTIFY message to the specified name servers. A name -server can be specified as a domain name or IP address. +\fBdnst notify\fP sends a NOTIFY message to the specified name servers. .sp This tells them that an updated zone is available at the primaries. It can perform TSIG signatures, and it can add a SOA serial number of the updated zone. If a server already has that serial number it will disregard the message. +.SH ARGUMENTS +.INDENT 0.0 +.TP +.B ... +One or more name servers to which NOTIFY messages will be sent, by +default on port 53. +.sp +Each name server can be specified as a domain name or IP address. +.UNINDENT .SH OPTIONS .INDENT 0.0 .TP .B \-z -The zone to send the NOTIFY for. +The zone to send the NOTIFY for. Mandatory. +.UNINDENT +.INDENT 0.0 +.TP +.B \-I
+Source IP to send the message from. .UNINDENT .INDENT 0.0 .TP diff --git a/doc/manual/build/man/dnst-nsec3-hash.1 b/doc/manual/build/man/dnst-nsec3-hash.1 index 8f94c25e..3c979993 100644 --- a/doc/manual/build/man/dnst-nsec3-hash.1 +++ b/doc/manual/build/man/dnst-nsec3-hash.1 @@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. -.TH "DNST-NSEC3-HASH" "1" "Jun 02, 2025" "0.1.0-rc1" "dnst" +.TH "DNST-NSEC3-HASH" "1" "Oct 03, 2025" "0.1.0-rc2" "dnst" .SH NAME dnst-nsec3-hash \- Print out the NSEC3 hash of a domain name .SH SYNOPSIS @@ -36,6 +36,12 @@ dnst-nsec3-hash \- Print out the NSEC3 hash of a domain name .SH DESCRIPTION .sp \fBdnst nsec3\-hash\fP prints the NSEC3 hash of a given domain name. +.SH ARGUMENTS +.INDENT 0.0 +.TP +.B +The domain name to generate an NSEC3 hash for. +.UNINDENT .SH OPTIONS .INDENT 0.0 .TP @@ -61,11 +67,6 @@ in hexadecimal format. Defaults to an empty salt. Print the help text (short summary with \fB\-h\fP, long help with \fB\-\-help\fP). .UNINDENT -.INDENT 0.0 -.TP -.B \-V, \-\-version -Print the version. -.UNINDENT .SH AUTHOR NLnet Labs .SH COPYRIGHT diff --git a/doc/manual/build/man/dnst-signzone.1 b/doc/manual/build/man/dnst-signzone.1 index f8dde182..24148249 100644 --- a/doc/manual/build/man/dnst-signzone.1 +++ b/doc/manual/build/man/dnst-signzone.1 @@ -27,16 +27,18 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. 
-.TH "DNST-SIGNZONE" "1" "Jun 02, 2025" "0.1.0-rc1" "dnst" +.TH "DNST-SIGNZONE" "1" "Oct 03, 2025" "0.1.0-rc2" "dnst" .SH NAME dnst-signzone \- Sign the zone with the given key(s) .SH SYNOPSIS .sp -\fBdnst signzone\fP \fB[OPTIONS]\fP \fB\fP \fB...\fP +\fBdnst signzone\fP \fB[OPTIONS]\fP \fB\-o \fP \fB\fP \fB...\fP .SH DESCRIPTION .sp \fBdnst signzone\fP signs the zonefile with the given key(s). .sp +Signing a zone adds DNS Security Extensions (DNSSEC) resource records +.sp Keys must be specified by their base name (usually \fBK++\fP), i.e. WITHOUT the \fB\&.private\fP or \fB\&.key\fP extension. Both \fB\&.private\fP and \fB\&.key\fP files are required. @@ -79,7 +81,7 @@ Set the inception date of signatures to this date (see .INDENT 0.0 .TP .B \-o -Set the origin for the zone. Mandatory. +Use this owner name as the apex of the zone. Mandatory. .UNINDENT .INDENT 0.0 .TP @@ -173,11 +175,6 @@ The following options can be used with \fB\-n\fP to override the default NSEC3 settings used. .INDENT 0.0 .TP -.B \-a -Specify the hashing algorithm. Defaults to SHA\-1. -.UNINDENT -.INDENT 0.0 -.TP .B \-s Specify the salt as a hex string. Defaults to \fB\-\fP, meaning empty salt. .UNINDENT diff --git a/doc/manual/build/man/dnst-update.1 b/doc/manual/build/man/dnst-update.1 index 298376c2..727a6c5b 100644 --- a/doc/manual/build/man/dnst-update.1 +++ b/doc/manual/build/man/dnst-update.1 @@ -27,53 +27,168 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. 
-.TH "DNST-UPDATE" "1" "Jun 02, 2025" "0.1.0-rc1" "dnst" +.TH "DNST-UPDATE" "1" "Oct 03, 2025" "0.1.0-rc2" "dnst" .SH NAME dnst-update \- Send a dynamic update packet to update an IP (or delete all existing IPs) for a domain name .SH SYNOPSIS .sp -\fBdnst update\fP \fB\fP \fB[ZONE]\fP \fB\fP -\fB[ ]\fP +\fBdnst update\fP \fB[OPTIONS]\fP \fB\fP \fB\fP +.sp +\fBdnst update\fP \fB[OPTIONS]\fP \fB\fP \fBadd\fP \fB\fP \fB[RDATA]...\fP +.sp +\fBdnst update\fP \fB[OPTIONS]\fP \fB\fP \fBdelete\fP \fB\fP \fB[RDATA]...\fP +.sp +\fBdnst update\fP \fB[OPTIONS]\fP \fB\fP \fBclear\fP \fB\fP .SH DESCRIPTION .sp -\fBdnst update\fP sends a dynamic update packet to update an IP (or delete all -existing IPs) for a domain name. +\fBdnst update\fP sends an RFC 2136 Dynamic Update message to the name servers +for a zone to add, update, or delete arbitrary Resource Records for a domain +name. +.sp +The message to be sent can be optionally authenticated using a given TSIG key. +.sp +\fBdnst update [...] add\fP adds the given RRs to the domain. +.sp +\fBdnst update [...] delete\fP deletes the given RRs from the domain. It can be +used to delete individual RRs or a whole RRset. +.sp +\fBdnst update [...] clear\fP clears (deletes) all RRs of any type from the +domain name. .SH ARGUMENTS .INDENT 0.0 .TP .B -The domain name to update the IP address of +The domain name of the RR(s) to update. +.UNINDENT +.INDENT 0.0 +.TP +.B +Which action to take: add, delete, or clear. +.UNINDENT +.SH OPTIONS: +.INDENT 0.0 +.TP +.B \-c, \-\-class +Class +.sp +Defaults to IN. +.UNINDENT +.INDENT 0.0 +.TP +.B \-t, \-\-ttl +TTL in seconds or with unit suffix (s, m, h, d). +.sp +Is only used by the \fBadd\fP command and is otherwise ignored. +.sp +Defaults to 3600. +.UNINDENT +.INDENT 0.0 +.TP +.B \-s, \-\-server +The name server to send the update to. +.sp +By default, the update will be sent to the list of name servers fetched +from the zone\(aqs NS RRset as per RFC 2136. 
+.UNINDENT +.INDENT 0.0 +.TP +.B \-z, \-\-zone +The zone the domain name belongs to (to skip a SOA query) .UNINDENT .INDENT 0.0 .TP -.B -The zone to send the update to (if omitted, derived from SOA record) +.B \-y, \-\-tsig +TSIG credentials for the UPDATE packet .UNINDENT .INDENT 0.0 .TP -.B -The IP to update the domain with (\fBnone\fP to remove any existing IPs) +.B \-\-rrset\-exists +Require that at least one RR with the given NAME and TYPE exists. +This option can be provided multiple times, with format \fB +\fP each, to build up a list of RRs. +.sp +If the domain name is relative, it will be relative to the zone\(aqs apex. +.sp +[aliases: \-\-rrset] .UNINDENT .INDENT 0.0 .TP -.B -TSIG key name +.B \-\-rrset\-exists\-exact +Require that an RRset exists and contains exactly the RRs with the given +NAME, TYPE, and RDATA. This option can be provided multiple times, each +with one RR in zonefile format, to build up one or more RRsets that is +required to exist. CLASS and TTL can be omitted. +.sp +If the domain name is relative, it will be relative to the zone\(aqs apex. +.sp +[aliases: \-\-rrset\-exact] .UNINDENT .INDENT 0.0 .TP -.B -TSIG algorithm (e.g. \(dqhmac\-sha256\(dq) +.B \-\-rrset\-non\-existent +RRset does not exist. This option can be provided multiple times, with +format \fB \fP each, to build up a list of RRs that +specify that no RRs with a specified NAME and TYPE can exist. +.sp +If the domain name is relative, it will be relative to the zone\(aqs apex. +.sp +[aliases: \-\-rrset\-empty] .UNINDENT .INDENT 0.0 .TP -.B -Base64 encoded TSIG key data. +.B \-\-name\-in\-use +Name is in use. This option can be provided multiple times, with format +\fB\fP each, to collect a list of NAMEs that must own at least +one RR. +.sp +Note that this prerequisite is NOT satisfied by empty nonterminals. +.sp +If the domain name is relative, it will be relative to the zone\(aqs apex. 
+.sp +[aliases: \-\-name\-used] +.UNINDENT +.INDENT 0.0 +.TP +.B \-\-name\-not\-in\-use +Name is not in use. This option can be provided multiple times, with +format \fB\fP each, to collect a list of NAMEs that must NOT +own any RRs. +.sp +Note that this prerequisite IS satisfied by empty nonterminals. +.sp +If the domain name is relative, it will be relative to the zone\(aqs apex. +.sp +[aliases: \-\-name\-unused] .UNINDENT .INDENT 0.0 .TP .B \-h, \-\-help Print the help text (short summary with \fB\-h\fP, long help with -\fB\-\-help\fP). +\fB\-\-help\fP). Can also be used on the individual sub commands. +.UNINDENT +.SH ARGUMENTS FOR ADD AND DELETE +.INDENT 0.0 +.TP +.B +The RR type to add or delete. +.UNINDENT +.INDENT 0.0 +.TP +.B [RDATA]... +One or more RDATA arguments for \fBadd\fP, and zero or more for +\fBdelete\fP\&. +.sp +Each argument corresponds to a single RR\(aqs RDATA, so beware of (shell and +DNS) quoting rules. +.sp +Each RDATA argument will be parsed as if it was read from a zone file. +.nf +Examples: +\fBdnst update some.example.com add AAAA ::1 2001:db8::\fP +\fBdnst update some.example.com add TXT \(aq\(dqSpacious String\(dq \(dqAnother +string for the same TXT record\(dq\(aq \(aq\(dqThis is another TXT RR\(dq\(aq\fP +.fi +.sp .UNINDENT .SH AUTHOR NLnet Labs diff --git a/doc/manual/build/man/dnst.1 b/doc/manual/build/man/dnst.1 index 390b0b61..0cc6eda5 100644 --- a/doc/manual/build/man/dnst.1 +++ b/doc/manual/build/man/dnst.1 @@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. -.TH "DNST" "1" "Jun 02, 2025" "0.1.0-rc1" "dnst" +.TH "DNST" "1" "Oct 03, 2025" "0.1.0-rc2" "dnst" .SH NAME dnst \- DNS Management Tools .SH SYNOPSIS @@ -42,7 +42,25 @@ managing DNS servers and DNS zones. .sp Please consult the manual pages for these individual commands for more information. 
-.SH DNST COMMANDS +.SH OPTIONS +.INDENT 0.0 +.TP +.B \-v, \-\-verbosity +Set the verbosity to 0\-5 or a level name (\fBoff\fP, \fBerror\fP, \fBwarn\fP, +\fBinfo\fP, \fBdebug\fP or \fBtrace\fP). Defaults to \fBwarn\fP\&. +.UNINDENT +.INDENT 0.0 +.TP +.B \-h, \-\-help +Print the help text (short summary with \fB\-h\fP, long help with +\fB\-\-help\fP). +.UNINDENT +.INDENT 0.0 +.TP +.B \-V, \-\-version +Print the version. +.UNINDENT +.SH COMMANDS .INDENT 0.0 .TP .B \fI\%dnst\-key2ds\fP (1) @@ -51,6 +69,9 @@ Generate DS RRs from the DNSKEYs in a keyfile. .B \fI\%dnst\-keygen\fP (1) Generate a new key pair for a domain name. .TP +.B \fI\%dnst\-keyset\fP (1) +Manage DNSSEC signing keys for a domain. +.TP .B \fI\%dnst\-notify\fP (1) Send a NOTIFY message to a list of name servers. .TP diff --git a/doc/manual/build/man/ldns-key2ds.1 b/doc/manual/build/man/ldns-key2ds.1 index cedbf6ed..2e8f7e5c 100644 --- a/doc/manual/build/man/ldns-key2ds.1 +++ b/doc/manual/build/man/ldns-key2ds.1 @@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. -.TH "LDNS-KEY2DS" "1" "Jun 02, 2025" "0.1.0-rc1" "dnst" +.TH "LDNS-KEY2DS" "1" "Oct 03, 2025" "0.1.0-rc2" "dnst" .SH NAME ldns-key2ds \- Generate DS RRs from the DNSKEYs in a keyfile .SH SYNOPSIS diff --git a/doc/manual/build/man/ldns-keygen.1 b/doc/manual/build/man/ldns-keygen.1 index ff5156f3..2454331d 100644 --- a/doc/manual/build/man/ldns-keygen.1 +++ b/doc/manual/build/man/ldns-keygen.1 @@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. 
-.TH "LDNS-KEYGEN" "1" "Jun 02, 2025" "0.1.0-rc1" "dnst" +.TH "LDNS-KEYGEN" "1" "Oct 03, 2025" "0.1.0-rc2" "dnst" .SH NAME ldns-keygen \- Generate a new key pair for a domain name .SH SYNOPSIS @@ -35,12 +35,13 @@ ldns-keygen \- Generate a new key pair for a domain name \fBldns\-keygen\fP \fB[OPTIONS]\fP \fB\fP .SH DESCRIPTION .sp -\fBldns\-keygen\fP is used to generate a private/public keypair. When run, it will -create 3 files; a \fB\&.key\fP file with the public DNSKEY, a \fB\&.private\fP file -with the private keydata and a \fB\&.ds\fP file with the DS record of the DNSKEY -record. +\fBldns\-keygen\fP is used to generate a private/public keypair. .sp -ldns\-keygen prints the basename for the key files: \fBK++\fP +When run, it will create 3 files; a \fB\&.key\fP file with the public DNSKEY, a +\fB\&.private\fP file with the private keydata and a \fB\&.ds\fP file with the DS +record of the DNSKEY record. +.sp +\fBldns\-keygen\fP prints the basename for the key files: \fBK++\fP .SH OPTIONS .INDENT 0.0 .TP @@ -49,6 +50,9 @@ Create a key with this algorithm. Specifying \(aqlist\(aq here gives a list of supported algorithms. Several alias names are also accepted (from older versions and other software), the list gives names from the RFC. Also the plain algorithm number is accepted. +.sp +Note: Unlike the original LDNS, this implementation does not support +creation of symmetric keys (for TSIG). .UNINDENT .INDENT 0.0 .TP diff --git a/doc/manual/build/man/ldns-notify.1 b/doc/manual/build/man/ldns-notify.1 index ec29d6e9..7bcdb434 100644 --- a/doc/manual/build/man/ldns-notify.1 +++ b/doc/manual/build/man/ldns-notify.1 @@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. 
-.TH "LDNS-NOTIFY" "1" "Jun 02, 2025" "0.1.0-rc1" "dnst" +.TH "LDNS-NOTIFY" "1" "Oct 03, 2025" "0.1.0-rc2" "dnst" .SH NAME ldns-notify \- Send a NOTIFY message to a list of name servers .SH SYNOPSIS diff --git a/doc/manual/build/man/ldns-nsec3-hash.1 b/doc/manual/build/man/ldns-nsec3-hash.1 index 7122223a..c98bb2cb 100644 --- a/doc/manual/build/man/ldns-nsec3-hash.1 +++ b/doc/manual/build/man/ldns-nsec3-hash.1 @@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. -.TH "LDNS-NSEC3-HASH" "1" "Jun 02, 2025" "0.1.0-rc1" "dnst" +.TH "LDNS-NSEC3-HASH" "1" "Oct 03, 2025" "0.1.0-rc2" "dnst" .SH NAME ldns-nsec3-hash \- Print out the NSEC3 hash of a domain name .SH SYNOPSIS diff --git a/doc/manual/build/man/ldns-signzone.1 b/doc/manual/build/man/ldns-signzone.1 index e62f6de6..380e4efc 100644 --- a/doc/manual/build/man/ldns-signzone.1 +++ b/doc/manual/build/man/ldns-signzone.1 @@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. -.TH "LDNS-SIGNZONE" "1" "Jun 02, 2025" "0.1.0-rc1" "dnst" +.TH "LDNS-SIGNZONE" "1" "Oct 03, 2025" "0.1.0-rc2" "dnst" .SH NAME ldns-signzone \- Sign the zone with the given key(s) .SH SYNOPSIS @@ -75,7 +75,7 @@ file, and have the Secure Entry Point flag set. .IP \(bu 2 Public keys corresponding to \fB\&.private\fP key MUST be supplied, either as DNSKEY RRs in the given zone or as \fB\&.key\fP files. This -Implementation is not able to generate missing public keys. +implementation is not able to generate missing public keys. .IP \(bu 2 Supported DNSKEY algorithms are the ones supported by the domain crate. Supported algorithms include RSASHA256, @@ -151,14 +151,16 @@ Use NSEC3 instead of NSEC. 
If specified, you can use extra options (see .INDENT 0.0 .TP .B \-o -Use this as the origin for the zone (only necessary for zonefiles with -relative names and no $ORIGIN). +Use this owner name as the apex of the zone. +.sp +If not specified the owner name of the first SOA record will be used as +the apex of the zone. .UNINDENT .INDENT 0.0 .TP .B \-u Set the SOA serial in the resulting zonefile to the given number of -seconds since Jan 1st 1970. +seconds since January 1st 1970. .UNINDENT .INDENT 0.0 .TP diff --git a/doc/manual/build/man/ldns-update.1 b/doc/manual/build/man/ldns-update.1 index 3ae0cfa0..3a174431 100644 --- a/doc/manual/build/man/ldns-update.1 +++ b/doc/manual/build/man/ldns-update.1 @@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. -.TH "LDNS-UPDATE" "1" "Jun 02, 2025" "0.1.0-rc1" "dnst" +.TH "LDNS-UPDATE" "1" "Oct 03, 2025" "0.1.0-rc2" "dnst" .SH NAME ldns-update \- Send a dynamic update packet to update an IP (or delete all existing IPs) for a domain name .SH SYNOPSIS diff --git a/doc/manual/source/conf.py b/doc/manual/source/conf.py index 61c02acb..b6d208d1 100644 --- a/doc/manual/source/conf.py +++ b/doc/manual/source/conf.py @@ -194,6 +194,7 @@ ('man/ldns-key2ds', 'ldns-key2ds', 'Generate DS RRs from the DNSKEYs in a keyfile', author, 1), ('man/dnst-keygen', 'dnst-keygen', 'Generate a new key pair for a domain name', author, 1), ('man/ldns-keygen', 'ldns-keygen', 'Generate a new key pair for a domain name', author, 1), + ('man/dnst-keyset', 'dnst-keyset', 'Manage DNSSEC signing keys for a domain', author, 1), ('man/dnst-notify', 'dnst-notify', 'Send a NOTIFY message to a list of name servers', author, 1), ('man/ldns-notify', 'ldns-notify', 'Send a NOTIFY message to a list of name servers', author, 1), ('man/dnst-nsec3-hash', 'dnst-nsec3-hash', 'Print out the NSEC3 hash of a domain name', author, 1), diff --git 
a/doc/manual/source/index.rst b/doc/manual/source/index.rst index 8a102e4a..9609cd23 100644 --- a/doc/manual/source/index.rst +++ b/doc/manual/source/index.rst @@ -26,6 +26,7 @@ It depends on OpenSSL for its cryptography related functions. man/dnst man/dnst-key2ds man/dnst-keygen + man/dnst-keyset man/dnst-notify man/dnst-nsec3-hash man/dnst-signzone diff --git a/doc/manual/source/man/dnst-keyset.rst b/doc/manual/source/man/dnst-keyset.rst new file mode 100644 index 00000000..3b4e32d5 --- /dev/null +++ b/doc/manual/source/man/dnst-keyset.rst @@ -0,0 +1,902 @@ +dnst keyset +=========== + +Synopsis +-------- + +:program:`dnst keyset` ``-c `` ``[OPTIONS]`` ```` ``[ARGS]`` + +Description +----------- + +The **keyset** subcommand manages a set of DNSSEC (`RFC 9364`_) signing keys. +This subcommand is meant to be part of a DNSSEC signing solution. +The **keyset** subcommand manages signing keys and generates a signed DNSKEY RRset. +A separate zone signer (not part of dnst) is expected to use the zone +signing keys in the key set, +sign the zone and include the DNSKEY RRset (as well as the CDS and CDNSKEY +RRsets). +The keyset subcommand supports keys stored in files and, when the dnst +program is built with the kmip feature flag, keys stored in a +Hardware Security Module (HSM) that can be accessed using the +Key Management Interoperability Protocol (KMIP). + +.. _RFC 9364: https://www.rfc-editor.org/rfc/rfc9364 + +The keyset subcommand operates on one zone at a time. +For each zone, keyset maintains a configuration file that stores configuration parameters for +key generation (which algorithm to use, whether to use a CSK or a +KSK and ZSK pair), parameters for key rolls (whether key rolls are automatic +or not), the lifetimes of keys and signatures, etc. +The keyset subcommand also maintains a state file for each zone. +The state file lists the keys in the key set, the current key roll state, +and has the DNSKEY, CDS, and CDNSKEY RRsets. 
+key generation (which algorithm to use, whether to use a CSK or a
+KSK and ZSK pair), parameters for key rolls (whether key rolls are automatic
+or not), the lifetimes of keys and signatures, etc.
+The keyset subcommand also maintains a state file for each zone.
+The state file lists the keys in the key set, the current key roll state,
+and has the DNSKEY, CDS, and CDNSKEY RRsets.
+
+In addition to the configuration and state files, keyset maintains files for
+keys that are stored in the filesystem.
+Additionally, keyset can optionally maintain a credentials file that
+contains user names and passwords for the KMIP connections.
+
+The keyset subcommand uses the Keyset type from the Rust Domain crate to store
+the set of keys together with their properties such as whether a key
+should sign the zone, timestamps when keys are created or become stale.
+The Keyset data type also implements the basic logic of key rolls.
+
+The keyset subcommand supports importing existing keys, both standalone
+public keys as well as public/private key pairs can be imported.
+A standalone public key can only be imported from a file whereas public/private
+key pairs can be either files or references to keys stored in an HSM.
+Note that the public and private keys either need to be both files or both
+stored in an HSM.
+
+Signer
+^^^^^^
+
+The zone signer is expected to read the state file that is maintained by
+keyset to find the current zone signing keys, to find the signed
+DNSKEY/CDS/CDNSKEY RRset and to find the KMIP configuration.
+
+See for a description of the state file.
+
+The signer needs to poll the state file for changes.
+If the signer is in full control of running keyset, then the state file needs
+to be checked for changes after running keyset with commands that can
+potentially change the state file (status subcommands, etc. do not change
+the state file). 
+If however keyset can be invoked independently of the signer then the signer needs
+to periodically check for changes, for example, at least every hour.
+
+Cron
+~~~~
+
+The signatures of the DNSKEY, CDS and CDNSKEY RRsets need to be updated
+periodically.
+In addition, key roll automation requires periodic invocation of keyset
+to start new key rolls and to make progress on ones that are currently
+executing.
+
+For this purpose, keyset has a cron subcommand.
+This subcommand handles any housekeeping work that needs to be done.
+The cron subcommand can be executed at regular times, for example,
+once an hour from the cron(1) utility.
+
+However, keyset also maintains a field in the state file, called
+``cron-next``, that specifies when the cron subcommand should be run next.
+Running the cron subcommand early is fine, the current time is compared
+against the ``cron-next`` field and the subcommand exits early if
+``cron-next`` is in the future.
+Running the cron subcommand late may cause signatures to expire.
+
+Create / Init
+~~~~~~~~~~~~~
+
+The initialisation of a key set for a zone consists of two steps.
+First the create subcommand creates a configuration file with mostly default
+values and a state file without any keys.
+The init subcommand finishes the initialisation.
+
+This two step procedure allows configuration parameters to be set between
+the create and the init subcommand, for example, the algorithm to use.
+It also allows existing public/private key pairs to be imported.
+
+The init subcommand checks if any public/private key pairs have been imported.
+If so, init checks if both roles (KSK and ZSK) are present.
+A single CSK combines both roles.
+Absent a CSK, both a KSK and a ZSK need to be present otherwise the init command
+fails.
+Any imported public keys are ignored by init.
+
+If no public/private key pairs have been imported then the init subcommand
+will start an algorithm roll. 
+The algorithm roll will create new keys based on the current configuration:
+either as files or in an HSM and either a CSK or a pair of KSK and ZSK.
+
+Key Rolls
+~~~~~~~~~
+
+The keyset subcommand can perform four different types of key rolls:
+KSK rolls, ZSK rolls, CSK rolls and algorithm rolls.
+A KSK roll replaces one KSK with a new KSK.
+Similarly, a ZSK roll replaces one ZSK with a new ZSK.
+A CSK roll also replaces a CSK with a new CSK but the roll also treats a
+pair of KSK and ZSK keys as equivalent to a CSK.
+So a CSK roll can also roll from KSK plus ZSK to a new CSK or from a CSK
+to a new KSK and ZSK pair.
+Note that a roll from KSK plus ZSK to a new KSK plus ZSK pair
+is also supported.
+Finally, an algorithm roll is similar to a CSK roll, but designed in
+a specific way to handle the case where the new key or keys have an algorithm
+that is different from the one used by the current signing keys.
+
+The KSK and ZSK rolls are completely independent and can run in parallel.
+Consistency checks are performed at the start of a key roll.
+For example, a KSK key roll cannot start when another KSK roll is in progress or
+when a CSK or algorithm roll is in progress.
+A KSK roll cannot start either when the current signing key is a CSK or
+when the configuration specifies that the new signing key has to be a CSK.
+Finally, KSK rolls are also prevented when the algorithm for new keys is
+different from the one used by the current key.
+Similar limitations apply to the other roll types. Note however that an
+algorithm roll can be started even when it is not needed.
+
+A key roll consists of six steps: ``start-roll``, ``propagation1-complete``,
+``cache-expired1``, ``propagation2-complete``, ``cache-expired2``, and
+``roll-done``.
+For each key roll these six steps follow in the same order.
+Associated with each step is a (possibly empty) list of actions.
+Actions fall into three categories. 
+The first category consists of actions that require updating the zone or the
+parent zone.
+The second category consists of actions that require checking if changes
+have propagated to all nameservers and require reporting of the
+TTLs of the changed RRset as seen at the nameservers.
+Finally, the last category requires waiting for changes to propagate to
+all nameservers but there is no need to report the TTL.
+
+Typically, in a list of actions, an action of the first category is paired
+with one from the second or third category.
+For example, ``UpdateDnskeyRrset`` is paired with either
+``ReportDnskeyPropagated`` or ``WaitDnskeyPropagated``.
+
+A key roll starts with the ``start-roll`` step, which creates new keys.
+The next step, ``propagation1-complete`` has a TTL argument which is the
+maximum of the TTLs of the Report actions.
+The ``cache-expired1`` and ``cache-expired2`` have no associated actions.
+They simply require waiting for the TTL (in seconds) reported by the
+previous ``propagation1-complete`` or ``propagation2-complete``.
+The ``propagation2-complete`` step is similar to the ``propagation1-complete`` step.
+Finally, the ``roll-done`` step typically has associated Wait actions.
+These actions are cleanup actions and are harmless but confusing if they
+are skipped.
+
+The keyset subcommand provides fine-grained control over automation.
+Automation is configured separately for each of the four roll types.
+For each roll type, there are four booleans called ``start``, ``report``,
+``expire`` and ``done``.
+
+When set, the ``start`` boolean directs the cron subcommand to start a key roll
+when a relevant key has expired.
+A KSK or a ZSK key roll can start automatically if respectively a KSK or a ZSK
+has expired.
+A CSK roll can start automatically when a CSK has expired but also when a KSK or
+ZSK has expired and the new key will be a CSK. 
+Finally, an algorithm roll can start automatically when the new algorithm is
+different from the one used by the existing keys and any key has expired.
+
+The ``report`` flags control the automation of the ``propagation1-complete``
+and ``propagation2-complete`` steps.
+When enabled, the cron subcommand contacts the nameservers of the zone or
+(in the case of ``ReportDsPropagated``) the nameservers of the parent zone
+to check if changes have propagated to all nameservers.
+The check obtains the list of nameservers from the apex of the (parent) zone
+and collects all IPv4 and IPv6 addresses.
+For the ``ReportDnskeyPropagated`` and ``ReportDsPropagated`` actions, each address is
+then queried to see if the DNSKEY RRset or DS RRset match
+the KSKs.
+The ``ReportRrsigPropagated`` action is more complex.
+First the entire zone is transferred from the primary nameserver listed in the
+SOA record.
+Then all relevant signatures are checked to see if they have the expected key tags.
+The maximum TTL in the zone is recorded to be reported.
+Finally, all addresses of listed nameservers are checked to see if they
+have a SOA serial that is greater than or equal to the one that was checked.
+
+Automation of ``cache-expired1`` and ``cache-expired2`` is enabled by the
+``expire`` boolean.
+When enabled, the cron subcommand simply checks if enough time has passed
+to invoke ``cache-expired1`` or ``cache-expired2``.
+
+Finally, the ``done`` boolean enables automation of the ``roll-done`` step.
+This automation is very similar to the ``report`` automation.
+The only difference is that the Wait actions are automated so propagation
+is tracked but no TTL is reported.
+
+Fine-grained control over automation makes it possible to automate
+KSK or algorithm rolls without starting them automatically.
+Or let a key roll progress automatically except that the ``cache-expired``
+steps must be done manually in order to be able to insert extra manual steps. 
+
+The ``report`` and ``done`` automations require that keyset has network access
+to all nameservers of the zone and all nameservers of the parent.
+
+HSM Support (KMIP)
+~~~~~~~~~~~~~~~~~~
+
+The keyset subcommand supports keys in Hardware Security Modules (HSM) through
+the KMIP protocol.
+The most common way to access keys in HSMs is through the PKCS #11 interface.
+The PKCS #11 interface involves loading a shared library into the process
+that needs to access the HSM.
+This is unattractive for two reasons:
+
+1) Loading an arbitrary (binary) shared library negates the memory security
+   features of an application written in Rust. A mistake in the shared library
+   could corrupt memory that is used by the application. For this reason it is
+   attractive to load the shared library into a separate process.
+
+2) Setting up the run-time environment of the shared library is often complex.
+   The library may require specific environment variables or access to specific
+   files or devices. This complexity impacts every application that wants
+   to use the shared library.
+
+For these reasons it was decided to write a separate program, called
+kmip2pkcs11, that uses the PKCS #11 standard to have access to an HSM and
+provides a KMIP server interface. This makes it possible to contain both
+the configuration complexity and the possibility of memory corruption in
+a single program.
+Other programs, such as the keyset subcommand, then use the KMIP protocol to
+indirectly access the HSM via the kmip2pkcs11 program.
+Support for the KMIP protocol also makes it possible to directly connect to
+KMIP capable HSMs.
+
+The keyset subcommand stores two pieces of KMIP configuration.
+The first is a list of KMIP servers.
+Each KMIP server has a ``server ID`` that is used in key references to specify
+in which server the key is stored.
+A server also has a DNS name or IP address and a port to connect to the server. 
+The second piece of configuration is the ID of the server to be used for
+creating new keys.
+It is possible to specify that no server is to be used for new keys; in that
+case new keys will be created by keyset and stored as files.
+
+Authentication can be done either with a user name and password or with
+a client-side certificate.
+The user name and password are KMIP concepts that are mapped by the kmip2pkcs11
+server to a PKCS #11 slot or token name and the PIN.
+With this approach the kmip2pkcs11 server does not have to store secrets
+that provide access to the HSM.
+User names and passwords are stored in a separate file to avoid storing
+secrets in the keyset configuration or state files.
+
+Unlike other configuration, the list of KMIP servers is stored in the state
+file.
+The reason for doing that is that signers also need the same KMIP server list
+to be able to sign a zone.
+By storing the server list in the state file, a signer has to read only the
+state file to be able to use KMIP keys.
+
+Options that can be configured for a server include not checking the
+server's certificate, specifying the server's certificate or certificate
+authority, various connection parameters such as connect timeout, read
+timeout, write timeout and maximum response size.
+
+When generating new keys, the label of the key can have a user supplied prefix.
+This can be used, for example, to show that a key is for
+development or testing.
+Finally, some HSMs allow longer labels than others.
+On HSMs that allow longer labels than the 32 character default, raising the
+maximum label length can avoid truncation for longer domain names.
+On HSMs that have a limit that is lower than the default, setting the correct
+length avoids errors when creating keys.
+
+Importing Keys
+~~~~~~~~~~~~~~
+
+There are three basic ways to import existing keys: a public key,
+a public/private key pair from files or a public/private key pair in an HSM.
+
+A public key can only be imported from a file. 
+When the key is imported the name of the file is converted to a URL and stored in the key set and
+the key will be included in the DNSKEY RRset.
+This is useful for certain migrations and to manually implement a
+multi-signer DNSSEC signing setup.
+Note that automation does not work for this case.
+
+A public/private key pair can be imported from files.
+It is sufficient to give the name of the file that holds the public key if
+the filename ends in ``.key`` and the filename of the private key is the
+same except that it ends in ``.private``.
+If this is not the case then the private key filename must be specified
+separately.
+
+In order to use keys stored in an HSM the ``dnst keyset kmip add-server``
+subcommand must first be used to associate the KMIP server connection settings
+with a user defined server ID.
+
+The first server defined becomes the default. If a default KMIP server has
+been defined it will be used to generate all future keys, unless the ``dnst
+keyset kmip disable`` command is issued. If more than one KMIP server is
+defined, only one can be the default server at any time. Use the ``dnst
+keyset kmip set-default-server`` command to change which KMIP server will be
+used to generate future keys. Note that like all ``dnst keyset`` subcommands,
+the KMIP subcommands set behaviour for a single zone. Additionally there are
+``list-servers``, ``get-server``, ``modify-server`` and ``remove-server``
+subcommands for inspecting and altering the configured KMIP server settings.
+
+Importing a public/private key stored in an HSM requires specifying the KMIP
+server ID, the ID of the public key, the ID of the private key, the
+DNSSEC algorithm of the key and the flags (typically 256 for a ZSK and
+257 for a KSK).
+
+
+Normally, keyset assumes ownership of any keys it holds. 
+This means that when a key is deleted from the key set, the keyset subcommand
+will also delete the files that hold the public and private keys or delete the
+keys from the HSM that was used to create them.
+
+For an imported public/private key pair this is considered too dangerous
+because another signer may need the keys.
+For this reason keys are imported in so-called ``decoupled`` state.
+When a decoupled key is deleted, only the reference to the key is deleted
+from the key set, the underlying keys are left untouched.
+There is a ``--coupled`` option to tell keyset to take ownership of the key.
+
+
+Migration
+~~~~~~~~~
+
+The keyset subcommand has no direct support for migration.
+Migration has to be done manually using the import commands.
+The semantics of the import commands are described in the previous section.
+This section focuses on how the import command can be used to perform a
+migration.
+
+There are three migration strategies: 1) importing the existing signer's
+(private) signing keys, 2) a full multi-signer migration and 3)
+a partial multi-signer migration.
+
+Importing the existing signer's signing keys
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Importing the existing signer's public/private key pairs is the easiest
+migration mechanism.
+The basic process is the following:
+
+* Disable (automatic) key rolls on the existing signer.
+
+* Disable automatic key rolls before executing the create command.
+  For example by setting the KSK, ZSK, and CSK validities to ``off``.
+
+* Import the KSK and ZSK (or CSK) as files or using KMIP between the
+  create and init commands.
+
+* Check with tools such as ldns-verify-zone that the new zone is secure with
+  the existing DS record at the parent.
+
+* Switch the downstream secondaries that serve the zone to receive the
+  signed zone from the new signer.
+
+* Perform key rolls for the KSK and ZSK (or the CSK).
+
+* (If wanted) enable automatic key rolls.
+
+* Remove the zone from the old signer. 
+
+Note that after the key roll, the signer has to make sure that it
+keeps access to signing keys.
+In case of KMIP keys, the old signer can also delete the keys from the HSM.
+For this reason it is best to perform key rolls of all keys before removing
+the zone from the old signer.
+
+This document describes key management. Care should be taken that other
+parameters, such as the use of NSEC or NSEC3, are
+the same (to avoid confusion) and that the SOA serial policy is the same
+(to avoid problems with zone transfers).
+
+Full multi-signer migration
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The basic idea is to execute the following steps:
+
+* Disable (automatic) key rolls on the existing signer.
+
+* If the parent supports automatic updating of the DS record using CDS/CDNSKEY
+  (RFC 8078) then disable the generation of CDS/CDNSKEY records on the
+  existing signer or disable CDS/CDNSKEY processing for this zone at the parent.
+
+* Issue the create command.
+
+* Disable automatic key rolls.
+
+* (Disable CDS/CDNSKEY generation. Keyset cannot disable CDS/CDNSKEY generation at the moment)
+
+* Import the public key of the existing signer's ZSK (or CSK) using the
+  ``keyset import public-key`` subcommand.
+
+* Issue the init command.
+
+* Make sure in the next step to only add a DS record at the parent, not
+  delete the existing one.
+
+* Complete the initial algorithm roll.
+
+* Verify using tools such as ldns-verify-zone that the zone is correctly
+  signed.
+
+* Import the public key of the new ZSK (or CSK) in the existing signer.
+
+* Verify that all nameservers that serve the zone have the new ZSK in the
+  DNSKEY RRset of the existing signer.
+
+* Transition the nameservers from the existing signer to the new signer.
+
+* Let caches expire for the DNSKEY RRset of the old signer and the
+  zone RRSIGs of the old signer.
+
+* Remove the DS record for the old signer from the parent.
+
+* Remove the imported public key. 
+
+* (If wanted) enable automatic key rolls and generation of CDS/CDNSKEY
+  records.
+
+Partial multi-signer migration
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+A partial multi-signer migration is the right approach when the existing
+signer cannot import the new signer's ZSK.
+A requirement is that the new signer can transfer the signed zone from the
+existing signer and that the new signer supports so-called "pass-through"
+mode.
+In pass-through mode a signer leaves signatures for zone records unchanged
+but does replace the DNSKEY, CDS and CDNSKEY RRset with the ones from
+this subcommand.
+
+The basic idea is to execute the following steps:
+
+* Disable (automatic) key rolls on the existing signer.
+
+* If the parent supports automatic updating of the DS record using CDS/CDNSKEY
+  (RFC 8078) then disable the generation of CDS/CDNSKEY records in the
+  existing signer or disable CDS/CDNSKEY processing for this zone at the parent.
+
+* Issue the create command.
+
+* Disable automatic key rolls.
+
+* (Disable CDS/CDNSKEY generation. Keyset cannot disable CDS/CDNSKEY generation at the moment)
+
+* Import the public key of the existing signer's ZSK (or CSK).
+
+* Issue the init command.
+
+* Switch the new signer to pass-through mode. The signer has to transfer the
+  signed zone from the existing signer.
+
+* Make sure in the next step to only add a DS record at the parent, not
+  delete the existing one.
+
+* Complete the initial algorithm roll.
+
+* Verify using tools such as ldns-verify-zone that the zone is correctly
+  signed.
+
+* Transition the nameservers from the existing signer to the new signer.
+
+* Let caches expire for the DNSKEY RRset of the old signer.
+
+* Remove the DS record for the old signer from the parent.
+
+* Switch off pass-through mode.
+
+* Let caches expire for the zone RRSIGs of the old signer.
+
+* Remove the imported public key.
+
+* (If wanted) enable automatic key rolls and generation of CDS/CDNSKEY
+  records.
+
+Options
+-------
+
+.. 
option:: -c + + Configuration file. + +.. + .. option:: -v + + Enable verbose output. + +.. option:: -h, --help + + Print the help text (short summary with ``-h``, long help with + ``--help``). + +Commands +-------- + +The keyset subcommand provides the following commands: + +* create + + Create empty configuration and state files for a domain. + + .. option:: -n + + The name of the domain for which signing keys will be managed. + + .. option:: -s + + The name of the state file. + +* init + + Initialize the keyset. + If a KSK and ZSK (or a CSK) have been imported then the DNSKEY RRset will + be created and signed. + If there are no keys, then a KSK and a ZSK will be created (unless the + use-csk option is set to true) and an algorithm roll will be started. + The init command will fail if the keyset has been initialized already. + +* ksk, zsk, csk, and algorithm + + The ksk, zsk, csk, and algorithm commands perform manual key roll steps. + These commands have the following subcommands: + + * start-roll + + Start a key roll of the type specified by the command. + + * propagation1-complete + + Inform keyset that the changed RRsets and signatures have propagated. + Report the maximum TTL of the report actions. + + * cache-expired1 + + Inform keyset that enough time has passed that caches should have expired. + Note that this command will fail if invoked too early. + + * propagation2-complete + + This command is similar to propagation1-complete. + + * cache-expired2 + + This command is similar to cache-expired1. + + * roll-done + + Inform keyset that the changed RRsets and signatures have propagated + and that any wait actions have been executed successfully. + +* import + + The import command can either import a public key in a file or a + public/private key pair in either files or as KMIP references. + + * public-key + + A reference to a public key in is added to the keyset. + Imported public keys are added to the DNSKEY RRset. 
+ + * ksk, zsk, csk + + A key pair is imported as a KSK, ZSK, or CSK. + When a key is imported, there is the question what to do when the + imported key is later deleted. + By default, keyset imports keys in ``decoupled`` state. + When a decoupled key is later removed, only the reference is deleted from + the key set. + The file that contains the key is not deleted and the key is not deleted + from an HSM. + Passing the option ``--coupled`` when importing a key, directs keyset to + take ownership of the key. + + The key pair can be imported in two ways: + + * file + + The argument refers to the public key. The filename of the + private key is derived from the public key unless the ``--private-key`` + option is used to specify the filename that holds the private key. + + .. option:: --coupled + + Take ownership of the imported keys. + + .. option:: --private-key + + Explicitly pass the name of the file that holds the private key. + + * kmip + + The argument specifies one of the KMIP servers that has been + configured using the ``kmip add-server`` command. + The and arguments are the KMIP identifiers of + the public key and the private key respectively. + The DNSSEC algorithm is specified using the argument and + finally the argument (usually 256 or 257) is the value of + the flags field in the DNSKEY record for the public key. + + .. option:: --coupled + + Take ownership of the imported keys. + +* remove-key + + Remove a key or key pair from the key set. + The argument is the URL of the public key. + If the key is ``coupled`` then the files that hold the keys are also removed + or, in case of KMIP keys, the keys are removed from the HSM. + Normally, keys are only removed when they are stale. + + .. option:: --force + + Force a key to be removed even if the key is not stale. + + .. option:: --continue + + Continue when removing a key file fails or when a key cannot be removed + from an HSM. 
+
+* status
+
+  Provide status information about key rolls, key expiration and signature
+  expiration.
+
+  .. option:: -v --verbose
+
+     Make status verbose.
+
+* actions
+
+  Show the actions that have to be executed for any key rolls.
+
+* keys
+
+  Give detailed information about all keys in the key set.
+
+* get
+
+  Get the values of the following configuration variables: use-csk,
+  autoremove, algorithm, ds-algorithm, dnskey-lifetime, cds-lifetime.
+  This is a subset of all configuration variables.
+
+  Additionally, the dnskey argument returns the current DNSKEY RRset plus
+  signatures, cds returns the CDS and CDNSKEY RRsets plus signatures and
+  ds returns DS records that should be added to the parent zone.
+
+* set
+
+  Set configuration variables.
+  Note that setting configuration variables after the create command but
+  before the init command can be used to affect the initial key creation.
+
+  * use-csk
+
+    When true, new keys will be created as CSK otherwise a KSK and a ZSK
+    will be created.
+
+  * autoremove
+
+    When true, keys that are stale will be removed automatically.
+    Currently there is no delay in removing keys.
+
+  * algorithm
+
+    Set the algorithm to be used when creating new keys. Supported values
+    are RSASHA256, RSASHA512, ECDSAP256SHA256, ECDSAP384SHA384, ED25519,
+    and ED448.
+    Not all values are supported for KMIP keys.
+
+    .. option:: -b
+
+       For RSA keys, the length of the key in bits.
+
+  * auto-ksk, auto-zsk, auto-csk, auto-algorithm
+
+    These commands take four boolean arguments: start, report, expire,
+    and done.
+    When set to true, the corresponding step or steps of the key roll specified
+    by the command are executed automatically.
+
+    For example, ``auto-csk true false true false`` means that
+    CSK rolls will start automatically, that the propagation1-complete,
+    propagation2-complete, and roll-done need to be executed manually.
+    The cache-expired1 and cache-expired2 steps are executed automatically. 
+
+  * ds-algorithm
+
+    Set the hash algorithm to be used for generating DS records.
+    Possible values are ``SHA-256`` and ``SHA-384``.
+
+  * dnskey-lifetime <duration>, cds-lifetime <duration>
+
+    When a DNSKEY RRset is signed (dnskey-lifetime) or when CDS or CDNSKEY
+    RRsets are signed (cds-lifetime), how far in the future are the signatures
+    set to expire.
+    The duration is an integer followed by a suffix, ``s`` or ``secs`` for
+    seconds, ``m`` or ``mins`` for minutes, ``h`` or ``hours``, ``d`` or ``days``, ``w`` or ``weeks``.
+
+  * dnskey-remain-time <duration>, cds-remain-time <duration>
+
+    The minimum amount of remaining time that signatures for the DNSKEY RRset
+    (dnskey-remain-time) or the CDS or CDNSKEY RRsets (cds-remain-time) have
+    to be valid.
+    New signatures are generated when the remaining time drops below the
+    specified duration.
+    For the syntax of <duration> see ``dnskey-lifetime``.
+
+  * dnskey-inception-offset <duration>, cds-inception-offset <duration>
+
+    When generating signatures for the DNSKEY RRset (dnskey-inception-offset)
+    or the CDS and CDNSKEY RRsets (cds-inception-offset), set the inception
+    timestamp this amount in the past to compensate for clocks that are a
+    bit off or in the wrong time zone.
+    For the syntax of <duration> see ``dnskey-lifetime``.
+
+  * ksk-validity <duration> | ``off``, zsk-validity <duration> | ``off``, csk-validity <duration> | ``off``
+
+    Set how long a KSK, ZSK, or CSK is considered valid.
+    The special value ``off`` means that no limit has been set.
+    For the syntax of <duration> see ``dnskey-lifetime``.
+
+    When a key is no longer considered valid and automatic starting of the
+    appropriate key roll has been enabled then a key roll will start at the
+    next invocation of the cron command.
+
+    The status command shows which keys are no longer valid or when their
+    validity will end.
+
+  * update-ds-command
+
+    Set a command to run when the DS records in the parent zone need
+    to be updated.
+    This command can, for example, alert the operator or use an API provided
+    by the parent zone to update the DS records automatically. 
+ +* show + + Show all configuration variables. + +* cron + + Execute any automatic steps such a refreshing signatures or automatic steps + in key rolls. + +* kmip + + The kmip command manages the list of configured KMIP servers and the + default server to use for generating new keys. + The kmip command has the following subcommands: + + * disable + + Disable use of KMIP for generating new keys. + + * add-server + + Add a KMIP server with name and DNS name or IP address + . + The name of the server is used in a key reference to identify which KMIP + server holds the key. + + .. option:: --port + + TCP port to connect to the KMIP server on. The default port is 5696. + + .. option:: --pending + + Add the server but don't make it the default. + + .. option:: --credential-store + + Optional path to a JSON file to read/write username/password + credentials from/to. + + .. option:: --username + + Optional username to authenticate to the KMIP server as. + + .. option:: --password + + Optional password to authenticate to the KMIP server with. + + .. option:: --client-cert + + Optional path to a TLS certificate to authenticate to the KMIP server + with. + + .. option:: --client-key + + Optional path to a private key for client certificate authentication. + + .. option:: --insecure + + Accept the KMIP server TLS certificate without verifying it. + + .. option:: --server-cert + + Optional path to a TLS PEM certificate for the server. + + .. option:: --ca-cert + + Optional path to a TLS PEM certificate for a Certificate Authority. + + .. option:: --connect-timeout + + TCP connect timeout. Default 3 seconds. + + .. option:: --read-timeout + + TCP response read timeout. Default 30 seconds. + + .. option:: --write-timeout + + TCP request write timeout. Default 3 seconds. + + .. option:: --max-response-bytes + + Maximum KMIP response size to accept (in bytes). Default 8192 bytes. + + .. 
option:: --key-label-prefix + + Can be used to denote the s/w that created the key, and/or to indicate + which installation/environment it belongs to, e.g. dev, test, prod, + etc. + + .. option:: --key-label-max-bytes + + Maximum label length (in bytes) permitted by the HSM. Default 32 bytes. + + * modify-server + + Modify the settings of the server with ID . This subcommand + takes most of the options documented at ``kmip add-server``. + Some options have the same name but are slightly different. + There are also a few additional options. + The new and modified options are listed below. + + .. option:: --address + + Modify the hostname or IP address of the KMIP server. + + .. option:: --no-credentials + + Disable use of username / password authentication. + Note: This will remove any credentials from the credential-store for + this server id. + + .. option:: --no-client-auth + + Disable use of TLS client certificate authentication. + + .. option:: --insecure + + Modify whether or not to accept the KMIP server TLS certificate + without verifying it. + + * remove-server + + Remove an existing non-default KMIP server. + To remove the default KMIP server use `kmip disable` first. + A server cannot be removed if there are keys that reference it. + + * set-default-server + + Set the default KMIP server to use for key generation. + + * get-server + + Get the details of an existing KMIP server. + + * list-servers + + List all configured KMIP servers. diff --git a/doc/manual/source/man/dnst.rst b/doc/manual/source/man/dnst.rst index a5104b61..ce816661 100644 --- a/doc/manual/source/man/dnst.rst +++ b/doc/manual/source/man/dnst.rst @@ -47,6 +47,10 @@ Commands Generate a new key pair for a domain name. + :doc:`dnst-keyset ` (1) + + Manage DNSSEC signing keys for a domain. + :doc:`dnst-notify ` (1) Send a NOTIFY message to a list of name servers. 
diff --git a/pkg/debian/postinst b/pkg/debian/postinst index 86017a67..cb917632 100644 --- a/pkg/debian/postinst +++ b/pkg/debian/postinst @@ -1,13 +1,13 @@ #!/bin/sh -e -case "$1" in -configure) - # Create symbolic links - ln -f -s /usr/bin/dnst /usr/bin/ldns-keygen - ln -f -s /usr/bin/dnst /usr/bin/ldns-key2ds - ln -f -s /usr/bin/dnst /usr/bin/ldns-nsec3-hash - ln -f -s /usr/bin/dnst /usr/bin/ldns-notify - ln -f -s /usr/bin/dnst /usr/bin/ldns-signzone - ln -f -s /usr/bin/dnst /usr/bin/ldns-update - ;; -esac +# case "$1" in +# configure) +# # Create symbolic links +# ln -f -s /usr/bin/dnst /usr/bin/ldns-keygen +# ln -f -s /usr/bin/dnst /usr/bin/ldns-key2ds +# ln -f -s /usr/bin/dnst /usr/bin/ldns-nsec3-hash +# ln -f -s /usr/bin/dnst /usr/bin/ldns-notify +# ln -f -s /usr/bin/dnst /usr/bin/ldns-signzone +# ln -f -s /usr/bin/dnst /usr/bin/ldns-update +# ;; +# esac diff --git a/pkg/debian/prerm b/pkg/debian/prerm index 5644fb2f..552dbf84 100644 --- a/pkg/debian/prerm +++ b/pkg/debian/prerm @@ -1,8 +1,8 @@ #!/bin/sh -e -case "$1" in -upgrade|remove) - # Remove symbolic links - rm -f /usr/bin/ldns-nsec3-hash - ;; -esac \ No newline at end of file +# case "$1" in +# upgrade|remove) +# # Remove symbolic links +# rm -f /usr/bin/ldns-nsec3-hash +# ;; +# esac diff --git a/pkg/rpm/scriptlets.toml b/pkg/rpm/scriptlets.toml index 8aae9b97..838ec664 100644 --- a/pkg/rpm/scriptlets.toml +++ b/pkg/rpm/scriptlets.toml @@ -1,40 +1,38 @@ post_trans_script = ''' #!/bin/bash -e -# This script will be run _after_ upgrade in the presence of an obsoleted ldns-utils -# package. 
This is useful because the ldns-utils uninstall script will have been run -# _after_ installation of dnst and so the symbolic links that we create will be -# removed, and we have to make sure here that they get put back - -FORCE= -# See: https://docs.fedoraproject.org/en-US/packaging-guidelines/Scriptlets/#_syntax -if [ $1 -eq 2 ]; then - # Upgrade - FORCE=-f -fi - -# Create symbolic links -ln -f -s ${FORCE} /usr/bin/dnst /usr/bin/ldns-keygen -ln -f -s ${FORCE} /usr/bin/dnst /usr/bin/ldns-key2ds -ln -f -s ${FORCE} /usr/bin/dnst /usr/bin/ldns-nsec3-hash -ln -f -s ${FORCE} /usr/bin/dnst /usr/bin/ldns-notify -ln -f -s ${FORCE} /usr/bin/dnst /usr/bin/ldns-signzone -ln -f -s ${FORCE} /usr/bin/dnst /usr/bin/ldns-update +# # This script will be run _after_ upgrade in the presence of an obsoleted ldns-utils +# # package. This is useful because the ldns-utils uninstall script will have been run +# # _after_ installation of dnst and so the symbolic links that we create will be +# # removed, and we have to make sure here that they get put back +# +# FORCE= +# # See: https://docs.fedoraproject.org/en-US/packaging-guidelines/Scriptlets/#_syntax +# if [ $1 -eq 2 ]; then +# # Upgrade +# FORCE=-f +# fi +# +# # Create symbolic links +# ln -f -s ${FORCE} /usr/bin/dnst /usr/bin/ldns-keygen +# ln -f -s ${FORCE} /usr/bin/dnst /usr/bin/ldns-key2ds +# ln -f -s ${FORCE} /usr/bin/dnst /usr/bin/ldns-nsec3-hash +# ln -f -s ${FORCE} /usr/bin/dnst /usr/bin/ldns-notify +# ln -f -s ${FORCE} /usr/bin/dnst /usr/bin/ldns-signzone +# ln -f -s ${FORCE} /usr/bin/dnst /usr/bin/ldns-update ''' post_uninstall_script = ''' #!/bin/bash -e -#RPM_SYSTEMD_MACROS# - -# See: https://docs.fedoraproject.org/en-US/packaging-guidelines/Scriptlets/#_syntax -if [ $1 -eq 0 ] ; then - # Uninstallation - # Remove symbolic links - rm -f /usr/bin/ldns-keygen - rm -f /usr/bin/ldns-key2ds - rm -f /usr/bin/ldns-nsec3-hash - rm -f /usr/bin/ldns-notify - rm -f /usr/bin/ldns-signzone - rm -f /usr/bin/ldns-update -fi +# # See: 
https://docs.fedoraproject.org/en-US/packaging-guidelines/Scriptlets/#_syntax +# if [ $1 -eq 0 ] ; then +# # Uninstallation +# # Remove symbolic links +# rm -f /usr/bin/ldns-keygen +# rm -f /usr/bin/ldns-key2ds +# rm -f /usr/bin/ldns-nsec3-hash +# rm -f /usr/bin/ldns-notify +# rm -f /usr/bin/ldns-signzone +# rm -f /usr/bin/ldns-update +# fi ''' diff --git a/pkg/rules/packages-to-build.yml b/pkg/rules/packages-to-build.yml index 31358c02..e37f8092 100644 --- a/pkg/rules/packages-to-build.yml +++ b/pkg/rules/packages-to-build.yml @@ -2,7 +2,7 @@ # workflow inputs. --- pkg: - - 'dnst' + - 'cascade-dnst' image: - "ubuntu:focal" # ubuntu/20.04 - "ubuntu:jammy" # ubuntu/22.04 @@ -10,24 +10,26 @@ image: - "debian:buster" # debian/10 - "debian:bullseye" # debian/11 - "debian:bookworm" # debian/12 - - 'rockylinux:8' # compatible with EOL centos:8 - - 'rockylinux:9' + - "debian:trixie" # debian/13 + - 'almalinux:8' # compatible with EOL centos:8 + - 'almalinux:9' + - 'almalinux:10' target: - 'x86_64' include: # package for the Raspberry Pi 4b as an ARMv7 cross compiled variant of the Debian Bullseye upon which # Raspbian 11 is based. - - pkg: 'dnst' + - pkg: 'cascade-dnst' image: 'debian:bullseye' target: 'armv7-unknown-linux-musleabihf' # package for the Raspberry Pi 1b as an ARMv6 cross compiled variant of the Debian Buster upon which # Raspbian 10 is based. - - pkg: 'dnst' + - pkg: 'cascade-dnst' image: 'debian:buster' target: 'arm-unknown-linux-musleabihf' # package for the ROCK64 as an AARCH64 cross compiled variant of Debian Buster upon which Armbian 21 is based. - - pkg: 'dnst' + - pkg: 'cascade-dnst' image: 'debian:buster' target: 'aarch64-unknown-linux-musl' diff --git a/pkg/rules/packages-to-test.yml b/pkg/rules/packages-to-test.yml index 1441b687..885bfc61 100644 --- a/pkg/rules/packages-to-test.yml +++ b/pkg/rules/packages-to-test.yml @@ -2,13 +2,15 @@ # workflow inputs. 
--- pkg: - - 'dnst' + - 'cascade-dnst' image: - "ubuntu:focal" # ubuntu/20.04 - "ubuntu:jammy" # ubuntu/22.04 + - "ubuntu:noble" # ubuntu/24.04 - "debian:buster" # debian/10 - "debian:bullseye" # debian/11 - "debian:bookworm" # debian/12 + - "debian:trixie" # debian/13 published_pkg: - 'ldnsutils' # correct for Ubuntu/Debian target: @@ -25,25 +27,37 @@ test-mode: - 'fresh-install' - 'upgrade-from-published' include: - - pkg: 'dnst' - image: 'rockylinux:8' + - pkg: 'cascade-dnst' + image: 'almalinux:8' target: 'x86_64' test-mode: 'fresh-install' - - pkg: 'dnst' - image: 'rockylinux:8' + - pkg: 'cascade-dnst' + image: 'almalinux:8' target: 'x86_64' test-mode: 'upgrade-from-published' published_pkg: 'ldns-utils' rpm_yum_extra_args: --enablerepo powertools - - pkg: 'dnst' - image: 'rockylinux:9' + - pkg: 'cascade-dnst' + image: 'almalinux:9' target: 'x86_64' test-mode: 'fresh-install' - - pkg: 'dnst' - image: 'rockylinux:9' + - pkg: 'cascade-dnst' + image: 'almalinux:9' + target: 'x86_64' + test-mode: 'upgrade-from-published' + published_pkg: 'ldns-utils' + rpm_yum_extra_args: --enablerepo crb + + - pkg: 'cascade-dnst' + image: 'almalinux:10' + target: 'x86_64' + test-mode: 'fresh-install' + + - pkg: 'cascade-dnst' + image: 'almalinux:10' target: 'x86_64' test-mode: 'upgrade-from-published' published_pkg: 'ldns-utils' @@ -57,4 +71,4 @@ include: # mode: 'upgrade-from-published' # - pkg: 'routinator' # image: 'debian:bookworm' -# mode: 'upgrade-from-published' \ No newline at end of file +# mode: 'upgrade-from-published' diff --git a/pkg/test-scripts/test-cascade-dnst.sh b/pkg/test-scripts/test-cascade-dnst.sh new file mode 100644 index 00000000..b1e2a2d2 --- /dev/null +++ b/pkg/test-scripts/test-cascade-dnst.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash + +set -eo pipefail +set -x + +case $1 in + post-install) + # Run some sanity checks + /usr/libexec/cascade/cascade-dnst --version + # ldns-keygen -v + # dnst nsec3-hash nlnetlabs.nl + # ldns-nsec3-hash nlnetlabs.nl + 
/usr/libexec/cascade/cascade-dnst keyset --help + man cascade-dnst + # man dnst-keygen + # man ldns-keygen + man cascade-dnst-keyset + ;; + + post-upgrade) + # Nothing to do. + # Run some sanity checks + /usr/libexec/cascade/cascade-dnst --version + # ldns-keygen -v + # dnst nsec3-hash nlnetlabs.nl + # ldns-nsec3-hash nlnetlabs.nl + /usr/libexec/cascade/cascade-dnst keyset --help + man cascade-dnst + # man dnst + # man dnst-keygen + # man ldns-keygen + man cascade-dnst-keyset + ;; +esac diff --git a/pkg/test-scripts/test-dnst.sh b/pkg/test-scripts/test-dnst.sh deleted file mode 100644 index 2738a571..00000000 --- a/pkg/test-scripts/test-dnst.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env bash - -set -eo pipefail -set -x - -case $1 in - post-install) - # Run some sanity checks - dnst --version - ldns-keygen -v - dnst nsec3-hash nlnetlabs.nl - ldns-nsec3-hash nlnetlabs.nl - man dnst - man dnst-keygen - man ldns-keygen - ;; - - post-upgrade) - # Nothing to do. - # Run some sanity checks - dnst --version - ldns-keygen -v - dnst nsec3-hash nlnetlabs.nl - ldns-nsec3-hash nlnetlabs.nl - man dnst - man dnst-keygen - man ldns-keygen - ;; -esac \ No newline at end of file diff --git a/src/commands/keyset/cmd.rs b/src/commands/keyset/cmd.rs new file mode 100644 index 00000000..adf76830 --- /dev/null +++ b/src/commands/keyset/cmd.rs @@ -0,0 +1,5616 @@ +//! Key management utility. +#![warn(missing_docs)] +#![warn(clippy::missing_docs_in_private_items)] + +// TODO: +// - file locking +// - update the state file atomically by writing to a different file and +// then renaming. This helps downstream users. +// - option: overwrite the config file. This is risky but simplifies locking. +// - option: update the config file atomically. This makes locking more +// complex. +// - add a autoremove_delay option. Delete stale keys only some time after +// their withdrawn times. +// - move dnskey_rrset, cds_rrset, ns_rrset into apex_extra (ds_rrset remains). 
+// add apex_remove with a list of Rtype that the signer should remove from
+// the apex.
+// - create struct State that has KeySetConfig, KeySetState and the
+// config_changed, state_changed, and run_update_ds_command flags. This
+// reduces parameter passing. It also allows KMIP to store its connection
+// pool in State instead of in KeySetState as it currently does.
+// - add a -v option to keyset. Remove the -v option from status. Add
+// verbose output for creation and deletion of keys.
+
+use crate::env::Env;
+use crate::error::Error;
+use crate::util;
+use bytes::Bytes;
+use clap::Subcommand;
+use domain::base::iana::{Class, DigestAlgorithm, OptRcode, SecurityAlgorithm};
+use domain::base::name::FlattenInto;
+use domain::base::zonefile_fmt::{DisplayKind, ZonefileFmt};
+use domain::base::{
+    MessageBuilder, Name, ParseRecordData, ParsedName, Record, Rtype, Serial, ToName, Ttl,
+};
+use domain::crypto::sign::{GenerateParams, KeyPair, SecretKeyBytes};
+#[cfg(feature = "kmip")]
+use domain::crypto::{kmip, kmip::KeyUrl, sign::SignRaw};
+use domain::dep::octseq::{FromBuilder, OctetsFrom};
+use domain::dnssec::common::{display_as_bind, parse_from_bind};
+use domain::dnssec::sign::keys::keyset::{
+    self, Action, Key, KeySet, KeyState, KeyType, RollState, RollType, UnixTime,
+};
+use domain::dnssec::sign::keys::SigningKey;
+use domain::dnssec::sign::records::Rrset;
+use domain::dnssec::sign::signatures::rrsigs::sign_rrset;
+use domain::dnssec::validator::base::DnskeyExt;
+use domain::net::client::dgram_stream;
+use domain::net::client::protocol::{TcpConnect, UdpConnect};
+use domain::net::client::request::{
+    ComposeRequest, RequestMessage, RequestMessageMulti, SendRequest, SendRequestMulti,
+};
+use domain::net::client::stream;
+use domain::rdata::dnssec::Timestamp;
+use domain::rdata::{AllRecordData, Cdnskey, Cds, Dnskey, Ds, Rrsig, Soa, ZoneRecordData};
+use domain::resolv::lookup::lookup_host;
+use domain::resolv::StubResolver;
+#[cfg(feature = "kmip")]
+use 
domain::utils::base32::encode_string_hex; +use domain::zonefile::inplace::{Entry, Zonefile}; +use futures::future::join_all; +use jiff::{Span, SpanRelativeTo}; +use serde::{Deserialize, Serialize}; +use std::cmp::max; +use std::collections::{BTreeMap, HashMap, HashSet}; +use std::convert::From; +use std::ffi::OsStr; +use std::fmt::{Debug, Display, Formatter}; +use std::fs::{create_dir_all, remove_file, File}; +use std::io::{self, Write}; +use std::net::{IpAddr, SocketAddr}; +use std::path::{absolute, Path, PathBuf}; +use std::process::Command; +use std::sync::Mutex; +use std::time::{Duration, SystemTime, UNIX_EPOCH}; +use tokio::net::TcpStream; +#[cfg(feature = "kmip")] +use tracing::{debug, error, warn}; +#[cfg(not(feature = "kmip"))] +use tracing::{debug, error, warn}; +use url::Url; + +#[cfg(feature = "kmip")] +use super::kmip::{format_key_label, kmip_command, KmipCommands, KmipState}; + +/// Maximum tries to generate new key with a key tag that does not conclict +/// with the key tags of existing keys. +const MAX_KEY_TAG_TRIES: u8 = 10; + +/// Wait this amount before retrying for network errors, DNS errors, etc. +const DEFAULT_WAIT: Duration = Duration::from_secs(10 * 60); + +/// The default TTL for creating a new config file. +const DEFAULT_TTL: Ttl = Ttl::from_secs(3600); + +// Types to simplify some HashSet types. +/// Type for a Name that uses a Vec. +type NameVecU8 = Name>; +/// Type for a record that uses ZoneRecordData and a Vec. +type RecordZoneRecordData = Record, NameVecU8>>; +/// Type for a DNSKEY record. +type RecordDnskey = Record>>; + +// Automatic key rolls +// +// Keyset supports four types of automatic key rolls: +// 1) A KSK roll. Roll one (or more) KSKs to a new KSK. +// 2) A ZSK roll. Roll one (or more) ZSKs to a new ZSK. +// 3) A CSK roll. Roll any KSK, ZSK, or CSK to a single new CSK or roll +// one (or more CSKs) plus any KSK or ZSK to a new KSK plus a new ZSK. +// This depends on the value of the use_csk config variable. 
+// 4) An algorithm roll. Roll any KSK, ZSK, or CSK to a new CSK (if use_csk
+// is true) or to a new KSK and a new ZSK (if use_csk is false) with an
+// algorithm that is different from the one in the old keys.
+//
+// For each roll type automation can be enabled for four different types of
+// steps:
+// 1) Start. When automation is enabled for this step, keyset checks if keys
+// are expired, no conflicting rolls are currently in progress and no
+// conditions (use of CSK, the need for an algorithm roll) prevent this
+// type of roll.
+// 2) Report. In the complete key roll, there are two steps:
+// propagation1_complete and propagation2_complete. When automation is
+// enabled, keyset goes through the list of actions and takes care of
+// the Report actions (ReportDnskeyPropagated, ReportDsPropagated,
+// ReportRrsigPropagated). Keyset checks nameservers for the zone
+// (or the parent zone in the case of ReportDsPropagated) to make sure
+// that new information has propagated to all listed nameservers.
+// The maximum TTL is passed to Keyset::propagation1_complete (or
+// Keyset::propagation2_complete).
+// 3) Expire. This corresponds to the steps cache_expired1 and
+// cache_expired2. When enabled, this step waits until a time equal to the
+// TTL amount that was reported in propagation1_complete or
+// propagation2_complete has passed before continuing to the next step.
+// 4) Done. When enabled, this step takes care of any Wait actions
+// (WaitDnskeyPropagated, WaitDsPropagated, WaitRrsigPropagated). This
+// is very similar to the Report step except no TTL value is reported.
+// After this step, the key roll is considered done though some old data
+// may still exist in caches.
+//
+// For each key roll type, automation for each step can be enabled or disabled
+// individually. This gives a total of sixteen flags.
+//
+// The function auto_start handles the Start step. The other steps are
+// handled by auto_report_expire_done. 
The current state for automatic report +// and done handling is kept in a field called 'internal' in the KeySetState +// structure. +// +// At every change to the config or the state file, the next time +// 'dnst keyset cron' should be called is computed and stored in the +// state file. The function cron_next_auto_start provides timestamps for +// automatic start of key rolls, cron_next_auto_report_expire_done does +// the same for the report, expire, and done steps. + +/// Command line arguments of the keyset utility. +#[derive(Clone, Debug, clap::Args)] +pub struct Keyset { + /// Keyset config + #[arg(short = 'c')] + keyset_conf: PathBuf, + + /// Subcommand + #[command(subcommand)] + cmd: Commands, +} + +/// Type for an optional Duration. A separate type is needed because CLAP +/// treats Option special. +type OptDuration = Option; + +/// The subcommands of the keyset utility. +#[allow(clippy::large_enum_variant)] +#[derive(Clone, Debug, Subcommand)] +enum Commands { + /// Create empty state for a DNS zone. This will create both the config + /// file as well as the state file. + Create { + /// Domain name + #[arg(short = 'n')] + domain_name: Name>, + + /// State file + #[arg(short = 's')] + keyset_state: PathBuf, + }, + + /// Init creates keys for an empty state file. + Init, + + /// Command for KSK rolls. + Ksk { + /// The specific key roll subcommand. + #[command(subcommand)] + subcommand: RollCommands, + }, + /// Command for ZSK rolls. + Zsk { + /// The specific key roll subcommand. + #[command(subcommand)] + subcommand: RollCommands, + }, + /// Command for CSK rolls. + Csk { + /// The specific key roll subcommand. + #[command(subcommand)] + subcommand: RollCommands, + }, + /// Command for algorithm rolls. + Algorithm { + /// The specific key roll subcommand. + #[command(subcommand)] + subcommand: RollCommands, + }, + + /// Command for importing existing keys. + Import { + /// The specific import subcommand. 
+        #[command(subcommand)]
+        subcommand: ImportCommands,
+    },
+
+    /// Remove a key from the key set.
+    RemoveKey {
+        /// Force a key to be removed even if the key is not stale.
+        #[arg(long)]
+        force: bool,
+
+        /// Continue when removing the underlying keys fails.
+        #[arg(long = "continue")]
+        continue_flag: bool,
+
+        /// The key to remove.
+        key: String,
+    },
+
+    /// Report status, such as key rolls that are in progress, expired
+    /// keys, and when to call the 'cron' subcommand next.
+    Status {
+        /// Make status verbose.
+        #[arg(short = 'v', long)]
+        verbose: bool,
+    },
+    /// Report actions that are associated with the current state of
+    /// any key rolls.
+    Actions,
+    /// List all keys in the current state.
+    Keys,
+
+    /// Get various config and state values.
+    Get {
+        /// The specific get subcommand.
+        #[command(subcommand)]
+        subcommand: GetCommands,
+    },
+
+    /// Set config values.
+    Set {
+        /// The specific set subcommand.
+        #[command(subcommand)]
+        subcommand: SetCommands,
+    },
+
+    /// Show all config variables.
+    Show,
+
+    /// Execute any automatic steps such as refreshing signatures or
+    /// automatic steps in key rolls.
+    Cron,
+
+    /// Kmip command.
+    #[cfg(feature = "kmip")]
+    Kmip {
+        /// Kmip subcommands.
+        #[command(subcommand)]
+        subcommand: KmipCommands,
+    },
+}
+
+#[derive(Clone, Debug, Subcommand)]
+enum GetCommands {
+    /// Get the state of the use_csk config variable.
+    UseCsk,
+    /// Get the state of the autoremove config variable.
+    Autoremove,
+    /// Get the state of the algorithm config variable.
+    Algorithm,
+    /// Get the state of the ds_algorithm config variable.
+    DsAlgorithm,
+    /// Get the state of the dnskey_lifetime config variable.
+    DnskeyLifetime,
+    /// Get the state of the cds_lifetime config variable.
+    CdsLifetime,
+    /// Get the current DNSKEY RRset including signatures.
+    Dnskey,
+    /// Get the current CDS and CDNSKEY RRsets including signatures.
+    Cds,
+    /// Get the current DS records that can be added to the parent zone.
+ Ds, +} + +#[derive(Clone, Debug, Subcommand)] +enum SetCommands { + /// Set the use_csk config variable. + UseCsk { + /// The value of the config variable. + #[arg(action = clap::ArgAction::Set)] + boolean: bool, + }, + /// Set the autoremove config variable. + Autoremove { + /// The value of the config variable. + #[arg(action = clap::ArgAction::Set)] + boolean: bool, + }, + /// Set the algorithm config variable. + Algorithm { + /// The number of bits of a new RSA key. At the moment RSA is the + /// only public key algorithm that needs a bits argument. + #[arg(short = 'b')] + bits: Option, + + /// The algorithm to use for new keys. + algorithm: String, + }, + + /// Set the config values for automatic KSK rolls. + AutoKsk { + /// Whether to automatically start a key roll. + #[arg(action = clap::ArgAction::Set)] + start: bool, + /// Whether to automatically handle report actions. + #[arg(action = clap::ArgAction::Set)] + report: bool, + /// Whether to automatically handle cache expiration actions. + #[arg(action = clap::ArgAction::Set)] + expire: bool, + /// Whether to automatically handle done actions. + #[arg(action = clap::ArgAction::Set)] + done: bool, + }, + /// Set the config values for automatic ZSK rolls. + AutoZsk { + /// Whether to automatically start a key roll. + #[arg(action = clap::ArgAction::Set)] + start: bool, + /// Whether to automatically handle report actions. + #[arg(action = clap::ArgAction::Set)] + report: bool, + /// Whether to automatically handle cache expiration actions. + #[arg(action = clap::ArgAction::Set)] + expire: bool, + /// Whether to automatically handle done actions. + #[arg(action = clap::ArgAction::Set)] + done: bool, + }, + /// Set the config values for automatic CSK rolls. + AutoCsk { + /// Whether to automatically start a key roll. + #[arg(action = clap::ArgAction::Set)] + start: bool, + /// Whether to automatically handle report actions. 
+        #[arg(action = clap::ArgAction::Set)]
+        report: bool,
+        /// Whether to automatically handle cache expiration actions.
+        #[arg(action = clap::ArgAction::Set)]
+        expire: bool,
+        /// Whether to automatically handle done actions.
+        #[arg(action = clap::ArgAction::Set)]
+        done: bool,
+    },
+    /// Set the config values for automatic algorithm rolls.
+    AutoAlgorithm {
+        /// Whether to automatically start a key roll.
+        #[arg(action = clap::ArgAction::Set)]
+        start: bool,
+        /// Whether to automatically handle report actions.
+        #[arg(action = clap::ArgAction::Set)]
+        report: bool,
+        /// Whether to automatically handle cache expiration actions.
+        #[arg(action = clap::ArgAction::Set)]
+        expire: bool,
+        /// Whether to automatically handle done actions.
+        #[arg(action = clap::ArgAction::Set)]
+        done: bool,
+    },
+    /// Set the hash algorithm to use for creating DS records.
+    DsAlgorithm {
+        /// The hash algorithm.
+        #[arg(value_parser = DsAlgorithm::new)]
+        algorithm: DsAlgorithm,
+    },
+    /// Set the amount by which inception times of signatures over the
+    /// DNSKEY RRset are backdated.
+    ///
+    /// Note that positive values are subtracted from the current time.
+    DnskeyInceptionOffset {
+        /// The offset.
+        #[arg(value_parser = parse_duration)]
+        duration: Duration,
+    },
+    /// Set how much time the expiration times of signatures over the DNSKEY
+    /// RRset are in the future.
+    DnskeyLifetime {
+        /// The lifetime.
+        #[arg(value_parser = parse_duration)]
+        duration: Duration,
+    },
+    /// Set how much time the DNSKEY signatures still have to be valid.
+    ///
+    /// New signatures will be generated when the time until the expiration
+    /// time is less than that.
+    DnskeyRemainTime {
+        /// The required remaining time.
+        #[arg(value_parser = parse_duration)]
+        duration: Duration,
+    },
+    /// Set the amount by which inception times of signatures over the CDS
+    /// and CDNSKEY RRsets are backdated.
+    ///
+    /// Note that positive values are subtracted from the current time.
+    CdsInceptionOffset {
+        /// The offset.
+ #[arg(value_parser = parse_duration)] + duration: Duration, + }, + /// Set how much time the expiration times of signatures over the CDS + /// and CDNSKEY RRsets are in the future. + CdsLifetime { + /// The lifetime. + #[arg(value_parser = parse_duration)] + duration: Duration, + }, + /// Set how much time the CDS/CDNSKEY signatures still have to be valid. + /// + /// New signatures will be generated when the time until the expiration + /// time is less than that. + CdsRemainTime { + /// The required remaining time. + #[arg(value_parser = parse_duration)] + duration: Duration, + }, + /// How long a KSK is valid from the time it was first 'published'. + KskValidity { + /// The amount of time the key is valid. + #[arg(value_parser = parse_opt_duration)] + opt_duration: OptDuration, + }, + /// How long a ZSK is valid from the time it was first 'published'. + ZskValidity { + /// The amount of time the key is valid. + #[arg(value_parser = parse_opt_duration)] + opt_duration: OptDuration, + }, + /// How long a CSK is valid from the time it was first 'published'. + CskValidity { + /// The amount of time the key is valid. + #[arg(value_parser = parse_opt_duration)] + opt_duration: OptDuration, + }, + + /// Set the TTL to be used DNSKEY/CDS/CDNSKEY records. + DefaultTtl { + /// TTL value to set. + ttl: u32, + }, + + /// Set the command to run when the DS records at the parent need updating. + UpdateDsCommand { + /// Command and arguments. + args: Vec, + }, +} + +#[derive(Clone, Debug, Subcommand)] +enum RollCommands { + /// Start a key roll. + StartRoll, + /// Report that the first propagation step has completed. + Propagation1Complete { + /// The TTL that is required to be reported by the Report actions. + ttl: u32, + }, + /// Cached information from before Propagation1Complete should have + /// expired by now. + CacheExpired1, + /// Report that the second propagation step has completed. 
+    Propagation2Complete {
+        /// The TTL that is required to be reported by the Report actions.
+        ttl: u32,
+    },
+    /// Cached information from before Propagation2Complete should have
+    /// expired by now.
+    CacheExpired2,
+    /// Report that the final changes have propagated and that the roll is done.
+    RollDone,
+}
+
+#[derive(Clone, Debug, Subcommand)]
+enum ImportCommands {
+    /// Import a public key.
+    PublicKey {
+        /// The file name of the public key.
+        path: PathBuf,
+    },
+
+    /// Command for KSK imports.
+    Ksk {
+        /// The specific key import subcommand.
+        #[command(subcommand)]
+        subcommand: ImportKeyCommands,
+    },
+    /// Command for ZSK imports.
+    Zsk {
+        /// The specific key import subcommand.
+        #[command(subcommand)]
+        subcommand: ImportKeyCommands,
+    },
+    /// Command for CSK imports.
+    Csk {
+        /// The specific key import subcommand.
+        #[command(subcommand)]
+        subcommand: ImportKeyCommands,
+    },
+}
+
+#[derive(Clone, Debug, Subcommand)]
+enum ImportKeyCommands {
+    /// Import public/private key pair from file.
+    File {
+        /// Take ownership of the imported keys.
+        ///
+        /// When the key is removed from the key set, the underlying keys
+        /// are also removed. The default is decoupled, in which case the
+        /// underlying keys are not removed.
+        #[arg(long)]
+        coupled: bool,
+
+        /// Explicitly pass the name of the file that holds the private key.
+        ///
+        /// Otherwise the name is derived from the name of the file that holds
+        /// the public key.
+        #[arg(long)]
+        private_key: Option<PathBuf>,
+
+        /// Pathname of the public key.
+        path: PathBuf,
+    },
+    #[cfg(feature = "kmip")]
+    /// Import a KMIP public/private key pair.
+    Kmip {
+        /// Take ownership of the imported keys.
+        ///
+        /// When the key is removed from the key set, the underlying keys
+        /// are also removed. The default is decoupled, in which case the
+        /// underlying keys are not removed.
+        #[arg(long)]
+        coupled: bool,
+
+        /// The identifier of the KMIP server.
+        server: String,
+
+        /// The KMIP identifier of the public key.
+ public_id: String, + + /// The KMIP identifier of the private key. + private_id: String, + + /// The key's DNSSEC security algorithm. + algorithm: SecurityAlgorithm, + + /// Value to put in the DNSKEY flags field. + flags: u16, + }, +} + +#[derive(Debug)] +enum KeyVariant { + /// Apply command to KSKs. + Ksk, + /// Apply command to ZSKs. + Zsk, + /// Apply command to CSKs. + Csk, +} + +// We cannot use RollType because that name is already in use. +enum RollVariant { + /// Apply the subcommand to a KSK roll. + Ksk, + /// Apply the subcommand to a ZSK roll. + Zsk, + /// Apply the subcommand to a CSK roll. + Csk, + /// Apply the subcommand to an algorithm roll. + Algorithm, +} + +impl Keyset { + /// execute the keyset command. + pub fn execute(self, env: impl Env) -> Result<(), Error> { + let runtime = + tokio::runtime::Runtime::new().expect("tokio::runtime::Runtime::new should not fail"); + runtime.block_on(self.run(&env)) + } + + /// Run the command as an async function + pub async fn run(self, env: &impl Env) -> Result<(), Error> { + if let Commands::Create { + domain_name, + keyset_state, + } = self.cmd + { + let config_file_dir = make_parent_dir(self.keyset_conf.clone()); + + let state_file = absolute(&keyset_state).map_err(|e| { + format!("unable to make {} absolute: {}", keyset_state.display(), e) + })?; + let state_file_dir = make_parent_dir(state_file.clone()); + let keys_dir = state_file_dir.clone(); + + let ks = KeySet::new(domain_name); + let kss = KeySetState { + keyset: ks, + dnskey_rrset: Vec::new(), + ds_rrset: Vec::new(), + cds_rrset: Vec::new(), + ns_rrset: Vec::new(), + cron_next: None, + internal: HashMap::new(), + + #[cfg(feature = "kmip")] + kmip: Default::default(), + }; + const ONE_DAY: u64 = 86400; + const FOUR_WEEKS: u64 = 2419200; + let ksc = KeySetConfig { + state_file: state_file.clone(), + keys_dir, + use_csk: false, + algorithm: KeyParameters::EcdsaP256Sha256, + ksk_validity: None, + zsk_validity: None, + csk_validity: None, + 
auto_ksk: { Default::default() }, + auto_zsk: { Default::default() }, + auto_csk: { Default::default() }, + auto_algorithm: { Default::default() }, + dnskey_inception_offset: Duration::from_secs(ONE_DAY), + dnskey_signature_lifetime: Duration::from_secs(FOUR_WEEKS), + dnskey_remain_time: Duration::from_secs(FOUR_WEEKS / 2), + cds_inception_offset: Duration::from_secs(ONE_DAY), + cds_signature_lifetime: Duration::from_secs(FOUR_WEEKS), + cds_remain_time: Duration::from_secs(FOUR_WEEKS / 2), + ds_algorithm: DsAlgorithm::Sha256, + default_ttl: DEFAULT_TTL, + autoremove: false, + update_ds_command: Vec::new(), + }; + + // Create the parent directies. + create_dir_all(&state_file_dir).map_err(|e| { + format!( + "unable to create directory '{}': {e}", + state_file_dir.display() + ) + })?; + create_dir_all(&config_file_dir).map_err(|e| { + format!( + "unable to create directory '{}': {e}", + config_file_dir.display() + ) + })?; + + let json = serde_json::to_string_pretty(&kss).expect("should not fail"); + let mut file = File::create(&state_file) + .map_err(|e| format!("unable to create file {}: {e}", state_file.display()))?; + write!(file, "{json}") + .map_err(|e| format!("unable to write to file {}: {e}", state_file.display()))?; + + let json = serde_json::to_string_pretty(&ksc).expect("should not fail"); + let mut file = File::create(&self.keyset_conf).map_err(|e| { + format!("unable to create file {}: {e}", self.keyset_conf.display()) + })?; + write!(file, "{json}").map_err(|e| { + format!( + "unable to write to file {}: {e}", + self.keyset_conf.display() + ) + })?; + return Ok(()); + } + + let file = File::open(self.keyset_conf.clone()).map_err(|e| { + format!( + "unable to open config file {}: {e}", + self.keyset_conf.display() + ) + })?; + let mut ksc: KeySetConfig = serde_json::from_reader(file) + .map_err(|e| format!("error loading {:?}: {e}\n", self.keyset_conf))?; + let file = File::open(ksc.state_file.clone()).map_err(|e| { + format!( + "unable to open state 
file {}: {e}", + ksc.state_file.display() + ) + })?; + let mut kss: KeySetState = serde_json::from_reader(file) + .map_err(|e| format!("error loading {:?}: {e}\n", ksc.state_file))?; + + let mut config_changed = false; + let mut state_changed = false; + let mut run_update_ds_command = false; + + match self.cmd { + Commands::Create { .. } => unreachable!(), + Commands::Init => { + // Assume that dnskey_rrset is a reliable way to tell if + // we are initialized or not. + // Check for re-init. + if !kss.dnskey_rrset.is_empty() { + // Avoid re-init. + return Err("already initialized\n".into()); + } + + // Check if we have any imported keys. Include doesn't count. + // if we do, make we sure we have a CSK or a KSK plus a ZSK. + // If we have only of a KSK or only a ZSK then fail. Otherwise + // Create the dnskey_rrset and the ds_rrset. + let mut ksk_present = false; + let mut zsk_present = false; + let mut csk_present = false; + for k in kss.keyset.keys().values() { + match k.keytype() { + KeyType::Ksk(_) => ksk_present = true, + KeyType::Zsk(_) => zsk_present = true, + KeyType::Csk(_, _) => csk_present = true, + KeyType::Include(_) => (), + } + } + if (ksk_present && zsk_present) || csk_present { + // Start with imported keys. + update_dnskey_rrset(&ksc, &mut kss, env, true)?; + update_ds_rrset(&ksc, &mut kss, env, true)?; + } else if ksk_present || zsk_present { + // Incomplete keys + return Err("Cannot start with only a KSK or ZSK.".into()); + } else { + // No imported keys (except possibly for Include), start + // an algorithm roll. 
+ + let (new_stored, _) = new_csk_or_ksk_zsk(&ksc, &mut kss, env)?; + + let new: Vec<_> = new_stored.iter().map(|v| v.as_ref()).collect(); + let actions = kss + .keyset + .start_roll(RollType::AlgorithmRoll, &[], &new) + .expect("should not happen"); + + handle_actions( + &actions, + &ksc, + &mut kss, + env, + true, + &mut run_update_ds_command, + )?; + kss.internal + .insert(RollType::AlgorithmRoll, Default::default()); + + print_actions(&actions); + } + state_changed = true; + } + Commands::Ksk { subcommand } => roll_command( + subcommand, + RollVariant::Ksk, + &ksc, + &mut kss, + env, + &mut state_changed, + &mut run_update_ds_command, + )?, + Commands::Zsk { subcommand } => roll_command( + subcommand, + RollVariant::Zsk, + &ksc, + &mut kss, + env, + &mut state_changed, + &mut run_update_ds_command, + )?, + Commands::Csk { subcommand } => roll_command( + subcommand, + RollVariant::Csk, + &ksc, + &mut kss, + env, + &mut state_changed, + &mut run_update_ds_command, + )?, + Commands::Algorithm { subcommand } => roll_command( + subcommand, + RollVariant::Algorithm, + &ksc, + &mut kss, + env, + &mut state_changed, + &mut run_update_ds_command, + )?, + + Commands::Import { subcommand } => { + import_command(subcommand, &ksc, &mut kss, env, &mut state_changed)? + } + + Commands::RemoveKey { + key, + force, + continue_flag, + } => { + remove_key_command(key, force, continue_flag, &mut kss)?; + if force { + // If the key was in use then the DNSKEY RRset may be + // affected. Avoid introducing a DNSKEY RRset when there + // was none. + if !kss.dnskey_rrset.is_empty() { + update_dnskey_rrset(&ksc, &mut kss, env, true)?; + } + + // What about CDS/CDNSKEY/DS? + } + state_changed = true; + } + + Commands::Status { verbose } => { + // This clone is needed because public_key_from_url needs a + // mutable reference to kss. Rewrite the kmip code to avoid + // that. 
+ let rollstates = kss.keyset.rollstates().clone(); + for (roll, state) in rollstates.iter() { + println!("{roll:?}: {state:?}"); + + if verbose { + let mut keyset = kss.keyset.clone(); + let res = match state { + RollState::CacheExpire1(_) => Some(keyset.cache_expired1(*roll)), + RollState::CacheExpire2(_) => Some(keyset.cache_expired2(*roll)), + _ => None, + }; + if let Some(res) = res { + if let Err(keyset::Error::Wait(remain)) = res { + println!( + "Wait until {} to let caches expire", + UnixTime::now() + remain + ); + } else if let Err(e) = res { + return Err(format!( + "cache_expired[12] failed for state {roll:?}: {e}" + ) + .into()); + } else { + println!("Caches have expired, continue with the next step"); + } + } + + for action in kss.keyset.actions(*roll) { + match action { + Action::UpdateDnskeyRrset + | Action::CreateCdsRrset + | Action::RemoveCdsRrset + | Action::UpdateDsRrset + | Action::UpdateRrsig => (), + Action::ReportDnskeyPropagated | Action::WaitDnskeyPropagated => { + println!("Check that the following RRset has propagated to all name servers:"); + for r in &kss.dnskey_rrset { + println!("{r}"); + } + println!(); + } + Action::ReportDsPropagated | Action::WaitDsPropagated => { + println!("Check that all nameservers of the parent zone have the following RRset (or equivalent):"); + for r in &kss.ds_rrset { + println!("{r}"); + } + println!(); + } + Action::ReportRrsigPropagated | Action::WaitRrsigPropagated => { + println!("Check that all authoritative records in the zone have been signed with the following key(s) and that all nameservers of the zone serve that version or later:"); + // This clone is needed because + // public_key_from_url needs a mutable + // reference to kss. Rewrite the kmip + // code to avoid that. 
+ let keys = kss.keyset.keys().clone(); + for (pubref, k) in keys { + let status = match k.keytype() { + KeyType::Zsk(status) => status, + KeyType::Csk(_, zsk_status) => zsk_status, + KeyType::Ksk(_) | KeyType::Include(_) => continue, + }; + if status.signer() { + let url = Url::parse(&pubref).map_err(|e| { + format!("unable to parse {pubref} as URL: {e}") + })?; + let public_key = public_key_from_url::>( + &url, &ksc, &mut kss, env, + )?; + println!( + "{public_key} ; key tag {}", + public_key.data().key_tag() + ); + } + } + println!(); + } + } + } + + let keyset_cmd = format!("dnst keyset -c {}", self.keyset_conf.display()); + + let (roll_subcommand, auto) = match roll { + RollType::KskRoll => ("ksk", &ksc.auto_ksk), + RollType::KskDoubleDsRoll => ("ksk", &ksc.auto_ksk), + RollType::ZskRoll => ("zsk", &ksc.auto_zsk), + RollType::ZskDoubleSignatureRoll => ("zsk", &ksc.auto_zsk), + RollType::CskRoll => ("csk", &ksc.auto_csk), + RollType::AlgorithmRoll => ("algorithm", &ksc.auto_algorithm), + }; + let (state_subcommand, auto) = match state { + RollState::Propagation1 => ("propagation1-complete ", auto.report), + RollState::CacheExpire1(_) => ("cache-expired1", auto.expire), + RollState::Propagation2 => ("propagation2-complete ", auto.report), + RollState::CacheExpire2(_) => ("cache-expired2", auto.expire), + RollState::Done => ("roll-done", auto.done), + }; + println!("For the next step run:"); + println!("\t{keyset_cmd} {roll_subcommand} {state_subcommand}"); + println!( + "\tautomation is {} for this step.", + if auto { "enabled" } else { "disabled" } + ); + println!(); + } + } + + let mut first = true; + for (r, s) in kss.keyset.rollstates() { + let auto_state = kss.internal.get(r).expect("should exist"); + match s { + // Nothing to report. 
+ RollState::CacheExpire1(_) | RollState::CacheExpire2(_) => (), + + RollState::Propagation1 => { + let auto_state = + auto_state.propagation1.lock().expect("should not fail"); + if auto_state.dnskey.is_none() + && auto_state.ds.is_none() + && auto_state.rrsig.is_none() + { + continue; + } + if first { + first = false; + println!("Automatic key roll state:"); + } + show_automatic_roll_state(*r, s, &auto_state, true); + } + RollState::Propagation2 => { + let auto_state = + auto_state.propagation2.lock().expect("should not fail"); + if auto_state.dnskey.is_none() + && auto_state.ds.is_none() + && auto_state.rrsig.is_none() + { + continue; + } + if first { + first = false; + println!("Automatic key roll state:"); + } + show_automatic_roll_state(*r, s, &auto_state, true); + } + RollState::Done => { + let auto_state = auto_state.done.lock().expect("should not fail"); + if auto_state.dnskey.is_none() + && auto_state.ds.is_none() + && auto_state.rrsig.is_none() + { + continue; + } + if first { + first = false; + println!("Automatic key roll state:"); + } + show_automatic_roll_state(*r, s, &auto_state, false); + } + } + } + if !first { + println!(); + } + + if sig_renew(&kss.dnskey_rrset, &ksc.dnskey_remain_time) { + println!("DNSKEY RRSIG(s) need to be renewed"); + } + if sig_renew(&kss.cds_rrset, &ksc.cds_remain_time) { + println!("CDS/CDNSKEY RRSIG(s) need to be renewed"); + } + + // Check for expired keys. 
+ if verbose { + for (pubref, k) in kss.keyset.keys() { + let (keystate, validity) = match k.keytype() { + KeyType::Ksk(keystate) => (keystate, Some(ksc.ksk_validity)), + KeyType::Zsk(keystate) => (keystate, Some(ksc.zsk_validity)), + KeyType::Csk(ksk_keystate, _) => (ksk_keystate, Some(ksc.csk_validity)), + KeyType::Include(keystate) => (keystate, None), + }; + if keystate.stale() { + println!("key {pubref} is stale"); + if ksc.autoremove { + println!("this key will be removed automatically after the next key roll"); + } else { + println!("remove manually (autoremove is false)"); + } + continue; + } + + if let Some(opt_validity) = validity { + if let Some(validity) = opt_validity { + let Some(timestamp) = k.timestamps().published() else { + println!("key {pubref} is not yet published."); + continue; + }; + if timestamp.elapsed() > validity { + println!("key {pubref} has expired."); + } else { + println!("key {pubref} expires at {}", timestamp + validity); + } + } else { + println!("key {pubref} does not expire. No validity period is configured for the key type"); + } + } else { + println!("key {pubref} does not expire. 
No validity is defined for this key type."); + } + } + println!(); + } else { + for (pubref, k) in kss.keyset.keys() { + let (expired, label) = key_expired(k, &ksc); + if expired { + println!("{label} {pubref} has expired"); + } + } + } + if let Some(cron_next) = &kss.cron_next { + println!("Next time to run the 'cron' subcommand {cron_next}"); + } + } + Commands::Actions => { + for roll in kss.keyset.rollstates().keys() { + let actions = kss.keyset.actions(*roll); + println!("{roll:?} actions:"); + print_actions(&actions); + } + } + Commands::Keys => { + println!("Keys:"); + let mut keys: Vec<_> = kss.keyset.keys().iter().collect(); + keys.sort_by(|(pubref1, key1), (pubref2, key2)| { + (key1.timestamps().creation(), pubref1) + .cmp(&(key2.timestamps().creation(), pubref2)) + }); + for (pubref, key) in keys { + println!("\t{} {}", pubref, key.privref().unwrap_or_default(),); + println!("\t\tDecoupled: {}", key.decoupled(),); + let (keytype, state, opt_state) = match key.keytype() { + KeyType::Ksk(keystate) => ("KSK", keystate, None), + KeyType::Zsk(keystate) => ("ZSK", keystate, None), + KeyType::Include(keystate) => ("Include", keystate, None), + KeyType::Csk(keystate_ksk, keystate_zsk) => { + ("CSK", keystate_ksk, Some(keystate_zsk)) + } + }; + println!( + "\t\tType: {keytype}, algorithm: {}, key tag: {}", + key.algorithm(), + key.key_tag() + ); + if let Some(zskstate) = opt_state { + println!("\t\tKSK role state: {state}"); + println!("\t\tZSK role state: {zskstate}"); + } else { + println!("\t\tState: {state}"); + } + let ts = key.timestamps(); + println!( + "\t\tCreated: {}", + ts.creation() + .map_or("".to_string(), |x| x.to_string()), + ); + println!( + "\t\tPublished: {}", + ts.published() + .map_or("".to_string(), |x| x.to_string()) + ); + println!( + "\t\tVisible: {}", + ts.visible() + .map_or("".to_string(), |x| x.to_string()), + ); + println!( + "\t\tDS visible: {}", + ts.ds_visible() + .map_or("".to_string(), |x| x.to_string()) + ); + println!( + 
"\t\tRRSIG visible: {}", + ts.rrsig_visible() + .map_or("".to_string(), |x| x.to_string()), + ); + println!( + "\t\tWithdrawn: {}", + ts.withdrawn() + .map_or("".to_string(), |x| x.to_string()) + ); + } + } + Commands::Get { subcommand } => get_command(subcommand, &ksc, &kss), + Commands::Set { subcommand } => set_command(subcommand, &mut ksc, &mut config_changed)?, + Commands::Show => { + println!("state-file: {:?}", ksc.state_file); + println!("use-csk: {}", ksc.use_csk); + println!("algorithm: {}", ksc.algorithm); + println!("ksk-validity: {:?}", ksc.ksk_validity); + println!("zsk-validity: {:?}", ksc.zsk_validity); + println!("csk-validity: {:?}", ksc.csk_validity); + println!( + "auto-ksk: start {}, report {}, expire {}, done {}", + ksc.auto_ksk.start, ksc.auto_ksk.report, ksc.auto_ksk.expire, ksc.auto_ksk.done, + ); + println!( + "auto-zsk: start {}, report {}, expire {}, done {}", + ksc.auto_zsk.start, ksc.auto_zsk.report, ksc.auto_zsk.expire, ksc.auto_zsk.done, + ); + println!( + "auto-csk: start {}, report {}, expire {}, done {}", + ksc.auto_csk.start, ksc.auto_csk.report, ksc.auto_csk.expire, ksc.auto_csk.done, + ); + println!( + "auto-algorithm: start {}, report {}, expire {}, done {}", + ksc.auto_algorithm.start, + ksc.auto_algorithm.report, + ksc.auto_algorithm.expire, + ksc.auto_algorithm.done, + ); + println!("dnskey-inception-offset: {:?}", ksc.dnskey_inception_offset); + println!( + "dnskey-signature-lifetime: {:?}", + ksc.dnskey_signature_lifetime + ); + println!("dnskey-remain-time: {:?}", ksc.dnskey_remain_time); + println!("cds-inception-offset: {:?}", ksc.cds_inception_offset); + println!("cds-signature-lifetime: {:?}", ksc.cds_signature_lifetime); + println!("cds-remain-time: {:?}", ksc.cds_remain_time); + println!("ds-algorithm: {:?}", ksc.ds_algorithm); + println!("default-ttl: {:?}", ksc.default_ttl); + println!("autoremove: {:?}", ksc.autoremove); + println!("update_ds_command: {:?}", ksc.update_ds_command); + } + Commands::Cron => { + if 
sig_renew(&kss.dnskey_rrset, &ksc.dnskey_remain_time) { + println!("DNSKEY RRSIG(s) need to be renewed"); + update_dnskey_rrset(&ksc, &mut kss, env, false)?; + state_changed = true; + } + if sig_renew(&kss.cds_rrset, &ksc.cds_remain_time) { + println!("CDS/CDNSKEY RRSIGs need to be renewed"); + create_cds_rrset( + &mut kss, + &ksc, + ksc.ds_algorithm.to_digest_algorithm(), + env, + false, + )?; + state_changed = true; + } + + let need_algorithm_roll = algorithm_roll_needed(&ksc, &kss); + + if ksc.use_csk || need_algorithm_roll { + // Start a CSK or algorithm roll if the KSK has expired. + // All other rolls are a conflict. + auto_start( + &ksc.ksk_validity, + if need_algorithm_roll { + &ksc.auto_algorithm + } else { + &ksc.auto_csk + }, + &ksc, + &mut kss, + env, + &mut state_changed, + |_| true, + |keytype| { + if let KeyType::Ksk(keystate) = keytype { + Some(keystate) + } else { + None + } + }, + if need_algorithm_roll { + start_algorithm_roll + } else { + start_csk_roll + }, + &mut run_update_ds_command, + )?; + + // The same for the ZSK. 
+ auto_start( + &ksc.zsk_validity, + if need_algorithm_roll { + &ksc.auto_algorithm + } else { + &ksc.auto_csk + }, + &ksc, + &mut kss, + env, + &mut state_changed, + |_| true, + |keytype| { + if let KeyType::Zsk(keystate) = keytype { + Some(keystate) + } else { + None + } + }, + if need_algorithm_roll { + start_algorithm_roll + } else { + start_csk_roll + }, + &mut run_update_ds_command, + )?; + } else { + auto_start( + &ksc.ksk_validity, + &ksc.auto_ksk, + &ksc, + &mut kss, + env, + &mut state_changed, + |r| r != RollType::ZskRoll && r != RollType::ZskDoubleSignatureRoll, + |keytype| { + if let KeyType::Ksk(keystate) = keytype { + Some(keystate) + } else { + None + } + }, + start_ksk_roll, + &mut run_update_ds_command, + )?; + + auto_start( + &ksc.zsk_validity, + &ksc.auto_zsk, + &ksc, + &mut kss, + env, + &mut state_changed, + |r| r != RollType::KskRoll && r != RollType::KskDoubleDsRoll, + |keytype| { + if let KeyType::Zsk(keystate) = keytype { + Some(keystate) + } else { + None + } + }, + start_zsk_roll, + &mut run_update_ds_command, + )?; + } + + auto_start( + &ksc.csk_validity, + if need_algorithm_roll { + &ksc.auto_algorithm + } else { + &ksc.auto_csk + }, + &ksc, + &mut kss, + env, + &mut state_changed, + |_| true, + |keytype| { + if let KeyType::Csk(keystate, _) = keytype { + Some(keystate) + } else { + None + } + }, + if need_algorithm_roll { + start_algorithm_roll + } else { + start_csk_roll + }, + &mut run_update_ds_command, + )?; + + auto_report_expire_done( + &ksc.auto_ksk, + &[RollType::KskRoll, RollType::KskDoubleDsRoll], + &ksc, + &mut kss, + env, + &mut state_changed, + &mut run_update_ds_command, + ) + .await?; + auto_report_expire_done( + &ksc.auto_zsk, + &[RollType::ZskRoll, RollType::ZskDoubleSignatureRoll], + &ksc, + &mut kss, + env, + &mut state_changed, + &mut run_update_ds_command, + ) + .await?; + auto_report_expire_done( + &ksc.auto_csk, + &[RollType::CskRoll], + &ksc, + &mut kss, + env, + &mut state_changed, + &mut 
run_update_ds_command, + ) + .await?; + auto_report_expire_done( + &ksc.auto_algorithm, + &[RollType::AlgorithmRoll], + &ksc, + &mut kss, + env, + &mut state_changed, + &mut run_update_ds_command, + ) + .await?; + } + + #[cfg(feature = "kmip")] + Commands::Kmip { subcommand } => { + state_changed = kmip_command(env, subcommand, &mut kss)?; + } + } + + if !config_changed && !state_changed { + // No need to update cron_next if nothing has changed. + return Ok(()); + } + + let mut cron_next = Vec::new(); + + cron_next.push(compute_cron_next( + &kss.dnskey_rrset, + &ksc.dnskey_remain_time, + )); + + cron_next.push(compute_cron_next(&kss.cds_rrset, &ksc.cds_remain_time)); + + let need_algorithm_roll = algorithm_roll_needed(&ksc, &kss); + + if ksc.use_csk || need_algorithm_roll { + cron_next_auto_start( + ksc.ksk_validity, + if need_algorithm_roll { + &ksc.auto_algorithm + } else { + &ksc.auto_csk + }, + &kss, + |_| true, + |keytype| { + if let KeyType::Ksk(keystate) = keytype { + Some(keystate) + } else { + None + } + }, + &mut cron_next, + ); + cron_next_auto_start( + ksc.zsk_validity, + if need_algorithm_roll { + &ksc.auto_algorithm + } else { + &ksc.auto_csk + }, + &kss, + |_| true, + |keytype| { + if let KeyType::Zsk(keystate) = keytype { + Some(keystate) + } else { + None + } + }, + &mut cron_next, + ); + } else { + cron_next_auto_start( + ksc.ksk_validity, + &ksc.auto_ksk, + &kss, + |r| r != RollType::ZskRoll && r != RollType::ZskDoubleSignatureRoll, + |keytype| { + if let KeyType::Ksk(keystate) = keytype { + Some(keystate) + } else { + None + } + }, + &mut cron_next, + ); + cron_next_auto_start( + ksc.zsk_validity, + &ksc.auto_zsk, + &kss, + |r| r != RollType::KskRoll && r != RollType::KskDoubleDsRoll, + |keytype| { + if let KeyType::Zsk(keystate) = keytype { + Some(keystate) + } else { + None + } + }, + &mut cron_next, + ); + } + + cron_next_auto_start( + ksc.csk_validity, + if need_algorithm_roll { + &ksc.auto_algorithm + } else { + &ksc.auto_csk + }, + &kss, + 
|_| true, + |keytype| { + if let KeyType::Csk(keystate, _) = keytype { + Some(keystate) + } else { + None + } + }, + &mut cron_next, + ); + + cron_next_auto_report_expire_done( + &ksc.auto_ksk, + &[RollType::KskRoll, RollType::KskDoubleDsRoll], + &kss, + &mut cron_next, + )?; + cron_next_auto_report_expire_done( + &ksc.auto_zsk, + &[RollType::ZskRoll, RollType::ZskDoubleSignatureRoll], + &kss, + &mut cron_next, + )?; + cron_next_auto_report_expire_done( + &ksc.auto_csk, + &[RollType::CskRoll], + &kss, + &mut cron_next, + )?; + cron_next_auto_report_expire_done( + &ksc.auto_algorithm, + &[RollType::AlgorithmRoll], + &kss, + &mut cron_next, + )?; + + let cron_next = cron_next.iter().filter_map(|e| e.clone()).min(); + + if cron_next != kss.cron_next { + kss.cron_next = cron_next; + state_changed = true; + } + if config_changed { + let json = serde_json::to_string_pretty(&ksc).expect("should not fail"); + let mut file = File::create(&self.keyset_conf).map_err(|e| { + format!("unable to create file {}: {e}", self.keyset_conf.display()) + })?; + write!(file, "{json}").map_err(|e| { + format!( + "unable to write to file {}: {e}", + self.keyset_conf.display() + ) + })?; + } + if state_changed { + let json = serde_json::to_string_pretty(&kss).expect("should not fail"); + let mut file = File::create(&ksc.state_file) + .map_err(|e| format!("unable to create file {}: {e}", ksc.state_file.display()))?; + write!(file, "{json}").map_err(|e| { + format!("unable to write to file {}: {e}", ksc.state_file.display()) + })?; + } + + // Now check if we need to run the update_ds_command. Make sure that + // all locks are released before running the command. The command + // may want to call back into keyset to retreive the DS + // (or CDS/CDNSKEY) records. 
+ if run_update_ds_command && !ksc.update_ds_command.is_empty() { + let output = Command::new(&ksc.update_ds_command[0]) + .args(&ksc.update_ds_command[1..]) + .output() + .map_err(|e| { + format!( + "creating for command for {} failed: {e}", + ksc.update_ds_command[0] + ) + })?; + if !output.status.success() { + println!("update command failed with: {}", output.status); + io::stdout() + .write_all(&output.stdout) + .map_err(|e| format!("writing to stdout failed: {e}"))?; + io::stderr() + .write_all(&output.stderr) + .map_err(|e| format!("writing to stderr failed: {e}"))?; + } + } + + Ok(()) + } +} + +/// Remove a key from the filesystem or the HSM. +#[allow(unused_variables)] +fn remove_key(kss: &mut KeySetState, url: Url) -> Result<(), Error> { + match url.scheme() { + "file" => { + remove_file(url.path()) + .map_err(|e| format!("unable to remove key file {}: {e}\n", url.path()))?; + } + + #[cfg(feature = "kmip")] + "kmip" => { + let key_url = KeyUrl::try_from(url)?; + let conn = kss.kmip.get_pool(key_url.server_id())?.get()?; + conn.destroy_key(key_url.key_id()) + .map_err(|e| format!("unable to remove key {key_url}: {e}"))?; + } + + _ => { + panic!("Unsupported URL scheme while removing key {url}"); + } + } + + Ok(()) +} + +/// Execute the key roll subcommands. +fn roll_command( + cmd: RollCommands, + roll_variant: RollVariant, + ksc: &KeySetConfig, + kss: &mut KeySetState, + env: &impl Env, + state_changed: &mut bool, + run_update_ds_command: &mut bool, +) -> Result<(), Error> { + let actions = match cmd { + RollCommands::StartRoll => { + let actions = match roll_variant { + RollVariant::Ksk => start_ksk_roll(ksc, kss, env, true, run_update_ds_command)?, + RollVariant::Zsk => start_zsk_roll(ksc, kss, env, true, run_update_ds_command)?, + RollVariant::Csk => start_csk_roll(ksc, kss, env, true, run_update_ds_command)?, + RollVariant::Algorithm => { + start_algorithm_roll(ksc, kss, env, true, run_update_ds_command)? 
+ } + }; + + print_actions(&actions); + *state_changed = true; + return Ok(()); + } + RollCommands::Propagation1Complete { ttl } => { + let roll = roll_variant_to_roll(roll_variant); + kss.keyset.propagation1_complete(roll, ttl) + } + RollCommands::CacheExpired1 => { + let roll = roll_variant_to_roll(roll_variant); + kss.keyset.cache_expired1(roll) + } + RollCommands::Propagation2Complete { ttl } => { + let roll = roll_variant_to_roll(roll_variant); + kss.keyset.propagation2_complete(roll, ttl) + } + RollCommands::CacheExpired2 => { + let roll = roll_variant_to_roll(roll_variant); + kss.keyset.cache_expired2(roll) + } + RollCommands::RollDone => { + let roll = roll_variant_to_roll(roll_variant); + do_done(kss, roll, ksc.autoremove)?; + *state_changed = true; + return Ok(()); + } + }; + + let actions = match actions { + Ok(actions) => actions, + Err(err) => { + return Err(format!("Error reporting propagation complete: {err}\n").into()); + } + }; + + handle_actions(&actions, ksc, kss, env, true, run_update_ds_command)?; + + // Report actions + print_actions(&actions); + *state_changed = true; + Ok(()) +} + +/// Implement the get subcommand. 
+fn get_command(cmd: GetCommands, ksc: &KeySetConfig, kss: &KeySetState) { + match cmd { + GetCommands::UseCsk => { + println!("{}", ksc.use_csk); + } + GetCommands::Autoremove => { + println!("{}", ksc.autoremove); + } + GetCommands::Algorithm => { + println!("{}", ksc.algorithm); + } + GetCommands::DsAlgorithm => { + println!("{}", ksc.ds_algorithm); + } + GetCommands::DnskeyLifetime => { + let span = Span::try_from(ksc.dnskey_signature_lifetime).expect("should not fail"); + let signeddur = span + .to_duration(SpanRelativeTo::days_are_24_hours()) + .expect("should not fail"); + println!("{signeddur:#}"); + } + GetCommands::CdsLifetime => { + let span = Span::try_from(ksc.cds_signature_lifetime).expect("should not fail"); + let signeddur = span + .to_duration(SpanRelativeTo::days_are_24_hours()) + .expect("should not fail"); + println!("{signeddur:#}"); + } + GetCommands::Dnskey => { + for r in &kss.dnskey_rrset { + println!("{r}"); + } + } + GetCommands::Cds => { + for r in &kss.cds_rrset { + println!("{r}"); + } + } + GetCommands::Ds => { + for r in &kss.ds_rrset { + println!("{r}"); + } + } + } +} + +/// Implement the set subcommand. 
+fn set_command( + cmd: SetCommands, + ksc: &mut KeySetConfig, + config_changed: &mut bool, +) -> Result<(), Error> { + match cmd { + SetCommands::UseCsk { boolean } => { + ksc.use_csk = boolean; + } + SetCommands::Autoremove { boolean } => { + ksc.autoremove = boolean; + } + SetCommands::Algorithm { algorithm, bits } => { + ksc.algorithm = KeyParameters::new(&algorithm, bits)?; + } + SetCommands::AutoKsk { + start, + report, + expire, + done, + } => { + ksc.auto_ksk = AutoConfig { + start, + report, + expire, + done, + }; + *config_changed = true; + } + SetCommands::AutoZsk { + start, + report, + expire, + done, + } => { + ksc.auto_zsk = AutoConfig { + start, + report, + expire, + done, + }; + *config_changed = true; + } + SetCommands::AutoCsk { + start, + report, + expire, + done, + } => { + ksc.auto_csk = AutoConfig { + start, + report, + expire, + done, + }; + *config_changed = true; + } + SetCommands::AutoAlgorithm { + start, + report, + expire, + done, + } => { + ksc.auto_algorithm = AutoConfig { + start, + report, + expire, + done, + }; + *config_changed = true; + } + SetCommands::DsAlgorithm { algorithm } => { + ksc.ds_algorithm = algorithm; + } + SetCommands::DnskeyInceptionOffset { duration } => { + ksc.dnskey_inception_offset = duration; + } + SetCommands::DnskeyLifetime { duration } => { + ksc.dnskey_signature_lifetime = duration; + } + SetCommands::DnskeyRemainTime { duration } => { + ksc.dnskey_remain_time = duration; + } + SetCommands::CdsInceptionOffset { duration } => { + ksc.cds_inception_offset = duration; + } + SetCommands::CdsLifetime { duration } => { + ksc.cds_signature_lifetime = duration; + } + SetCommands::CdsRemainTime { duration } => { + ksc.cds_remain_time = duration; + } + SetCommands::KskValidity { opt_duration } => { + ksc.ksk_validity = opt_duration; + } + SetCommands::ZskValidity { opt_duration } => { + ksc.zsk_validity = opt_duration; + } + SetCommands::CskValidity { opt_duration } => { + ksc.csk_validity = opt_duration; + } + 
SetCommands::DefaultTtl { ttl } => { + ksc.default_ttl = Ttl::from_secs(ttl); + } + SetCommands::UpdateDsCommand { args } => { + ksc.update_ds_command = args; + } + } + *config_changed = true; + Ok(()) +} + +/// Config for the keyset command. +#[derive(Deserialize, Serialize)] +struct KeySetConfig { + /// Filename of the state file. + state_file: PathBuf, + + /// Directory where new key file should be created. + keys_dir: PathBuf, + + /// Whether to use a CSK (if true) or a KSK and a ZSK. + use_csk: bool, + + /// Algorithm and other parameters for key generation. + algorithm: KeyParameters, + + /// Validity of KSKs. + ksk_validity: Option, + /// Validity of ZSKs. + zsk_validity: Option, + /// Validity of CSKs. + csk_validity: Option, + + /// Configuration variable for automatic KSK rolls. + auto_ksk: AutoConfig, + /// Configuration variable for automatic ZSK rolls. + auto_zsk: AutoConfig, + /// Configuration variable for automatic CSK rolls. + auto_csk: AutoConfig, + /// Configuration variable for automatic algorithm rolls. + auto_algorithm: AutoConfig, + + /// DNSKEY signature inception offset (positive values are subtracted + ///from the current time). + dnskey_inception_offset: Duration, + + /// DNSKEY signature lifetime + dnskey_signature_lifetime: Duration, + + /// The required remaining signature lifetime. + dnskey_remain_time: Duration, + + /// CDS/CDNSKEY signature inception offset + cds_inception_offset: Duration, + + /// CDS/CDNSKEY signature lifetime + cds_signature_lifetime: Duration, + + /// The required remaining signature lifetime. + cds_remain_time: Duration, + + /// The DS hash algorithm. + ds_algorithm: DsAlgorithm, + + /// The TTL to use when creating DNSKEY/CDS/CDNSKEY records. + default_ttl: Ttl, + + /// Automatically remove keys that are no long in use. + autoremove: bool, + + /// Command to run when the DS records at the parent need updating. 
+ update_ds_command: Vec, +} + +#[derive(Default, Deserialize, Serialize)] +struct AutoConfig { + /// Whether to start a key roll automatically. + start: bool, + /// Whether to handle the Report actions automatically. + report: bool, + /// Whether to handle the cache expire step automatically. + expire: bool, + /// Whether to handle the done step automatically. + done: bool, +} + +/// Persistent state for the keyset command. +#[derive(Deserialize, Serialize)] +pub struct KeySetState { + /// Domain KeySet state. + pub keyset: KeySet, + + /// DNSKEY RRset plus signatures to include in the signed zone. + pub dnskey_rrset: Vec, + + /// DS records to add to the parent zone. + pub ds_rrset: Vec, + + /// CDS and CDNSKEY RRsets plus signatures to include in the signed zone. + pub cds_rrset: Vec, + + /// Place holder for NS records. Maybe the four _rrset fields should be + /// combined. Though for extensibility there needs to be a field that + /// informs the signer which Rtypes need special treatment. + pub ns_rrset: Vec, + + /// Next time to call the cron subcommand. + cron_next: Option, + + /// KMIP related configuration. + #[cfg(feature = "kmip")] + #[serde(default)] + pub kmip: KmipState, + + /// Internal state for automatic key rolls. + internal: HashMap, +} + +#[derive(Deserialize, Serialize)] +enum KeyParameters { + /// The RSASHA256 algorithm with the key length in bits. + RsaSha256(usize), + /// The RSASHA512 w algorithmith the key length in bits. + RsaSha512(usize), + /// The ECDSAP256SHA256 algorithm. + EcdsaP256Sha256, + /// The ECDSAP384SHA384 algorithm. + EcdsaP384Sha384, + /// The ED25519 algorithm. + Ed25519, + /// The ED448 algorithm. + Ed448, +} + +impl KeyParameters { + /// Generate a new KeyParameter object from the algorithm name and + /// the key length (when required). 
+ fn new(algorithm: &str, bits: Option) -> Result { + if algorithm == "RSASHA256" { + let bits = bits.ok_or::("bits option expected\n".into())?; + Ok(KeyParameters::RsaSha256(bits)) + } else if algorithm == "RSASHA512" { + let bits = bits.ok_or::("bits option expected\n".into())?; + Ok(KeyParameters::RsaSha512(bits)) + } else if algorithm == "ECDSAP256SHA256" { + Ok(KeyParameters::EcdsaP256Sha256) + } else if algorithm == "ECDSAP384SHA384" { + Ok(KeyParameters::EcdsaP384Sha384) + } else if algorithm == "ED25519" { + Ok(KeyParameters::Ed25519) + } else if algorithm == "ED448" { + Ok(KeyParameters::Ed448) + } else { + Err(format!("unknown algorithm {algorithm}\n").into()) + } + } + + /// Return the GenerateParams equivalent of a KeyParameters object. + fn to_generate_params(&self) -> GenerateParams { + match self { + KeyParameters::RsaSha256(size) => GenerateParams::RsaSha256 { + bits: (*size).try_into().expect("should not fail"), + }, + KeyParameters::RsaSha512(size) => GenerateParams::RsaSha512 { + bits: (*size).try_into().expect("should not fail"), + }, + KeyParameters::EcdsaP256Sha256 => GenerateParams::EcdsaP256Sha256, + KeyParameters::EcdsaP384Sha384 => GenerateParams::EcdsaP384Sha384, + KeyParameters::Ed25519 => GenerateParams::Ed25519, + KeyParameters::Ed448 => GenerateParams::Ed448, + } + } +} + +impl Display for KeyParameters { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), std::fmt::Error> { + match self { + KeyParameters::RsaSha256(bits) => write!(fmt, "RSASHA256 {bits} bits"), + KeyParameters::RsaSha512(bits) => write!(fmt, "RSASHA512 {bits} bits"), + KeyParameters::EcdsaP256Sha256 => write!(fmt, "ECDSAP256SHA256"), + KeyParameters::EcdsaP384Sha384 => write!(fmt, "ECDSAP384SHA384"), + KeyParameters::Ed25519 => write!(fmt, "ED25519"), + KeyParameters::Ed448 => write!(fmt, "ED448"), + } + } +} + +/// The hash algorithm to use for DS records. +// Do we want Deserialize and Serialize for DigestAlgorithm? 
+#[derive(Clone, Debug, Deserialize, Serialize)] +enum DsAlgorithm { + /// Hash the public key using SHA-256. + Sha256, + /// Hash the public key using SHA-384. + Sha384, +} + +impl DsAlgorithm { + /// Create a new DsAlgorithm based on the hash algorithm name. + fn new(digest: &str) -> Result { + if digest == "SHA-256" { + Ok(DsAlgorithm::Sha256) + } else if digest == "SHA-384" { + Ok(DsAlgorithm::Sha384) + } else { + Err(format!("unknown digest {digest}\n").into()) + } + } + + /// Return the equivalent DigestAlgorithm for a DsAlgorithm object. + fn to_digest_algorithm(&self) -> DigestAlgorithm { + match self { + DsAlgorithm::Sha256 => DigestAlgorithm::SHA256, + DsAlgorithm::Sha384 => DigestAlgorithm::SHA384, + } + } +} + +impl Display for DsAlgorithm { + fn fmt(&self, fmt: &mut Formatter<'_>) -> Result<(), std::fmt::Error> { + match self { + DsAlgorithm::Sha256 => write!(fmt, "SHA-256"), + DsAlgorithm::Sha384 => write!(fmt, "SHA-384"), + } + } +} + +/// State needed for automatic key rolls. +#[derive(Default, Deserialize, Serialize)] +struct RollStateReports { + /// State for the propagation1-complete step. + propagation1: Mutex, + /// State for the propagation2-complete step. + propagation2: Mutex, + /// State for the done step. + done: Mutex, +} + +#[derive(Clone, Debug, Default, Deserialize, Serialize)] +struct ReportState { + /// State for DNSKEY propagation checks. + dnskey: Option, + /// State for DS propagation checks. + ds: Option, + /// State for RRSIG propagation checks. + rrsig: Option, +} + +fn new_keys( + name: &Name>, + algorithm: GenerateParams, + make_ksk: bool, + keys: &HashMap, + keys_dir: &Path, + env: &impl Env, + #[cfg(feature = "kmip")] kmip: &mut KmipState, +) -> Result<(Url, Url, SecurityAlgorithm, u16), Error> { + // Generate the key. + // TODO: Add a high-level operation in 'domain' to select flags? 
+ let flags = if make_ksk { 257 } else { 256 }; + let mut retries = MAX_KEY_TAG_TRIES; + + // If a default KMIP server is configured, use that to generate keys + #[cfg(feature = "kmip")] + if let Some(kmip_conn_pool) = kmip.get_default_pool()? { + let (key_pair, dnskey) = loop { + // TODO: Fortanix DSM rejects attempts to create keys by names + // that are already taken. Should we be able to detect that case + // specifically and try again with a different name? Should we add + // a random element to each name? Should we keep track of used + // names and detect a collision ourselves when choosing a name? + // Is there some natural differentiator that can be used to name + // keys uniquely other than zone name? + // + // Elements to include in a key name: + // - Application, e.g. Nameshed or NS. + // - Namespace, e.g. prod or test or dev. + // - Key type, e.g. KSK or ZSK. + // - Zone name, e.g. example.com, but also a.b.c.d.f.com + // - Uniqifier, e.g. to differentiate pre-generated keys for + // the same zone. + // + // Max 32 characters seem to be wise as that is the lowest limit + // used amongst PKCS#11 HSM providers for a which a limit is + // known. + // + // Use an overridable naming template? E.g. support placeholders + // such as , and , with a default + // of: + // + // -- + // + // Where is 2 bytes long and is 3 bytes + // long, leaving 32 - '-' - 2 - '-' - 3 = 32 - 7 = 25 bytes for + // , which can be abbreviated if too long by replacing + // the middle with '...' and is a 0 padded positive + // integer in the range 00..99 giving 100 keys to roll the zone + // up to twice a week without needing to use 00 again. 
+ // + // When overridden a user could include fixed namespace and + // application values, e.g.: + // + // NS-PROD--- + // + // Resulting in key names like: + // + // NS-PROD-example.com-001-ksk + // NS-DEV-some.lo...in-name-013-zsk + // (shrunk from NS-DEV-some.long-domain-name-013-zsk) + // 01234567890123456789012345678901 + // + // However, regarding , it may be that pre-generation + // should be accomplished differently, by generating the keys + // outside of dnst keyset and importing them. But it may still + // be useful to consider what to do if a key fails to generate, + // should we retry with an integer value at the end of the zone + // name (within the 32 byte limit - aside: should that limit also + // be configurable?), can we even tell that failure was due to a + // name collision? + // + // Alternate proposals are to use -- or even a random number then re-labeled post-generation + // to include the key tag (which requires the generated key to + // determine). The initial random number is to avoid conflcits if + // re-labeling fails. + // + // And for to be a hexified 16-bit random number that + // we can scan existing keys for to avoid conflict with a key that + // we might have generated before. + // + // And for name truncation to keep the last label (TLD) then remove + // next nearest labels until the name fits the limit, and add an + // extra '.' in to make it clear it was truncated, else keep the + // first n characters. + // + // And to make the max limit be configurable for HSMs that support + // longer than 32 bytes. We could also make the entire label a user + // overridable format/template string. + // + // For now we will do: + // + // 1. Configurable label length limit defaulting to 32 bytes. + // 2. Initial hexified random 32-byte label. + // 3. Relabel to: -<(partial) zone name>--. + + // Generate initial hexified random byte label. 
+ + let server_id = kmip_conn_pool.server_id(); + let key_label_cfg = &mut kmip.servers.get_mut(server_id).unwrap().key_label_config; + + let mut rnadom_bytes = vec![0; key_label_cfg.max_label_bytes as usize]; + rand::fill(&mut rnadom_bytes[..]); + let public_key_random_label = encode_string_hex(&rnadom_bytes); + + let mut random_bytes = vec![0; key_label_cfg.max_label_bytes as usize]; + rand::fill(&mut random_bytes[..]); + let private_key_random_label = encode_string_hex(&random_bytes); + + let key_pair = domain::crypto::kmip::sign::generate( + public_key_random_label, + private_key_random_label, + algorithm.clone(), + flags, + kmip_conn_pool.clone(), + ) + .map_err(|e| format!("KMIP key generation failed: {e}\n"))?; + + let dnskey = key_pair.dnskey(); + + if !keys.iter().any(|(_, k)| k.key_tag() == dnskey.key_tag()) { + if key_label_cfg.supports_relabeling { + // Re-label the key now that we know the key tag. + let key_type = match make_ksk { + true => "ksk", + false => "zsk", + }; + + let prefix = &key_label_cfg.prefix; + let key_tag = dnskey.key_tag().to_string(); + let zone_name = name.to_string(); + let max_label_bytes = key_label_cfg.max_label_bytes as usize; + + let public_key_label = format_key_label( + prefix, + &zone_name, + &key_tag, + key_type, + "-pub", + max_label_bytes, + ); + + if let Err(err) = &public_key_label { + warn!("Failed to generate label for public key, key will have a hex label: {err}"); + } + + let private_key_label = format_key_label( + prefix, + &zone_name, + &key_tag, + key_type, + "-pri", + max_label_bytes, + ); + + if let Err(err) = &private_key_label { + warn!("Failed to generate label for private key, key will have a hex label: {err}"); + } + + if let (Ok(public_key_label), Ok(private_key_label)) = + (public_key_label, private_key_label) + { + let conn = kmip_conn_pool.get()?; + // If key generation succeeded then the most likely reason + // for the rename operation to fail is lack of support for + // key relabeling. 
+ match conn.rename_key(key_pair.public_key_id(), public_key_label) { + Ok(_res) => { + // TODO: Inspect the response attributes to see if + // the Modify Attribute operation actually changed + // the attribute as requested? + + // If re-labelling the public key succeeded but + // re-labelling the private key fails, that is + // unexpected. Why would it succeed for one and + // fail for the other? + conn.rename_key(key_pair.private_key_id(), private_key_label) + .map_err(|e| format!("KMIP key generation failed: failed to re-label private key with id {}: {e}", key_pair.private_key_id()))?; + } + Err(err) => { + // Assume that key re-labeling is not supported + // and disable future re-labeling attempts for + // this server. + warn!("KMIP post key generation re-labeling with server '{server_id}' failed, re-labeling will be disabled for this server: {err}"); + key_label_cfg.supports_relabeling = false; + } + } + } + } + + break (key_pair, dnskey); + } + + if retries <= 1 { + return Err("unable to generate key with unique key tag".into()); + } + retries -= 1; + }; + + return Ok(( + key_pair.public_key_url(), + key_pair.private_key_url(), + key_pair.algorithm(), + dnskey.key_tag(), + )); + } + + // Otherwise use Ring/OpenSSL based key generation. 
+ let (secret_key, public_key, key_tag) = loop { + let (secret_key, public_key) = domain::crypto::sign::generate(algorithm.clone(), flags) + .map_err(|e| format!("key generation failed: {e}\n"))?; + + let key_tag = public_key.key_tag(); + if !keys.iter().any(|(_, k)| k.key_tag() == key_tag) { + break (secret_key, public_key, key_tag); + } + if retries <= 1 { + return Err("unable to generate key with unique key tag".into()); + } + retries -= 1; + }; + + let algorithm = public_key.algorithm(); + + let public_key = Record::new(name.clone(), Class::IN, Ttl::ZERO, public_key); + + let base = format!( + "K{}+{:03}+{:05}", + name.fmt_with_dot(), + algorithm.to_int(), + key_tag + ); + + let mut secret_key_path = keys_dir.to_path_buf(); + secret_key_path.push(Path::new(&format!("{base}.private"))); + let mut public_key_path = keys_dir.to_path_buf(); + public_key_path.push(Path::new(&format!("{base}.key"))); + + let mut secret_key_file = util::create_new_file(&env, &secret_key_path)?; + let mut public_key_file = util::create_new_file(&env, &public_key_path)?; + // Prepare the contents to write. + let secret_key = secret_key.display_as_bind().to_string(); + let public_key = display_as_bind(&public_key).to_string(); + + // Write the key files. 
+ secret_key_file + .write_all(secret_key.as_bytes()) + .map_err(|err| format!("error while writing private key file '{base}.private': {err}"))?; + public_key_file + .write_all(public_key.as_bytes()) + .map_err(|err| format!("error while writing public key file '{base}.key': {err}"))?; + + let secret_key_path = secret_key_path.to_str().ok_or::( + format!("path {} needs to be valid UTF-8", secret_key_path.display()).into(), + )?; + let secret_key_url = "file://".to_owned() + secret_key_path; + let public_key_path = public_key_path.to_str().ok_or::( + format!("path {} needs to be valid UTF-8", public_key_path.display()).into(), + )?; + let public_key_url = "file://".to_owned() + public_key_path; + + let secret_key_url = Url::parse(&secret_key_url) + .map_err(|e| format!("unable to parse {secret_key_url} as URL: {e}"))?; + let public_key_url = Url::parse(&public_key_url) + .map_err(|e| format!("unable to parse {public_key_url} as URL: {e}"))?; + Ok((public_key_url, secret_key_url, algorithm, key_tag)) +} + +/// Update the DNSKEY RRset and signures in the KeySetState. +/// +/// Collect all keys where present() returns true and sign the DNSKEY RRset +/// with all KSK and CSK (KSK state) where signer() returns true. +fn update_dnskey_rrset( + ksc: &KeySetConfig, + kss: &mut KeySetState, + env: &impl Env, + verbose: bool, +) -> Result<(), Error> { + let mut dnskeys = Vec::new(); + // Clone needed because of public_key_from_url takes &mut KeySetState. 
+ let keys = kss.keyset.keys().clone(); + for (k, v) in &keys { + let present = match v.keytype() { + KeyType::Ksk(key_state) => key_state.present(), + KeyType::Zsk(key_state) => key_state.present(), + KeyType::Csk(key_state, _) => key_state.present(), + KeyType::Include(key_state) => key_state.present(), + }; + + if present { + let pub_url = Url::parse(k).expect("valid URL expected"); + let public_key = public_key_from_url::>(&pub_url, ksc, kss, env)?; + dnskeys.push(public_key); + } + } + let now = Timestamp::now().into_int(); + let inception = (now - ksc.dnskey_inception_offset.as_secs() as u32).into(); + let expiration = (now + ksc.dnskey_signature_lifetime.as_secs() as u32).into(); + + let mut sigs = Vec::new(); + for (k, v) in &keys { + if dnskeys.is_empty() { + // Don't try to sign an empty set. + break; + } + let dnskey_signer = match v.keytype() { + KeyType::Ksk(key_state) => key_state.signer(), + KeyType::Zsk(_) => false, + KeyType::Csk(key_state, _) => key_state.signer(), + KeyType::Include(_) => false, + }; + + let rrset = Rrset::new(&dnskeys).map_err(|e| format!("unable to create Rrset: {e}\n"))?; + + if dnskey_signer { + let privref = v.privref().ok_or("missing private key")?; + let priv_url = Url::parse(privref).expect("valid URL expected"); + let pub_url = Url::parse(k).expect("valid URL expected"); + let signing_key = match (priv_url.scheme(), pub_url.scheme()) { + ("file", "file") => { + let private_data = std::fs::read_to_string(priv_url.path()) + .map_err(|e| format!("unable read from file {}: {e}", priv_url.path()))?; + let secret_key = SecretKeyBytes::parse_from_bind(&private_data) + .map_err(|e| format!("unable to parse private key file {privref}: {e}"))?; + + let public_key = public_key_from_url(&pub_url, ksc, kss, env)?; + + let key_pair = + KeyPair::from_bytes(&secret_key, public_key.data()).map_err(|e| { + format!("private key {privref} and public key {k} do not match: {e}") + })?; + SigningKey::new( + public_key.owner().clone(), + 
public_key.data().flags(), + key_pair, + ) + } + + #[cfg(feature = "kmip")] + ("kmip", "kmip") => { + let owner = kss.keyset.name().clone().flatten_into(); + let priv_key_url = KeyUrl::try_from(priv_url)?; + let pub_key_url = KeyUrl::try_from(pub_url)?; + let flags = priv_key_url.flags(); + let kmip_conn_pool = kss.kmip.get_pool(priv_key_url.server_id())?; + let key_pair = domain::crypto::kmip::sign::KeyPair::from_urls( + priv_key_url, + pub_key_url, + kmip_conn_pool, + ) + .map_err(|err| format!("Failed to retrieve KMIP key by URL: {err}"))?; + let key_pair = KeyPair::Kmip(key_pair); + SigningKey::new(owner, flags, key_pair) + } + + (priv_scheme, pub_scheme) => { + panic!("unsupported URL scheme combination: {priv_scheme} & {pub_scheme}"); + } + }; + + // TODO: Should there be a key not found error we can detect here so that we can retry if + // we believe that the key is simply not registered fully yet in the HSM? + let sig = sign_rrset(&signing_key, &rrset, inception, expiration).map_err(|e| { + format!("error signing DNSKEY RRset with private key {privref}: {e}") + })?; + sigs.push(sig); + } + } + + kss.dnskey_rrset.truncate(0); + for r in dnskeys { + kss.dnskey_rrset + .push(r.display_zonefile(DisplayKind::Simple).to_string()); + } + for r in sigs { + kss.dnskey_rrset + .push(r.display_zonefile(DisplayKind::Simple).to_string()); + } + if verbose { + println!("Got DNSKEY RRset:"); + for r in &kss.dnskey_rrset { + println!("\t{r}"); + } + } + Ok(()) +} + +/// Create the CDS and CDNSKEY RRsets plus signatures. +/// +/// The CDS and CDNSKEY RRsets contain the keys where at_parent() returns +/// true. The RRsets are signed with all keys that sign the DNSKEY RRset. +fn create_cds_rrset( + kss: &mut KeySetState, + ksc: &KeySetConfig, + digest_alg: DigestAlgorithm, + env: &impl Env, + verbose: bool, +) -> Result<(), Error> { + let mut cds_list = Vec::new(); + let mut cdnskey_list = Vec::new(); + // clone needed due to public_key_from_url taking &mut KeySetState. 
+ let keys = kss.keyset.keys().clone(); + for (k, v) in &keys { + let at_parent = match v.keytype() { + KeyType::Ksk(key_state) => key_state.at_parent(), + KeyType::Zsk(key_state) => key_state.at_parent(), + KeyType::Csk(key_state, _) => key_state.at_parent(), + KeyType::Include(key_state) => key_state.at_parent(), + }; + + if at_parent { + let pub_url = Url::parse(k).expect("valid URL expected"); + let public_key = public_key_from_url(&pub_url, ksc, kss, env)?; + create_cds_rrset_helper(digest_alg, &mut cds_list, &mut cdnskey_list, public_key)?; + } + + // Need to sign + } + + let now = Timestamp::now().into_int(); + let inception = (now - ksc.cds_inception_offset.as_secs() as u32).into(); + let expiration = (now + ksc.cds_signature_lifetime.as_secs() as u32).into(); + + let mut cds_sigs = Vec::new(); + let mut cdnskey_sigs = Vec::new(); + for (k, v) in &keys { + if cds_list.is_empty() { + // Don't try to sign an empty set. Assume cdnskey_list is empty + // as well. + break; + } + let dnskey_signer = match v.keytype() { + KeyType::Ksk(key_state) => key_state.signer(), + KeyType::Zsk(_) => false, + KeyType::Csk(key_state, _) => key_state.signer(), + KeyType::Include(_) => false, + }; + + let cds_rrset = + Rrset::new(&cds_list).map_err(|e| format!("unable to create Rrset: {e}\n"))?; + let cdnskey_rrset = + Rrset::new(&cdnskey_list).map_err(|e| format!("unable to create Rrset: {e}\n"))?; + + if dnskey_signer { + let privref = v.privref().ok_or("missing private key")?; + let priv_url = Url::parse(privref).expect("valid URL expected"); + let pub_url = Url::parse(k).expect("valid URL expected"); + let signing_key = match (priv_url.scheme(), pub_url.scheme()) { + ("file", "file") => { + let path = priv_url.path(); + let filename = env.in_cwd(&path); + let private_data = std::fs::read_to_string(&filename).map_err(|e| { + format!( + "unable to read from private key file {}: {e}", + filename.display() + ) + })?; + let secret_key = + 
SecretKeyBytes::parse_from_bind(&private_data).map_err(|e| { + format!( + "unable to parse private key file {}: {e}", + filename.display() + ) + })?; + let public_key = public_key_from_url(&pub_url, ksc, kss, env)?; + + let key_pair = + KeyPair::from_bytes(&secret_key, public_key.data()).map_err(|e| { + format!("private key {privref} and public key {k} do not match: {e}") + })?; + SigningKey::new( + public_key.owner().clone(), + public_key.data().flags(), + key_pair, + ) + } + + #[cfg(feature = "kmip")] + ("kmip", "kmip") => { + let owner = kss.keyset.name().clone().flatten_into(); + let priv_key_url = KeyUrl::try_from(priv_url)?; + let pub_key_url = KeyUrl::try_from(pub_url)?; + let flags = priv_key_url.flags(); + let kmip_conn_pool = kss.kmip.get_pool(priv_key_url.server_id())?; + let key_pair = domain::crypto::kmip::sign::KeyPair::from_urls( + priv_key_url, + pub_key_url, + kmip_conn_pool, + ) + .map_err(|err| format!("Failed to retrieve KMIP key by URL: {err}"))?; + let key_pair = KeyPair::Kmip(key_pair); + SigningKey::new(owner, flags, key_pair) + } + + (priv_scheme, pub_scheme) => { + panic!("unsupported URL scheme combination: {priv_scheme} & {pub_scheme}"); + } + }; + let sig = sign_rrset(&signing_key, &cds_rrset, inception, expiration) + .map_err(|e| format!("error signing CDS RRset with private key {privref}: {e}"))?; + cds_sigs.push(sig); + let sig = + sign_rrset::<_, _, Bytes, _>(&signing_key, &cdnskey_rrset, inception, expiration) + .map_err(|e| { + format!("error signing CDNSKEY RRset with private key {privref}: {e}") + })?; + cdnskey_sigs.push(sig); + } + } + + kss.cds_rrset.truncate(0); + for r in cdnskey_list { + kss.cds_rrset + .push(r.display_zonefile(DisplayKind::Simple).to_string()); + } + for r in cdnskey_sigs { + kss.cds_rrset + .push(r.display_zonefile(DisplayKind::Simple).to_string()); + } + for r in cds_list { + kss.cds_rrset + .push(r.display_zonefile(DisplayKind::Simple).to_string()); + } + for r in cds_sigs { + kss.cds_rrset + 
.push(r.display_zonefile(DisplayKind::Simple).to_string()); + } + + if verbose { + println!("Got CDS/CDNSKEY RRset:"); + for r in &kss.cds_rrset { + println!("\t{r}"); + } + } + Ok(()) +} + +/// Create CDS and CDNSKEY RRsets. +fn create_cds_rrset_helper( + digest_alg: DigestAlgorithm, + cds_list: &mut Vec, Cds>>>, + cdnskey_list: &mut Vec, Cdnskey>>>, + record: Record>, Dnskey>>, +) -> Result<(), Error> { + let owner: Name = record.owner().to_name(); + let dnskey = record.data(); + let cdnskey = Cdnskey::new( + dnskey.flags(), + dnskey.protocol(), + dnskey.algorithm(), + dnskey.public_key().clone(), + ) + .expect("should not fail"); + let cdnskey_record = Record::new(owner.clone(), record.class(), record.ttl(), cdnskey); + cdnskey_list.push(cdnskey_record); + let key_tag = dnskey.key_tag(); + let sec_alg = dnskey.algorithm(); + let digest = dnskey + .digest(&record.owner(), digest_alg) + .map_err(|e| format!("error creating digest for DNSKEY record: {e}"))?; + let cds = Cds::new(key_tag, sec_alg, digest_alg, digest.as_ref().to_vec()) + .expect("Infallible because the digest won't be too long since it's a valid digest"); + let cds_record = Record::new(owner, record.class(), record.ttl(), cds); + cds_list.push(cds_record); + Ok(()) +} + +/// Remove the CDS and CDNSKEY RRsets and signatures. +fn remove_cds_rrset(kss: &mut KeySetState) { + kss.cds_rrset.truncate(0); +} + +/// Update the DS RRset. +/// +/// The DS records are generated from all keys where at_parent() returns true. +/// This RRset is not signed. +fn update_ds_rrset( + ksc: &KeySetConfig, + kss: &mut KeySetState, + env: &impl Env, + verbose: bool, +) -> Result<(), Error> { + let digest_alg = ksc.ds_algorithm.to_digest_algorithm(); + + #[allow(clippy::type_complexity)] + let mut ds_list: Vec>, Ds>>> = Vec::new(); + // clone needed due to public_key_from_url taking &mut KeySetState. 
+ let keys = kss.keyset.keys().clone(); + for (k, v) in &keys { + let at_parent = match v.keytype() { + KeyType::Ksk(key_state) => key_state.at_parent(), + KeyType::Zsk(key_state) => key_state.at_parent(), + KeyType::Csk(key_state, _) => key_state.at_parent(), + KeyType::Include(key_state) => key_state.at_parent(), + }; + + if at_parent { + let pub_url = Url::parse(k).expect("valid URL expected"); + let public_key = public_key_from_url::>(&pub_url, ksc, kss, env)?; + let digest = public_key + .data() + .digest(&public_key.owner(), digest_alg) + .map_err(|e| format!("error creating digest for DNSKEY record: {e}"))?; + + let ds = Ds::new( + public_key.data().key_tag(), + public_key.data().algorithm(), + digest_alg, + digest.as_ref().to_vec(), + ) + .expect("Infallible because the digest won't be too long since it's a valid digest"); + + let ds_record = Record::new( + public_key.owner().clone().flatten_into(), + public_key.class(), + ksc.default_ttl, + ds, + ); + + ds_list.push(ds_record); + } + } + + kss.ds_rrset.truncate(0); + for r in ds_list { + kss.ds_rrset + .push(r.display_zonefile(DisplayKind::Simple).to_string()); + } + + if verbose { + println!("Got DS RRset:"); + for r in &kss.ds_rrset { + println!("\t{r}"); + } + } + Ok(()) +} + +/// Handle the actions that result from key roll steps that always need to +/// be handled independent of automation. +/// +/// Those are the actions that update the DNSKEY RRset, DS records and the +/// CDS and CDNSKEY RRsets. 
+fn handle_actions( + actions: &[Action], + ksc: &KeySetConfig, + kss: &mut KeySetState, + env: &impl Env, + verbose: bool, + run_update_ds_command: &mut bool, +) -> Result<(), Error> { + for action in actions { + match action { + Action::UpdateDnskeyRrset => update_dnskey_rrset(ksc, kss, env, verbose)?, + Action::CreateCdsRrset => create_cds_rrset( + kss, + ksc, + ksc.ds_algorithm.to_digest_algorithm(), + env, + verbose, + )?, + Action::RemoveCdsRrset => remove_cds_rrset(kss), + Action::UpdateDsRrset => { + *run_update_ds_command = true; + update_ds_rrset(ksc, kss, env, verbose)? + } + Action::UpdateRrsig => (), + Action::ReportDnskeyPropagated => (), + Action::ReportDsPropagated => (), + Action::ReportRrsigPropagated => (), + Action::WaitDnskeyPropagated => (), + Action::WaitDsPropagated => (), + Action::WaitRrsigPropagated => (), + } + } + Ok(()) +} + +/// Print a list of actions. +/// +/// TODO: make this list user friendly. +fn print_actions(actions: &[Action]) { + if actions.is_empty() { + println!("No actions"); + } else { + println!("Actions:"); + let mut report_count = 0; + for a in actions { + println!("\t{a:?}:"); + match a { + Action::CreateCdsRrset => { + println!("\t\tsign the zone with the CDS and CDNSKEY RRsets") + } + Action::RemoveCdsRrset => { + println!("\t\tsign the zone with empty CDS and CDNSKEY RRsets") + } + Action::UpdateDnskeyRrset => { + println!("\t\tsign the zone with the new DNSKEY RRset from the state file") + } + Action::UpdateDsRrset => { + println!("\t\tupdate the DS RRset at the parent to match the CDNSKEY RRset") + } + Action::UpdateRrsig => println!("\t\tsign the zone with the new zone signing keys"), + Action::ReportDnskeyPropagated => { + println!("\t\tverify that the new DNSKEY RRset has propagated to all"); + println!("\t\tnameservers and report (at least) the TTL of the DNSKEY RRset"); + report_count += 1; + } + Action::ReportDsPropagated => { + println!("\t\tverify that all nameservers of the parent zone have a new"); + 
println!("\t\tDS RRset that matches the keys in the CNDSKEY RRset and"); + println!("\t\treport (at least) the TTL of the DNSKEY RRset"); + report_count += 1; + } + Action::ReportRrsigPropagated => { + println!("\t\tverify that the new RRSIG records have propagated to all"); + println!("\t\tnameservers and report (at least) the maximum TTL among"); + println!("\t\tthe RRSIG records"); + report_count += 1; + } + Action::WaitDnskeyPropagated => { + println!("\t\tverify that the new DNSKEY RRset has propagated to all"); + println!("\t\tnameservers"); + } + Action::WaitDsPropagated => { + println!("\t\tverify that all nameservers of the parent zone have a new"); + println!("\t\tDS RRset that matches the keys in the CNDSKEY RRset"); + } + Action::WaitRrsigPropagated => { + println!("\t\tverify that the new RRSIG records have propagated to all"); + println!("\t\tnameservers"); + } + } + println!(); + } + if report_count > 1 { + println!("\tNote: with multiple Report actions, report the maximum of the TTLs."); + } + } +} + +/// Parse a duration from a string with suffixes like 'm', 'h', 'w', etc. +pub fn parse_duration(value: &str) -> Result { + let span: Span = value + .parse() + .map_err(|e| format!("unable to parse {value} as lifetime: {e}\n"))?; + let signeddur = span + .to_duration(SpanRelativeTo::days_are_24_hours()) + .map_err(|e| format!("unable to convert duration: {e}\n"))?; + Duration::try_from(signeddur).map_err(|e| format!("unable to convert duration: {e}\n").into()) +} + +/// Parse an optional duration from a string but also allow 'off' to signal +/// no duration. +fn parse_opt_duration(value: &str) -> Result, Error> { + if value == "off" { + return Ok(None); + } + let duration = parse_duration(value)?; + Ok(Some(duration)) +} + +/// Check whether signatures need to be renewed. +/// +/// The input is an RRset plus signatures in zonefile format plus a +/// duration how long the signatures are required to remain valid. 
+fn sig_renew(rrset: &[String], remain_time: &Duration) -> bool { + let mut zonefile = Zonefile::new(); + for r in rrset { + zonefile.extend_from_slice(r.as_ref()); + zonefile.extend_from_slice(b"\n"); + } + let now = Timestamp::now(); + let renew = now.into_int() as u64 + remain_time.as_secs(); + for e in zonefile { + let e = e.expect("should not fail"); + match e { + Entry::Record(r) => { + if let ZoneRecordData::Rrsig(rrsig) = r.data() { + if renew > rrsig.expiration().into_int() as u64 { + return true; + } + } + } + Entry::Include { .. } => continue, // Just ignore include. + } + } + false +} + +/// Return where a key has expired. Return a label for the type of +/// key as well to help user friendly output. +fn key_expired(key: &Key, ksc: &KeySetConfig) -> (bool, &'static str) { + let Some(timestamp) = key.timestamps().published() else { + return (false, ""); + }; + + // Take published time as basis for computing expiration. + let (keystate, label, validity) = match key.keytype() { + KeyType::Ksk(keystate) => (keystate, "KSK", ksc.ksk_validity), + KeyType::Zsk(keystate) => (keystate, "ZSK", ksc.zsk_validity), + KeyType::Csk(keystate, _) => (keystate, "CSK", ksc.csk_validity), + KeyType::Include(_) => return (false, ""), // Does not expire. + }; + if keystate.stale() { + // Old key. + return (false, ""); + } + let Some(validity) = validity else { + // No limit on key validity. + return (false, ""); + }; + (timestamp.elapsed() > validity, label) +} + +/// Create a PathBuf for the parent directory of a PathBuf. +fn make_parent_dir(filename: PathBuf) -> PathBuf { + filename.parent().unwrap_or(Path::new("/")).to_path_buf() +} + +/// Compute when the cron subcommand should be called to refresh signatures +/// for an RRset. 
+fn compute_cron_next(rrset: &[String], remain_time: &Duration) -> Option { + let mut zonefile = Zonefile::new(); + for r in rrset { + zonefile.extend_from_slice(r.as_ref()); + zonefile.extend_from_slice(b"\n"); + } + + let now = SystemTime::now(); + let min_expiration = zonefile + .map(|r| r.expect("should not fail")) + .filter_map(|r| match r { + Entry::Record(r) => Some(r), + Entry::Include { .. } => None, + }) + .filter_map(|r| { + if let ZoneRecordData::Rrsig(rrsig) = r.data() { + Some(rrsig.expiration()) + } else { + None + } + }) + .map(|t| t.to_system_time(now)) + .min(); + + // Map to the Unix epoch in case of failure. + min_expiration.map(|t| { + (t - *remain_time) + .try_into() + .unwrap_or_else(|_| UNIX_EPOCH.try_into().expect("should not fail")) + }) +} + +/// The result of an automatic action check that does not need to report a +/// TTL. +#[derive(Debug)] +enum AutoActionsResult { + /// The action has completed. + Ok, + /// Try again after the UnixTime parameter. + Wait(UnixTime), +} + +/// The result of an automatic action check the does need to report a TTL. +#[derive(Clone, Debug, Deserialize, Serialize)] +enum AutoReportActionsResult { + /// The action has completed, report at least the Ttl in the parameter. + Report(Ttl), + /// Try again after the UnixTime parameter. + Wait(UnixTime), +} + +/// The result of checking for RRSIG propagation. +#[derive(Clone, Debug, Deserialize, Serialize)] +enum AutoReportRrsigResult { + /// The action has completed, report at least the Ttl in the parameter. + Report(Ttl), + /// A DNS request failed (for example due to a network problem). Try again + /// after the UnixTime parameter. + Wait(UnixTime), + /// The zone has updated signatures, wait for this version of the zone to + /// appear on all name servers. + WaitSoa { + /// Try again after this time. + next: UnixTime, + /// Wait for this serial or newer. + serial: Serial, + /// The ttl to use to compute a new 'next' wait time if the check fails. 
+ ttl: Ttl, + /// The ttl to put in the Report variable when the check succeeds. + report_ttl: Ttl, + }, + /// Wait for a specific record to get updated signatures. + WaitRecord { + /// Try again after this time. + next: UnixTime, + /// Name to check. + name: Name>, + /// Rtype to check. + rtype: Rtype, + /// The ttl to use to compute a new 'next' wait time if the check fails. + ttl: Ttl, + }, + /// For NSEC3 record, it is not possible to directly check if they got new + /// signatures. Instead, wait for a new version of the zone and check the + /// entire zone. + WaitNextSerial { + /// Try again after this time. + next: UnixTime, + /// Wait until the zone version is new than this serial. + serial: Serial, + /// The ttl to use to compute a new 'next' wait time if the check fails. + ttl: Ttl, + }, +} + +/// Handle the actions for the Done state automatically. Actions for this +/// state cannot have report actions, but there can be wait actions. +async fn auto_wait_actions( + actions: &[Action], + kss: &KeySetState, + report_state: &Mutex, + state_changed: &mut bool, +) -> AutoActionsResult { + for a in actions { + match a { + Action::CreateCdsRrset + | Action::RemoveCdsRrset + | Action::UpdateDnskeyRrset + | Action::UpdateDsRrset + | Action::UpdateRrsig => (), + Action::WaitDnskeyPropagated => { + // Note, an extra scope here to make clippy happy. Otherwise + // clippy thinks that the lock is used across an await point. 
+ { + let report_state_locked = report_state.lock().expect("lock() should not fail"); + if let Some(dnskey_status) = &report_state_locked.dnskey { + match dnskey_status { + AutoReportActionsResult::Wait(next) => { + if *next > UnixTime::now() { + return AutoActionsResult::Wait(next.clone()); + } + } + AutoReportActionsResult::Report(_) => continue, + } + } + + drop(report_state_locked); + } + + let result = report_dnskey_propagated(kss).await; + + let mut report_state_locked = report_state.lock().expect("lock() should not fail"); + report_state_locked.dnskey = Some(result.clone()); + drop(report_state_locked); + *state_changed = true; + + match result { + AutoReportActionsResult::Wait(next) => return AutoActionsResult::Wait(next), + AutoReportActionsResult::Report(_) => (), + } + } + Action::WaitDsPropagated => { + // Clippy problem + { + let report_state_locked = report_state.lock().expect("lock() should not fail"); + if let Some(ds_status) = &report_state_locked.ds { + match ds_status { + AutoReportActionsResult::Wait(next) => { + if *next > UnixTime::now() { + return AutoActionsResult::Wait(next.clone()); + } + } + AutoReportActionsResult::Report(_) => continue, + } + } + drop(report_state_locked); + } + + let result = report_ds_propagated(kss).await.unwrap_or_else(|e| { + warn!("Check DS propagation failed: {e}"); + AutoReportActionsResult::Wait(UnixTime::now() + DEFAULT_WAIT) + }); + + let mut report_state_locked = report_state.lock().expect("lock() should not fail"); + report_state_locked.ds = Some(result.clone()); + drop(report_state_locked); + *state_changed = true; + + match result { + AutoReportActionsResult::Wait(next) => return AutoActionsResult::Wait(next), + AutoReportActionsResult::Report(_) => (), + } + } + Action::WaitRrsigPropagated => { + // Clippy problem + let opt_rrsig_status = { + let report_state_locked = report_state.lock().expect("lock() should not fail"); + // Make a copy of the state. 
We need to release the lock + // before calling await. + let opt_rrsig_status = report_state_locked.rrsig.clone(); + drop(report_state_locked); + opt_rrsig_status + }; + + if let Some(rrsig_status) = opt_rrsig_status { + match rrsig_status { + AutoReportRrsigResult::Wait(next) => { + if next > UnixTime::now() { + return AutoActionsResult::Wait(next.clone()); + } + } + AutoReportRrsigResult::Report(_) => continue, + AutoReportRrsigResult::WaitSoa { + next, + serial, + ttl, + report_ttl, + } => { + if next > UnixTime::now() { + return AutoActionsResult::Wait(next.clone()); + } + let res = check_soa(serial, kss).await.unwrap_or_else(|e| { + warn!("Check SOA propagation failed: {e}"); + false + }); + if res { + let mut report_state_locked = + report_state.lock().expect("lock() should not fail"); + report_state_locked.rrsig = + Some(AutoReportRrsigResult::Report(report_ttl)); + drop(report_state_locked); + *state_changed = true; + continue; + } else { + let next = UnixTime::now() + ttl.into(); + let mut report_state_locked = + report_state.lock().expect("lock() should not fail"); + report_state_locked.rrsig = Some(AutoReportRrsigResult::WaitSoa { + next: next.clone(), + serial, + ttl, + report_ttl, + }); + drop(report_state_locked); + *state_changed = true; + return AutoActionsResult::Wait(next); + } + } + AutoReportRrsigResult::WaitRecord { + next, + name, + rtype, + ttl, + } => { + if next > UnixTime::now() { + return AutoActionsResult::Wait(next.clone()); + } + let res = check_record(&name, &rtype, kss).await.unwrap_or_else(|e| { + warn!("record check failed: {e}"); + false + }); + if !res { + let next = UnixTime::now() + ttl.into(); + let mut report_state_locked = + report_state.lock().expect("lock() should not fail"); + report_state_locked.rrsig = + Some(AutoReportRrsigResult::WaitRecord { + next: next.clone(), + name: name.clone(), + rtype, + ttl, + }); + drop(report_state_locked); + *state_changed = true; + return AutoActionsResult::Wait(next); + } + + // This 
record has the right signatures. Check + // the zone. + } + AutoReportRrsigResult::WaitNextSerial { next, serial, ttl } => { + if next > UnixTime::now() { + return AutoActionsResult::Wait(next.clone()); + } + let res = check_next_serial(serial, kss).await.unwrap_or_else(|e| { + warn!("next serial check failed: {e}"); + false + }); + if !res { + let next = UnixTime::now() + ttl.into(); + let mut report_state_locked = + report_state.lock().expect("lock() should not fail"); + report_state_locked.rrsig = + Some(AutoReportRrsigResult::WaitNextSerial { + next: next.clone(), + serial, + ttl, + }); + drop(report_state_locked); + *state_changed = true; + return AutoActionsResult::Wait(next); + } + + // A new serial. Check the zone. + } + } + } + + let result = report_rrsig_propagated(kss).await.unwrap_or_else(|e| { + warn!("Check RRSIG propagation failed: {e}"); + AutoReportRrsigResult::Wait(UnixTime::now() + DEFAULT_WAIT) + }); + + let mut report_state_locked = report_state.lock().expect("lock() should not fail"); + report_state_locked.rrsig = Some(result.clone()); + drop(report_state_locked); + *state_changed = true; + + match result { + AutoReportRrsigResult::Wait(next) + | AutoReportRrsigResult::WaitRecord { next, .. } + | AutoReportRrsigResult::WaitNextSerial { next, .. } + | AutoReportRrsigResult::WaitSoa { next, .. } => { + return AutoActionsResult::Wait(next) + } + AutoReportRrsigResult::Report(_) => (), + } + } + // These actions are not compatible with the 'done' state because + // the 'done' state does not report anything, it can only wait. + Action::ReportDnskeyPropagated + | Action::ReportDsPropagated + | Action::ReportRrsigPropagated => unreachable!(), + } + } + AutoActionsResult::Ok +} + +/// Handle automatic report actions. 
+async fn auto_report_actions( + actions: &[Action], + kss: &KeySetState, + report_state: &Mutex, + state_changed: &mut bool, +) -> AutoReportActionsResult { + assert!(!actions.is_empty()); + let mut max_ttl = Ttl::from_secs(0); + for a in actions { + match a { + Action::ReportDnskeyPropagated => { + // Clippy problem + { + let report_state_locked = report_state.lock().expect("lock() should not fail"); + if let Some(dnskey_status) = &report_state_locked.dnskey { + match dnskey_status { + AutoReportActionsResult::Wait(next) => { + if *next > UnixTime::now() { + return dnskey_status.clone(); + } + } + AutoReportActionsResult::Report(ttl) => { + max_ttl = max(max_ttl, *ttl); + continue; + } + } + } + drop(report_state_locked); + } + + let result = report_dnskey_propagated(kss).await; + + let mut report_state_locked = report_state.lock().expect("lock() should not fail"); + report_state_locked.dnskey = Some(result.clone()); + drop(report_state_locked); + *state_changed = true; + + match result { + AutoReportActionsResult::Wait(_) => return result, + AutoReportActionsResult::Report(ttl) => { + max_ttl = max(max_ttl, ttl); + } + } + } + Action::ReportDsPropagated => { + // Clippy problem + { + let report_state_locked = report_state.lock().expect("lock() should not fail"); + if let Some(ds_status) = &report_state_locked.ds { + match ds_status { + AutoReportActionsResult::Wait(next) => { + if *next > UnixTime::now() { + return ds_status.clone(); + } + } + AutoReportActionsResult::Report(ttl) => { + max_ttl = max(max_ttl, *ttl); + continue; + } + } + } + drop(report_state_locked); + } + + let result = report_ds_propagated(kss).await.unwrap_or_else(|e| { + warn!("Check DS propagation failed: {e}"); + AutoReportActionsResult::Wait(UnixTime::now() + DEFAULT_WAIT) + }); + + let mut report_state_locked = report_state.lock().expect("lock() should not fail"); + report_state_locked.ds = Some(result.clone()); + drop(report_state_locked); + *state_changed = true; + + match result { + 
AutoReportActionsResult::Wait(_) => return result, + AutoReportActionsResult::Report(ttl) => { + max_ttl = max(max_ttl, ttl); + } + } + } + Action::ReportRrsigPropagated => { + // Clippy problem + let opt_rrsig_status = { + let report_state_locked = report_state.lock().expect("lock() should not fail"); + // Make a copy of the state. We need to release the lock + // before calling await. + let opt_rrsig_status = report_state_locked.rrsig.clone(); + drop(report_state_locked); + opt_rrsig_status + }; + + if let Some(rrsig_status) = opt_rrsig_status { + match rrsig_status { + AutoReportRrsigResult::Wait(next) => { + if next > UnixTime::now() { + return AutoReportActionsResult::Wait(next.clone()); + } + } + AutoReportRrsigResult::Report(ttl) => { + max_ttl = max(max_ttl, ttl); + continue; + } + AutoReportRrsigResult::WaitSoa { + next, + serial, + ttl, + report_ttl, + } => { + if next > UnixTime::now() { + return AutoReportActionsResult::Wait(next.clone()); + } + let res = check_soa(serial, kss).await.unwrap_or_else(|e| { + warn!("Check SOA propagation failed: {e}"); + false + }); + if res { + let mut report_state_locked = + report_state.lock().expect("lock() should not fail"); + report_state_locked.rrsig = + Some(AutoReportRrsigResult::Report(report_ttl)); + drop(report_state_locked); + *state_changed = true; + max_ttl = max(max_ttl, report_ttl); + continue; + } else { + let next = UnixTime::now() + ttl.into(); + let mut report_state_locked = + report_state.lock().expect("lock() should not fail"); + report_state_locked.rrsig = Some(AutoReportRrsigResult::WaitSoa { + next: next.clone(), + serial, + ttl, + report_ttl, + }); + drop(report_state_locked); + *state_changed = true; + return AutoReportActionsResult::Wait(next); + } + } + AutoReportRrsigResult::WaitRecord { + next, + name, + rtype, + ttl, + } => { + if next > UnixTime::now() { + return AutoReportActionsResult::Wait(next.clone()); + } + let res = check_record(&name, &rtype, kss).await.unwrap_or_else(|e| { + 
warn!("record check failed: {e}"); + false + }); + if !res { + let next = UnixTime::now() + ttl.into(); + let mut report_state_locked = + report_state.lock().expect("lock() should not fail"); + report_state_locked.rrsig = + Some(AutoReportRrsigResult::WaitRecord { + next: next.clone(), + name: name.clone(), + rtype, + ttl, + }); + drop(report_state_locked); + *state_changed = true; + return AutoReportActionsResult::Wait(next); + } + + // This record has the right signatures. Check + // the zone. + } + AutoReportRrsigResult::WaitNextSerial { next, serial, ttl } => { + if next > UnixTime::now() { + return AutoReportActionsResult::Wait(next.clone()); + } + let res = check_next_serial(serial, kss).await.unwrap_or_else(|e| { + warn!("next serial check failed: {e}"); + false + }); + if !res { + let next = UnixTime::now() + ttl.into(); + let mut report_state_locked = + report_state.lock().expect("lock() should not fail"); + report_state_locked.rrsig = + Some(AutoReportRrsigResult::WaitNextSerial { + next: next.clone(), + serial, + ttl, + }); + drop(report_state_locked); + *state_changed = true; + return AutoReportActionsResult::Wait(next); + } + + // A new serial. Check the zone. + } + } + } + + let result = report_rrsig_propagated(kss).await.unwrap_or_else(|e| { + warn!("Check RRSIG propagation failed: {e}"); + AutoReportRrsigResult::Wait(UnixTime::now() + DEFAULT_WAIT) + }); + + let mut report_state_locked = report_state.lock().expect("lock() should not fail"); + report_state_locked.rrsig = Some(result.clone()); + drop(report_state_locked); + *state_changed = true; + + match result { + AutoReportRrsigResult::Wait(next) + | AutoReportRrsigResult::WaitRecord { next, .. } + | AutoReportRrsigResult::WaitNextSerial { next, .. } + | AutoReportRrsigResult::WaitSoa { next, .. 
} => { + return AutoReportActionsResult::Wait(next) + } + AutoReportRrsigResult::Report(ttl) => { + max_ttl = max(max_ttl, ttl); + } + } + } + Action::UpdateDnskeyRrset + | Action::CreateCdsRrset + | Action::RemoveCdsRrset + | Action::UpdateDsRrset + | Action::UpdateRrsig => (), + + // These actions should not occur here. Actions in this functions + // need to be no-ops or report a TTL. Wait actions are not + // compatible with this. + Action::WaitDnskeyPropagated + | Action::WaitDsPropagated + | Action::WaitRrsigPropagated => unreachable!(), + } + } + AutoReportActionsResult::Report(max_ttl) +} + +/// Check whether automatic actions are done or not. If not, return until +/// when to wait to try again. +fn check_auto_actions(actions: &[Action], report_state: &Mutex) -> AutoActionsResult { + for a in actions { + match a { + Action::UpdateDnskeyRrset + | Action::CreateCdsRrset + | Action::RemoveCdsRrset + | Action::UpdateDsRrset + | Action::UpdateRrsig => (), + Action::ReportDnskeyPropagated | Action::WaitDnskeyPropagated => { + let report_state_locked = report_state.lock().expect("lock() should not fail"); + if let Some(dnskey_status) = &report_state_locked.dnskey { + match dnskey_status { + AutoReportActionsResult::Wait(next) => { + return AutoActionsResult::Wait(next.clone()) + } + AutoReportActionsResult::Report(_) => continue, + } + } + drop(report_state_locked); + + // No status, request cron + return AutoActionsResult::Wait(UnixTime::now()); + } + Action::ReportDsPropagated | Action::WaitDsPropagated => { + let report_state_locked = report_state.lock().expect("lock() should not fail"); + if let Some(ds_status) = &report_state_locked.ds { + match ds_status { + AutoReportActionsResult::Wait(next) => { + return AutoActionsResult::Wait(next.clone()) + } + AutoReportActionsResult::Report(_) => continue, + } + } + drop(report_state_locked); + + // No status, request cron + return AutoActionsResult::Wait(UnixTime::now()); + } + Action::ReportRrsigPropagated | 
Action::WaitRrsigPropagated => { + let report_state_locked = report_state.lock().expect("lock() should not fail"); + if let Some(rrsig_status) = &report_state_locked.rrsig { + match rrsig_status { + AutoReportRrsigResult::Wait(next) + | AutoReportRrsigResult::WaitRecord { next, .. } + | AutoReportRrsigResult::WaitNextSerial { next, .. } + | AutoReportRrsigResult::WaitSoa { next, .. } => { + return AutoActionsResult::Wait(next.clone()) + } + AutoReportRrsigResult::Report(_) => continue, + } + } + drop(report_state_locked); + + // No status, request cron + return AutoActionsResult::Wait(UnixTime::now()); + } + } + } + AutoActionsResult::Ok +} + +/// Execute the done action. +fn do_done(kss: &mut KeySetState, roll_type: RollType, autoremove: bool) -> Result<(), Error> { + let actions = kss.keyset.roll_done(roll_type); + + let actions = match actions { + Ok(actions) => actions, + Err(err) => { + return Err(format!("Error reporting done: {err}\n").into()); + } + }; + + if !actions.is_empty() { + return Err("List of actions after reporting done\n".into()); + } + + // Sometimes there is no space for a RemoveCdsRrset action. Just remove + // it anyhow. + remove_cds_rrset(kss); + + kss.internal.remove(&roll_type); + + // Remove old keys. 
+ if autoremove { + let key_urls: Vec<_> = kss + .keyset + .keys() + .iter() + .filter(|(_, key)| { + let state = match key.keytype() { + KeyType::Ksk(state) => state, + KeyType::Zsk(state) => state, + KeyType::Csk(state, _) => state, + KeyType::Include(state) => state, + }; + state.stale() + }) + .map(|(pubref, key)| (pubref.clone(), key.privref().map(|r| r.to_string()))) + .collect(); + if !key_urls.is_empty() { + for u in key_urls { + let (pubref, privref) = &u; + kss.keyset + .delete_key(pubref) + .map_err(|e| format!("unable to remove key {pubref}: {e}\n"))?; + if let Some(privref) = privref { + let priv_url = Url::parse(privref) + .map_err(|e| format!("unable to parse {privref} as URL: {e}"))?; + remove_key(kss, priv_url)?; + } + let pub_url = Url::parse(pubref) + .map_err(|e| format!("unable to parse {pubref} as URL: {e}"))?; + remove_key(kss, pub_url)?; + } + println!(); + } + } + Ok(()) +} + +/// Start a KSK roll. +fn start_ksk_roll( + ksc: &KeySetConfig, + kss: &mut KeySetState, + env: &impl Env, + verbose: bool, + run_update_ds_command: &mut bool, +) -> Result, Error> { + let roll_type = RollType::KskRoll; + + assert!(!kss.keyset.keys().is_empty()); + + // Check for CSK. + if ksc.use_csk { + return Err("wrong key roll, use start-csk-roll\n".into()); + } + + // Refuse if we can find a CSK key. + if kss.keyset.keys().iter().any(|(_, key)| { + if let KeyType::Csk(keystate, _) = key.keytype() { + !keystate.stale() + } else { + false + } + }) { + return Err(format!("cannot start {roll_type:?} roll, found CSK\n").into()); + } + + // Find existing KSKs. Do we complain if there is none? 
+ let old_stored: Vec<_> = kss + .keyset + .keys() + .iter() + .filter(|(_, key)| { + if let KeyType::Ksk(keystate) = key.keytype() { + !keystate.stale() + } else { + false + } + }) + .map(|(name, _)| name.clone()) + .collect(); + let old: Vec<_> = old_stored.iter().map(|name| name.as_ref()).collect(); + + // Create a new KSK + let (ksk_pub_url, ksk_priv_url, algorithm, key_tag) = new_keys( + kss.keyset.name(), + ksc.algorithm.to_generate_params(), + true, + kss.keyset.keys(), + &ksc.keys_dir, + env, + #[cfg(feature = "kmip")] + &mut kss.kmip, + )?; + kss.keyset + .add_key_ksk( + ksk_pub_url.to_string(), + Some(ksk_priv_url.to_string()), + algorithm, + key_tag, + UnixTime::now(), + true, + ) + .map_err(|e| format!("unable to add KSK {ksk_pub_url}: {e}\n"))?; + + let new = [ksk_pub_url.as_ref()]; + + // Start the key roll + let actions = match kss + .keyset + .start_roll(roll_type, &old, &new) + .map_err(|e| format!("cannot start {roll_type:?}: {e}\n")) + { + Ok(actions) => actions, + Err(e) => { + // Remove the keys we just created. + remove_key(kss, ksk_priv_url)?; + remove_key(kss, ksk_pub_url)?; + return Err(e.into()); + } + }; + handle_actions(&actions, ksc, kss, env, verbose, run_update_ds_command)?; + kss.internal.insert(roll_type, Default::default()); + Ok(actions) +} + +/// Start a ZSK roll. +fn start_zsk_roll( + ksc: &KeySetConfig, + kss: &mut KeySetState, + env: &impl Env, + verbose: bool, + run_update_ds_command: &mut bool, +) -> Result, Error> { + let roll_type = RollType::ZskRoll; + + assert!(!kss.keyset.keys().is_empty()); + + // Check for CSK. + if ksc.use_csk { + return Err("wrong key roll, use start-csk-roll\n".into()); + } + + // Refuse if we can find a CSK key. + if kss.keyset.keys().iter().any(|(_, key)| { + if let KeyType::Csk(keystate, _) = key.keytype() { + !keystate.stale() + } else { + false + } + }) { + return Err(format!("cannot start {roll_type:?} roll, found CSK\n").into()); + } + + // Find existing ZSKs. 
Do we complain if there is none? + let old_stored: Vec<_> = kss + .keyset + .keys() + .iter() + .filter(|(_, key)| { + if let KeyType::Zsk(keystate) = key.keytype() { + !keystate.stale() + } else { + false + } + }) + .map(|(name, _)| name.clone()) + .collect(); + let old: Vec<_> = old_stored.iter().map(|name| name.as_ref()).collect(); + + // Collect algorithms. Maybe this needs to be in the library. + + // Create a new ZSK + let (zsk_pub_url, zsk_priv_url, algorithm, key_tag) = new_keys( + kss.keyset.name(), + ksc.algorithm.to_generate_params(), + false, + kss.keyset.keys(), + &ksc.keys_dir, + env, + #[cfg(feature = "kmip")] + &mut kss.kmip, + )?; + kss.keyset + .add_key_zsk( + zsk_pub_url.to_string(), + Some(zsk_priv_url.to_string()), + algorithm, + key_tag, + UnixTime::now(), + true, + ) + .map_err(|e| format!("unable to add ZSK {zsk_pub_url}: {e}\n"))?; + + let new = [zsk_pub_url.as_ref()]; + + // Start the key roll + let actions = match kss + .keyset + .start_roll(roll_type, &old, &new) + .map_err(|e| format!("cannot start {roll_type:?}: {e}\n")) + { + Ok(actions) => actions, + Err(e) => { + // Remove the keys we just created. + remove_key(kss, zsk_priv_url)?; + remove_key(kss, zsk_pub_url)?; + return Err(e.into()); + } + }; + + handle_actions(&actions, ksc, kss, env, verbose, run_update_ds_command)?; + kss.internal.insert(roll_type, Default::default()); + Ok(actions) +} + +/// Start a CSK roll. +fn start_csk_roll( + ksc: &KeySetConfig, + kss: &mut KeySetState, + env: &impl Env, + verbose: bool, + run_update_ds_command: &mut bool, +) -> Result, Error> { + let roll_type = RollType::CskRoll; + + assert!(!kss.keyset.keys().is_empty()); + + // Find existing KSKs, ZSKs and CSKs. Do we complain if there + // are none? 
+ let old_stored: Vec<_> = kss + .keyset + .keys() + .iter() + .filter(|(_, key)| match key.keytype() { + KeyType::Ksk(keystate) | KeyType::Zsk(keystate) | KeyType::Csk(keystate, _) => { + // Assume that for a CSK it is sufficient to check + // one of the key states. Also assume that we + // can check at_parent for a ZSK. + !keystate.stale() + } + KeyType::Include(_) => false, + }) + .map(|(name, _)| name.clone()) + .collect(); + let old: Vec<_> = old_stored.iter().map(|name| name.as_ref()).collect(); + + // Collect algorithms. Maybe this needs to be in the library. + + let (new_stored, new_urls) = new_csk_or_ksk_zsk(ksc, kss, env)?; + + let new: Vec<_> = new_stored.iter().map(|v| v.as_ref()).collect(); + + // Start the key roll + let actions = match kss + .keyset + .start_roll(roll_type, &old, &new) + .map_err(|e| format!("cannot start {roll_type:?}: {e}\n")) + { + Ok(actions) => actions, + Err(e) => { + // Remove the key files we just created. + for u in new_urls { + remove_key(kss, u)?; + } + return Err(e.into()); + } + }; + + handle_actions(&actions, ksc, kss, env, verbose, run_update_ds_command)?; + kss.internal.insert(roll_type, Default::default()); + Ok(actions) +} + +/// Start an algorithm roll. +fn start_algorithm_roll( + ksc: &KeySetConfig, + kss: &mut KeySetState, + env: &impl Env, + verbose: bool, + run_update_ds_command: &mut bool, +) -> Result, Error> { + let roll_type = RollType::AlgorithmRoll; + + assert!(!kss.keyset.keys().is_empty()); + + // Find existing KSKs, ZSKs and CSKs. Do we complain if there + // are none? + let old_stored: Vec<_> = kss + .keyset + .keys() + .iter() + .filter(|(_, key)| match key.keytype() { + KeyType::Ksk(keystate) | KeyType::Zsk(keystate) | KeyType::Csk(keystate, _) => { + // Assume that for a CSK it is sufficient to check + // one of the key states. Also assume that we + // can check at_parent for a ZSK. 
+ !keystate.stale() + } + KeyType::Include(_) => false, + }) + .map(|(name, _)| name.clone()) + .collect(); + let old: Vec<_> = old_stored.iter().map(|name| name.as_ref()).collect(); + + let (new_stored, new_urls) = new_csk_or_ksk_zsk(ksc, kss, env)?; + let new: Vec<_> = new_stored.iter().map(|v| v.as_ref()).collect(); + + // Start the key roll + let actions = match kss + .keyset + .start_roll(roll_type, &old, &new) + .map_err(|e| format!("cannot start roll: {e}\n")) + { + Ok(actions) => actions, + Err(e) => { + // Remove the key files we just created. + for u in new_urls { + remove_key(kss, u)?; + } + return Err(e.into()); + } + }; + + handle_actions(&actions, ksc, kss, env, verbose, run_update_ds_command)?; + kss.internal.insert(roll_type, Default::default()); + Ok(actions) +} + +/// Check whether a new DNSKEY RRset has propagated. +/// +/// Compile a list of nameservers for the zone and their addresses and +/// query each address for the DNSKEY RRset. The function +/// check_dnskey_for_address does the actual work. +async fn report_dnskey_propagated(kss: &KeySetState) -> AutoReportActionsResult { + // Convert the DNSKEY RRset plus RRSIGs into a HashSet. + // Find the address of all name servers of zone + // Ask each nameserver for the DNSKEY RRset. Check if it matches the + // one we want. + // If it doesn't match, wait the TTL of the RRset to try again. + // On error, wait a default time. 
+ let mut target_dnskey: HashSet = HashSet::new(); + for dnskey_rr in &kss.dnskey_rrset { + let mut zonefile = Zonefile::new(); + zonefile.extend_from_slice(dnskey_rr.as_bytes()); + zonefile.extend_from_slice(b"\n"); + if let Ok(Some(Entry::Record(rec))) = zonefile.next_entry() { + target_dnskey.insert(rec.flatten_into()); + } + } + + let zone = kss.keyset.name(); + let addresses = match addresses_for_zone(zone).await { + Ok(a) => a, + Err(e) => { + warn!("Getting nameserver addresses for {zone} failed: {e}"); + return AutoReportActionsResult::Wait(UnixTime::now() + DEFAULT_WAIT); + } + }; + + // addresses_for_zone returns at least one address. + assert!(!addresses.is_empty()); + + let futures: Vec<_> = addresses + .iter() + .map(|a| check_dnskey_for_address(zone, a, target_dnskey.clone())) + .collect(); + let res: Vec<_> = join_all(futures).await; + + // Be paranoid. The variable max_ttl is set to None initially to make + // sure that we only return a value if something has been assigned + // during the loop. + let mut max_ttl = None; + for r in res { + let r = match r { + Ok(r) => r, + Err(e) => { + warn!("DNSKEY check failed: {e}"); + return AutoReportActionsResult::Wait(UnixTime::now() + DEFAULT_WAIT); + } + }; + match r { + // It doesn't really matter how long we have to wait. + AutoReportActionsResult::Wait(_) => return r, + AutoReportActionsResult::Report(ttl) => { + max_ttl = Some(max(max_ttl.unwrap_or(Ttl::from_secs(0)), ttl)); + } + } + } + + // We can only get here with Some(Ttl) because there is at least one + // address. + let max_ttl = max_ttl.expect("cannot be None"); + AutoReportActionsResult::Report(max_ttl) +} + +/// Check whether the parent zone has a DS RRset that matches the keys +/// with 'at_parent' equal to true. +/// +/// Compile a list of nameservers for the parent zone and their addresses and +/// query each address for the DS RRset. The function +/// check_ds_for_address does the actual work. 
The CDNSKEY RRset is +/// used as the reference for the DS RRset. +async fn report_ds_propagated(kss: &KeySetState) -> Result { + // Convert the CDNSKEY RRset into a HashSet. + // Find the name of the parent zone. + // Find the address of all name servers of the parent zone. + // Ask each nameserver for the DS RRset. Check if it matches the + // one we want. + // If it doesn't match, wait the TTL of the RRset to try again. + // On error, wait a default time. + + let mut target_dnskey: HashSet = HashSet::new(); + for cdnskey_rr in &kss.cds_rrset { + let mut zonefile = Zonefile::new(); + zonefile.extend_from_slice(cdnskey_rr.as_bytes()); + zonefile.extend_from_slice(b"\n"); + if let Ok(Some(Entry::Record(r))) = zonefile.next_entry() { + if let ZoneRecordData::Cdnskey(cdnskey) = r.data() { + let dnskey = Dnskey::>::new( + cdnskey.flags(), + cdnskey.protocol(), + cdnskey.algorithm(), + cdnskey.public_key().to_vec(), + ) + .expect("should not fail"); + let record = Record::new(r.owner().to_name(), r.class(), r.ttl(), dnskey); + target_dnskey.insert(record); + } + } + } + + let zone = kss.keyset.name(); + let parent_zone = parent_zone(zone).await?; + let addresses = addresses_for_zone(&parent_zone).await?; + + // addresses_for_zone returns at least one address. + assert!(!addresses.is_empty()); + + let futures: Vec<_> = addresses + .iter() + .map(|a| check_ds_for_address(zone, a, target_dnskey.clone())) + .collect(); + let res: Vec<_> = join_all(futures).await; + let mut max_ttl = None; + for r in res { + let r = r?; + match r { + // It doesn't really matter how long we have to wait. + AutoReportActionsResult::Wait(_) => return Ok(r), + AutoReportActionsResult::Report(ttl) => { + max_ttl = Some(max(max_ttl.unwrap_or(Ttl::from_secs(0)), ttl)); + } + } + } + + // We can only get here with Some(Ttl) because there is at least one + // address. 
+ let max_ttl = max_ttl.expect("cannot be None"); + Ok(AutoReportActionsResult::Report(max_ttl)) +} + +/// Report whether all RRSIGs (except for the ones that are copied from +/// keyset state) have been updated. +/// +/// The basic process is to send an AXFR query to the primary nameserver and +/// check the zone. If the zone checks out, very that all of the nameservers +/// of the zone have the checked SOA serial or newer. If a (name, rtype) tuple +/// is found with the wrong signatures then keep checking that name, rtype +/// combination until the right signatures are found. Then go back to checking +/// the entire zone. NSEC3 is special because it is not possible to directly +/// query for NSEC3 records. In that case, wait for high SOA serial and check +/// the entire zone again. +async fn report_rrsig_propagated(kss: &KeySetState) -> Result { + // This function assume a single signer. Multi-signer is not supported + // at all, but any kind of active-passive or active-active setup would also + // need changes. With more than one signer, each signer needs to be + // checked explicitly. Then for all nameservers it needs to be checked + // that their SOA versions are at least as high as all of the signers. + // Check the zone. If the zone checks out, make sure that all nameservers + // have at least the version of the zone that was checked. + + let result = check_zone(kss).await?; + let (serial, ttl, report_ttl) = match result { + // check_zone never returns Report or Wait. + AutoReportRrsigResult::Report(_) | AutoReportRrsigResult::Wait(_) => unreachable!(), + AutoReportRrsigResult::WaitSoa { + serial, + ttl, + report_ttl, + .. + } => (serial, ttl, report_ttl), + AutoReportRrsigResult::WaitRecord { .. } | AutoReportRrsigResult::WaitNextSerial { .. 
} => { + return Ok(result) + } + }; + + Ok( + if check_soa(serial, kss).await.unwrap_or_else(|e| { + warn!("Check SOA propagation failed: {e}"); + false + }) { + AutoReportRrsigResult::Report(report_ttl) + } else { + AutoReportRrsigResult::WaitSoa { + next: UnixTime::now() + ttl.into(), + serial, + ttl, + report_ttl, + } + }, + ) +} + +/// Check whether the zone has signatures from the right keys. +/// +/// Collect the ZSK algorithm and key tags into a HashSet +/// Get the primary nameserver from the SOA record (this should become +/// a configuration option for the nameserver and any TSIG key to use). +/// Transfer the zone. +/// Assume the signer is correct. +/// Convert the RRSIGs into a HashMap with (name, type) as key and a HashSet +/// of (algorithm, key tag) as value. +/// Convert the other records into a BtreeMap with name as key and +/// a HashSet of type as the value. Check that each name and type has a +/// corresponding complete RRSIG set. +/// Ignore delegated records +async fn check_zone(kss: &KeySetState) -> Result { + let expected_set = get_expected_zsk_key_tags(kss); + + let zone = kss.keyset.name(); + + let resolver = StubResolver::new(); + let answer = resolver + .query((zone, Rtype::SOA)) + .await + .map_err(|e| format!("lookup of {zone}/SOA failed: {e}"))?; + let Some(Ok((mname, mut serial))) = answer + .answer()? 
+ .limit_to_in::>() + .map(|r| r.map(|r| (r.data().mname().clone(), r.data().serial()))) + .next() + else { + let rcode = answer.opt_rcode(); + return if rcode != OptRcode::NOERROR { + Err(format!("Unable to resolve {zone}/SOA: {rcode}").into()) + } else { + Err(format!("No result for {zone}/SOA").into()) + }; + }; + + let addresses = addresses_for_name(&resolver, mname).await?; + + 'addr: for a in &addresses { + let tcp_conn = match TcpStream::connect((*a, 53_u16)).await { + Ok(conn) => conn, + Err(e) => { + warn!("DNS TCP connection to {a} failed: {e}"); + continue; + } + }; + + let (tcp, transport) = stream::Connection::>, _>::new(tcp_conn); + tokio::spawn(transport.run()); + + let msg = MessageBuilder::new_vec(); + let mut msg = msg.question(); + msg.push((zone, Rtype::AXFR)).expect("should not fail"); + let req = RequestMessageMulti::new(msg).expect("should not fail"); + + // Send a request message. + let mut request = SendRequestMulti::send_request(&tcp, req.clone()); + + let mut treemap = BTreeMap::new(); + let mut sigmap = HashMap::new(); + + let mut first_soa = false; + let mut max_ttl = Ttl::from_secs(0); + loop { + // Get the reply + let reply = match request.get_response().await { + Ok(reply) => reply, + Err(e) => { + warn!("reading AXFR response from {a} failed: {e}"); + continue 'addr; + } + }; + let Some(reply) = reply else { + return Err(format!("Unexpected end of AXFR for {zone}").into()); + }; + let rcode = reply.opt_rcode(); + if rcode != OptRcode::NOERROR { + warn!("AXFR for {zone} from {a} failed: {rcode}"); + continue 'addr; + } + + let answer = reply.answer()?; + for r in answer { + let r = r?; + if !first_soa { + let Some(soa_record) = r.to_record::>()? else { + // Bad start of zone transfer. + return Err(format!( + "Wrong start of AXFR for {zone}, expected SOA found {}", + r.rtype() + ) + .into()); + }; + + first_soa = true; + serial = soa_record.data().serial(); + } else if r.rtype() == Rtype::SOA { + // The end. 
+ let res = check_rrsigs(treemap, sigmap, zone, expected_set); + return match res { + CheckRrsigsResult::Done => Ok(AutoReportRrsigResult::WaitSoa { + next: UnixTime::now(), + serial, + ttl: r.ttl(), + report_ttl: max_ttl, + }), + CheckRrsigsResult::WaitRecord { name, rtype } => { + Ok(AutoReportRrsigResult::WaitRecord { + next: UnixTime::now() + r.ttl().into(), + name, + rtype, + ttl: r.ttl(), + }) + } + CheckRrsigsResult::WaitNextSerial => { + Ok(AutoReportRrsigResult::WaitNextSerial { + next: UnixTime::now() + r.ttl().into(), + serial, + ttl: r.ttl(), + }) + } + }; + } + + let owner = r.owner().to_name(); + if let Some(rrsig_record) = r.to_record::>()? { + let key = (owner, rrsig_record.data().type_covered()); + let value = ( + rrsig_record.data().algorithm(), + rrsig_record.data().key_tag(), + ); + let alg_kt_map = sigmap.entry(key).or_insert_with(HashSet::new); + alg_kt_map.insert(value); + max_ttl = max(max_ttl, r.ttl()); + } else { + let key = owner; + let rtype_map = treemap.entry(key).or_insert_with(HashSet::new); + rtype_map.insert(r.rtype()); + } + } + } + } + + Err(format!("AXFR for {zone} failed for all addresses {addresses:?}").into()) +} + +/// Return the set of addresses of the nameservers of a zone. +async fn addresses_for_zone(zone: &impl ToName) -> Result, Error> { + // Paranoid solution: + // Find nameserver addresses for the parent zone. + // Iterate over those addresses and try to get a delegation. + // Record all nameservers and glue addresses returned in the delegations. + // Add offical address for those nameservers. + // Iterate over the address and ask for the apex NS RRset. Add those + // and address offical address for those nameservers. + // Return the set of addresses. + // + // Current method, ask a resolver for the apex NS RRset. Loop over the + // set and ask for addresses. Return the list of addresses. 
+ + let mut nameservers = Vec::new(); + let resolver = StubResolver::new(); + let answer = resolver + .query((zone, Rtype::NS)) + .await + .map_err(|e| format!("lookup of {}/NS failed: {e}", zone.to_name::>()))?; + let rcode = answer.opt_rcode(); + if rcode != OptRcode::NOERROR { + return Err(format!("{}/NS query failed: {rcode}", zone.to_name::>()).into()); + } + for r in answer.answer()?.limit_to_in::>() { + let r = r?; + let AllRecordData::Ns(ns) = r.data() else { + continue; + }; + if *r.owner() != zone { + continue; + } + nameservers.push(ns.nsdname().clone()); + } + if nameservers.is_empty() { + return Err(format!("{} has no NS records", zone.to_name::>()).into()); + } + + let mut futures = Vec::new(); + for n in nameservers { + futures.push(addresses_for_name(&resolver, n)); + } + + let mut set = HashSet::new(); + for a in join_all(futures).await.into_iter() { + set.extend(match a { + Ok(a) => a, + Err(e) => { + return Err(e); + } + }); + } + Ok(set) +} + +/// Return the IPv4 and IPv6 addresses associated with a name. +async fn addresses_for_name( + resolver: &StubResolver, + name: impl ToName, +) -> Result, Error> { + let res = lookup_host(&resolver, &name).await.map_err(|e| { + format!( + "lookup of addresses for {} failed: {e}", + name.to_name::>() + ) + })?; + let res: Vec<_> = res.iter().collect(); + if res.is_empty() { + return Err(format!("no IP addresses found for {}", name.to_name::>()).into()); + } + Ok(res) +} + +/// Check whether a nameserver at a specific address has the right DNSKEY +/// RRset plus signatures. 
+async fn check_dnskey_for_address( + zone: &Name>, + address: &IpAddr, + mut target_dnskey: HashSet, +) -> Result { + let records = lookup_name_rtype_at_address(zone, Rtype::DNSKEY, address).await?; + + let mut max_ttl = Ttl::from_secs(0); + + for r in records { + if let AllRecordData::Dnskey(dnskey) = r.data() { + if r.owner() != zone { + continue; + } + max_ttl = max(max_ttl, r.ttl()); + let target_r = target_dnskey.iter().find(|target_r| { + if let ZoneRecordData::Dnskey(target_dnskey) = target_r.data() { + target_dnskey == dnskey + } else { + false + } + }); + if let Some(record) = target_r { + // Clone record to release target_dnskey. + let record = record.clone(); + // Found one, remove it from the set. + target_dnskey.remove(&record); + } else { + // The current record is not found in the target set. Wait + // until the TTL has expired. + debug!("Check DNSKEY RRset: DNSKEY record not expected"); + return Ok(AutoReportActionsResult::Wait( + UnixTime::now() + r.ttl().into_duration(), + )); + } + continue; + } + if let AllRecordData::Rrsig(rrsig) = r.data() { + if r.owner() != zone || rrsig.type_covered() != Rtype::DNSKEY { + continue; + } + max_ttl = max(max_ttl, r.ttl()); + let target_r = target_dnskey.iter().find(|target_r| { + if let ZoneRecordData::Rrsig(target_rrsig) = target_r.data() { + target_rrsig == rrsig + } else { + false + } + }); + if let Some(record) = target_r { + // Clone record to release target_dnskey. + let record = record.clone(); + // Found one, remove it from the set. + target_dnskey.remove(&record); + } else { + // The current record is not found in the target set. Wait + // until the TTL has expired. + debug!("Check DNSKEY RRset: RRSIG record not expected"); + return Ok(AutoReportActionsResult::Wait( + UnixTime::now() + r.ttl().into_duration(), + )); + } + continue; + } + } + if let Some(record) = target_dnskey.iter().next() { + // Not all DNSKEY records were found. 
+ warn!("Not all required DNSKEY records were found for {zone}"); + Ok(AutoReportActionsResult::Wait( + UnixTime::now() + record.ttl().into(), + )) + } else { + Ok(AutoReportActionsResult::Report(max_ttl)) + } +} + +/// Check whether a nameserver at a specific address has the right DS RRset. +async fn check_ds_for_address( + zone: &Name>, + address: &IpAddr, + mut target_dnskey: HashSet, +) -> Result { + let records = lookup_name_rtype_at_address::>(zone, Rtype::DS, address).await?; + + let mut max_ttl = Ttl::from_secs(0); + + for r in records { + if r.owner() != zone { + continue; + } + max_ttl = max(max_ttl, r.ttl()); + let target_r = target_dnskey.iter().find(|target_r| { + let digest = target_r + .data() + .digest(zone, r.data().digest_type()) + .expect("should not fail"); + r.data().algorithm() == target_r.data().algorithm() + && r.data().digest() == digest.as_ref() + }); + if let Some(record) = target_r { + // Clone record to release target_dnskey. + let record = record.clone(); + // Found one, remove it from the set. + target_dnskey.remove(&record); + } else { + // The current record is not found in the target set. Wait + // until the TTL has expired. + debug!("Check DS RRset: DS record not expected"); + return Ok(AutoReportActionsResult::Wait( + UnixTime::now() + r.ttl().into_duration(), + )); + } + continue; + } + let dnskey = target_dnskey.iter().next(); + if let Some(dnskey) = dnskey { + debug!("Check DS RRset: expected DS record not present"); + let ttl = dnskey.ttl(); + Ok(AutoReportActionsResult::Wait( + UnixTime::now() + ttl.into_duration(), + )) + } else { + Ok(AutoReportActionsResult::Report(max_ttl)) + } +} + +/// Check whether a nameserver at a specific address has the right SOA serial +/// or a newer one. 
+async fn check_soa_for_address( + zone: &Name>, + address: &IpAddr, + serial: Serial, +) -> Result { + let records = lookup_name_rtype_at_address::>(zone, Rtype::SOA, address).await?; + + if records.is_empty() { + return Ok(AutoReportActionsResult::Wait( + UnixTime::now() + DEFAULT_WAIT, + )); + } + + if let Some(ttl) = records + .iter() + .filter_map(|r| { + if r.data().serial() < serial { + Some(r.ttl()) + } else { + None + } + }) + .next() + { + return Ok(AutoReportActionsResult::Wait(UnixTime::now() + ttl.into())); + } + // Return a dummy TTL. The caller knows the real TTL to report. + Ok(AutoReportActionsResult::Report(Ttl::from_secs(0))) +} + +/// Lookup a name, rtype pair at an address. +/// +/// Extract records of type T from the answer. +async fn lookup_name_rtype_at_address( + name: &Name>, + rtype: Rtype, + address: &IpAddr, +) -> Result, T>>, Error> +where + for<'a> T: ParseRecordData<'a, Bytes>, +{ + let server_addr = SocketAddr::new(*address, 53); + let udp_connect = UdpConnect::new(server_addr); + let tcp_connect = TcpConnect::new(server_addr); + let (udptcp_conn, transport) = dgram_stream::Connection::new(udp_connect, tcp_connect); + tokio::spawn(transport.run()); + + let mut msg = MessageBuilder::new_vec(); + msg.header_mut().set_rd(true); + let mut msg = msg.question(); + msg.push((name, rtype)).expect("should not fail"); + let mut req = RequestMessage::new(msg).expect("should not fail"); + req.set_dnssec_ok(true); + let mut request = udptcp_conn.send_request(req.clone()); + let response = request + .get_response() + .await + .map_err(|e| format!("{name}/{rtype} request to {address} failed: {e}"))?; + + let mut res = Vec::new(); + for r in response.answer()?.limit_to_in::() { + let r = r?; + res.push(r); + } + Ok(res) +} + +/// Return the name of the parent zone. 
+async fn parent_zone(name: &Name>) -> Result>, Error> { + let parent = name + .parent() + .ok_or_else::(|| format!("unable to get parent of {name}").into())?; + + let resolver = StubResolver::new(); + let answer = resolver + .query((&parent, Rtype::SOA)) + .await + .map_err(|e| format!("lookup of {parent}/SOA failed: {e}"))?; + let rcode = answer.opt_rcode(); + if rcode != OptRcode::NOERROR { + return Err(format!("{parent}/SOA query failed: {rcode}").into()); + } + if let Some(Ok(owner)) = answer + .answer()? + .limit_to_in::>() + .map(|r| r.map(|r| r.owner().to_name::>())) + .next() + { + return Ok(owner); + } + + // Try the authority section. + if let Some(Ok(owner)) = answer + .authority()? + .limit_to_in::>() + .map(|r| r.map(|r| r.owner().to_name::>())) + .next() + { + return Ok(owner); + } + + Err(format!("{parent}/SOA query failed").into()) +} + +/// This function automatically starts a key roll when the conditions are right. +/// +/// First the conficting_roll function is invoked to make sure there are no +/// rolls in progress that would conflict. Then match_keytype is used to +/// select key that could participate in this roll. The published time of +/// each key is compared to the validity parameter to see if the key +/// needs to be replaced. No key roll will happen is validity is equal to +/// None. The start_roll parameter starts the key roll. +#[allow(clippy::too_many_arguments)] +fn auto_start( + validity: &Option, + auto: &AutoConfig, + ksc: &KeySetConfig, + kss: &mut KeySetState, + env: Env, + state_changed: &mut bool, + conficting_roll: impl Fn(RollType) -> bool, + match_keytype: impl Fn(KeyType) -> Option, + start_roll: impl Fn( + &KeySetConfig, + &mut KeySetState, + Env, + bool, + &mut bool, + ) -> Result, Error>, + run_update_ds_command: &mut bool, +) -> Result<(), Error> { + if let Some(validity) = validity { + if auto.start { + // If there is no conficting roll, and this + // flag is set, and the lifetime has expired then + // start a roll. 
+ if !kss + .keyset + .rollstates() + .iter() + .any(|(r, _)| conficting_roll(*r)) + { + let next = kss + .keyset + .keys() + .iter() + .filter_map(|(_, k)| { + if let Some(keystate) = match_keytype(k.keytype()) { + if !keystate.stale() { + k.timestamps() + .published() + .map(|published| published + *validity) + } else { + None + } + } else { + None + } + }) + .min(); + if let Some(next) = next { + if next < UnixTime::now() { + start_roll(ksc, kss, env, false, run_update_ds_command)?; + *state_changed = true; + } + } + } + } + } + Ok(()) +} + +/// Handle automation for the report, expire and done steps. +/// +/// The auto parameter has the flags that control whether automation is +/// enabled or disabled for a step. The roll_list parameters are the +/// roll types that are covered by the auto parameter. +/// This function calls two function (auto_report_actions and +/// auto_wait_actions) to handle, repectively, the Report and Wait actions. +async fn auto_report_expire_done( + auto: &AutoConfig, + roll_list: &[RollType], + ksc: &KeySetConfig, + kss: &mut KeySetState, + env: &impl Env, + state_changed: &mut bool, + run_update_ds_command: &mut bool, +) -> Result<(), Error> { + if auto.report { + // If there is currently a roll in one of the + // propagation states and this flags is set and all + // actions have comleted report the ttl. 
+ for r in roll_list { + if let Some(state) = kss.keyset.rollstates().get(r) { + let report_state = kss.internal.get(r).expect("should not fail"); + let report_state = match state { + RollState::Propagation1 => &report_state.propagation1, + RollState::Propagation2 => &report_state.propagation2, + _ => continue, + }; + let actions = kss.keyset.actions(*r); + match auto_report_actions(&actions, kss, report_state, state_changed).await { + AutoReportActionsResult::Wait(_) => continue, + AutoReportActionsResult::Report(ttl) => { + let actions = match state { + RollState::Propagation1 => { + kss.keyset.propagation1_complete(*r, ttl.as_secs()) + } + RollState::Propagation2 => { + kss.keyset.propagation2_complete(*r, ttl.as_secs()) + } + _ => unreachable!(), + }; + + let actions = match actions { + Ok(actions) => actions, + Err(err) => { + return Err(format!( + "Error reporting propagation complete: {err}\n" + ) + .into()); + } + }; + + handle_actions(&actions, ksc, kss, env, false, run_update_ds_command)?; + *state_changed = true; + } + } + } + } + } + if auto.expire { + // If there is currently a roll in one of the cache + // expire states and this flag is set, move to the next + // state + for r in roll_list { + if let Some(state) = kss.keyset.rollstates().get(r) { + let actions = match state { + RollState::CacheExpire1(_) => kss.keyset.cache_expired1(*r), + RollState::CacheExpire2(_) => kss.keyset.cache_expired2(*r), + _ => continue, + }; + if let Err(keyset::Error::Wait(_)) = actions { + // To early. + continue; + } + let actions = actions + .map_err(|e| format!("cache_expired[12] failed for state {r:?}: {e}"))?; + handle_actions(&actions, ksc, kss, env, false, run_update_ds_command)?; + // Report actions + *state_changed = true; + } + } + } + if auto.done { + // If there is current a roll in the done state and all + // actions have completed then call do_done to end the key roll. 
+ for r in roll_list { + if let Some(RollState::Done) = kss.keyset.rollstates().get(r) { + let report_state = &kss.internal.get(r).expect("should not fail").done; + let actions = kss.keyset.actions(*r); + match auto_wait_actions(&actions, kss, report_state, state_changed).await { + AutoActionsResult::Ok => { + do_done(kss, *r, ksc.autoremove)?; + *state_changed = true; + } + AutoActionsResult::Wait(_) => continue, + } + } + } + } + Ok(()) +} + +/// This function computes when the next key roll should happen. +/// +/// It has the same logic as auto_start but instead of starting a key roll, +/// it (optionally) adds a timestamp to the cron_next vector. Should this +/// be merged with auto_start? +fn cron_next_auto_start( + validity: Option, + auto: &AutoConfig, + kss: &KeySetState, + conflicting_roll: impl Fn(RollType) -> bool, + match_keytype: impl Fn(KeyType) -> Option, + cron_next: &mut Vec>, +) { + if let Some(validity) = validity { + if auto.start { + // If there is no KSK, CSK, or Algorithm roll, and this + // flag is set, compute the remaining KSK lifetime + + // The only roll types that are compatible with a KSK roll + // are the two ZSK rolls. + if !kss + .keyset + .rollstates() + .iter() + .any(|(r, _)| conflicting_roll(*r)) + { + let next = kss + .keyset + .keys() + .iter() + .filter_map(|(_, k)| { + if let Some(keystate) = match_keytype(k.keytype()) { + if !keystate.stale() { + k.timestamps().published() + } else { + None + } + } else { + None + } + }) + .map(|published| published + validity) + .min(); + cron_next.push(next); + } + } + } +} + +/// This function computes when next to try to move to the next state. +/// +/// For the Report and Wait actions that involves checking when propagation +/// should be tested again. For the expire step it computes when the +/// keyset object in the domain library accepts the cache_expired1 or +/// cache_expired2 methods. 
+fn cron_next_auto_report_expire_done( + auto: &AutoConfig, + roll_list: &[RollType], + kss: &KeySetState, + cron_next: &mut Vec>, +) -> Result<(), Error> { + if auto.report { + // If there is currently a roll in one of the propagation + // states and this flags is set take when to check again for + // actions to complete + for r in roll_list { + if let Some(state) = kss.keyset.rollstates().get(r) { + let report_state = kss.internal.get(r).expect("should not fail"); + let report_state = match state { + RollState::Propagation1 => &report_state.propagation1, + RollState::Propagation2 => &report_state.propagation2, + _ => continue, + }; + let actions = kss.keyset.actions(*r); + match check_auto_actions(&actions, report_state) { + AutoActionsResult::Ok => { + // All actions are ready. Request cron. + cron_next.push(Some(UnixTime::now())); + } + AutoActionsResult::Wait(next) => cron_next.push(Some(next)), + } + } + } + } + + if auto.expire { + // If there is currently a roll in one of the cache expire + // states and this flag is set, use the remaining time until caches + // are expired. Try to issue the cache_expire[12] method on a + // clone of keyset. + let mut keyset = kss.keyset.clone(); + for r in roll_list { + if let Some(state) = keyset.rollstates().get(r) { + let actions = match state { + RollState::CacheExpire1(_) => keyset.cache_expired1(*r), + RollState::CacheExpire2(_) => keyset.cache_expired2(*r), + _ => continue, + }; + if let Err(keyset::Error::Wait(remain)) = actions { + cron_next.push(Some(UnixTime::now() + remain)); + continue; + } + let _ = actions + .map_err(|e| format!("cache_expired[12] failed for state {r:?}: {e}"))?; + + // Time to call cron. Report the current time. 
+ cron_next.push(Some(UnixTime::now())); + } + } + } + + if auto.done { + // If there is current a roll in the done state and all + // and this flag is set, take when the check again for actions to + // complete + for r in roll_list { + if let Some(RollState::Done) = kss.keyset.rollstates().get(r) { + let report_state = kss.internal.get(r).expect("should not fail"); + match check_auto_actions(&kss.keyset.actions(*r), &report_state.done) { + AutoActionsResult::Ok => { + // All actions are ready. Request cron. + cron_next.push(Some(UnixTime::now())); + } + AutoActionsResult::Wait(next) => { + cron_next.push(Some(next)); + } + } + } + } + } + + Ok(()) +} + +/// The result of checking whether all RRSIG records are present. +#[derive(PartialEq)] +enum CheckRrsigsResult { + /// The required RRSIGs are present. + Done, + /// Wait for a specific name, rtype combination to get updated signatures. + WaitRecord { + /// The name to check. + name: Name>, + /// And the Rtype. + rtype: Rtype, + }, + /// Wait for the next version of the zone. + WaitNextSerial, +} + +/// Type for the key of the signature HashMap. +type SigmapKey = (Name>, Rtype); +/// Type for the value of the signature HashMap. +type SigmapValue = HashSet<(SecurityAlgorithm, u16)>; + +/// Check if all authoritive records have the right signatures. +/// +/// A zone is not authoritative for names below a delegation. At a delegation, +/// a zone is authoritative for DS and NSEC records. +fn check_rrsigs( + treemap: BTreeMap>, HashSet>, + sigmap: HashMap, + zone: &Name>, + expected_set: HashSet<(SecurityAlgorithm, u16)>, +) -> CheckRrsigsResult { + let mut delegation = None; + let mut result = CheckRrsigsResult::Done; + for (key, rtype_map) in treemap { + if let Some(name) = &delegation { + if key.ends_with(name) { + // Ignore anything below a delegation. 
+ continue; + } + delegation = None; + } + if rtype_map.contains(&Rtype::NS) && key != zone { + delegation = Some(key.clone()); + } + for rtype in rtype_map { + if delegation.is_some() { + // NS is not signed. A and AAAA are glue. + if rtype == Rtype::NS || rtype == Rtype::A || rtype == Rtype::AAAA { + continue; + } else if rtype == Rtype::DS || rtype == Rtype::NSEC { + // DS records are signed. Just keep going. + } else { + error!("Weird type {rtype} in delegation {}", &key); + continue; + } + } + if (rtype == Rtype::DNSKEY || rtype == Rtype::CDS || rtype == Rtype::CDNSKEY) + && key == zone + { + // These rtypes are signed with the KSKs + continue; + } + let set = if let Some(set) = sigmap.get(&(key.clone(), rtype)) { + set.clone() + } else { + warn!("RRSIG not found for {key}/{rtype}"); + HashSet::new() + }; + if set != expected_set { + // NSEC3 records are special because we cannot directly query + // for them. For 'normal' records, return WaitRecord. + // For NSEC3 we need to wait for a new version of the zone, + // so we return WaitNextSerial. However, WaitRecord is more + // efficient. Therefore, if the mismatch is at an NSEC3 then + // remember this by setting result to WaitNextSerial but + // keep checking. + if rtype != Rtype::NSEC3 { + warn!( + "RRSIG mismatch for {key}/{rtype}: found {:?} expected {:?}", + set, expected_set + ); + let name = key.to_name::>(); + return CheckRrsigsResult::WaitRecord { name, rtype }; + } + if result == CheckRrsigsResult::Done { + warn!( + "RRSIG mismatch for {key}/{rtype}: found {:?} expected {:?}", + set, expected_set + ); + } + result = CheckRrsigsResult::WaitNextSerial; + } + } + } + + // All authoritative records have signatures with the right algorithms and + // key tags. Or an NSEC3 failure was found. + result +} + +/// Check if a name, Rtype pair has the right signatures. 
+async fn check_record( + name: &Name>, + rtype: &Rtype, + kss: &KeySetState, +) -> Result { + let expected = get_expected_zsk_key_tags(kss); + let addresses = get_primary_addresses(kss.keyset.name()).await?; + for address in &addresses { + let server_addr = SocketAddr::new(*address, 53); + let udp_connect = UdpConnect::new(server_addr); + let tcp_connect = TcpConnect::new(server_addr); + let (udptcp_conn, transport) = dgram_stream::Connection::new(udp_connect, tcp_connect); + tokio::spawn(transport.run()); + + let mut msg = MessageBuilder::new_vec(); + msg.header_mut().set_rd(true); + let mut msg = msg.question(); + msg.push((name, *rtype)).expect("should not fail"); + let mut req = RequestMessage::new(msg).expect("should not fail"); + req.set_dnssec_ok(true); + let mut request = udptcp_conn.send_request(req.clone()); + let response = match request.get_response().await { + Ok(r) => r, + Err(e) => { + warn!("{name}/{rtype} request to {server_addr} failed: {e}"); + continue; + } + }; + + let mut alg_tag_set = HashSet::new(); + + for r in response.answer()?.limit_to_in::>() { + let r = r?; + if r.data().type_covered() != *rtype { + continue; + } + alg_tag_set.insert((r.data().algorithm(), r.data().key_tag())); + } + return Ok(alg_tag_set == expected); + } + Err(format!("lookup of {name}/{rtype} failed for all addresses {addresses:?}").into()) +} + +/// Check if the zone has move to the next serial. 
+async fn check_next_serial(serial: Serial, kss: &KeySetState) -> Result { + let zone = kss.keyset.name(); + let addresses = get_primary_addresses(zone).await?; + for address in &addresses { + let server_addr = SocketAddr::new(*address, 53); + let udp_connect = UdpConnect::new(server_addr); + let tcp_connect = TcpConnect::new(server_addr); + let (udptcp_conn, transport) = dgram_stream::Connection::new(udp_connect, tcp_connect); + tokio::spawn(transport.run()); + + let mut msg = MessageBuilder::new_vec(); + msg.header_mut().set_rd(true); + let mut msg = msg.question(); + msg.push((zone, Rtype::SOA)).expect("should not fail"); + let req = RequestMessage::new(msg).expect("should not fail"); + let mut request = udptcp_conn.send_request(req.clone()); + let response = match request.get_response().await { + Ok(r) => r, + Err(e) => { + warn!("{zone}/SOA request to {server_addr} failed: {e}"); + continue; + } + }; + + if let Some(r) = response.answer()?.limit_to_in::>().next() { + let r = r?; + return Ok(r.data().serial() > serial); + } + warn!("No SOA record in reply to SOA query for zone {zone}"); + return Ok(false); + } + Err(format!("lookup of {zone}/SOA failed for all addresses {addresses:?}").into()) +} + +/// Check if all addresses of all nameservers of the zone to see if they +/// have at least the SOA serial passed as parameter. +async fn check_soa(serial: Serial, kss: &KeySetState) -> Result { + // Find the address of all name servers of zone + // Ask each nameserver for the SOA record. + // Check that it's version is at least the version we checked. + // If it doesn't match, wait the TTL of the SOA record to try again. + // On error, wait a default time. 
+ + let zone = kss.keyset.name(); + + let addresses = addresses_for_zone(zone).await?; + let futures: Vec<_> = addresses + .iter() + .map(|a| check_soa_for_address(zone, a, serial)) + .collect(); + let res: Vec<_> = join_all(futures).await; + + for r in res { + let r = r?; + match r { + // It doesn't really matter how long we have to wait. + AutoReportActionsResult::Wait(_) => return Ok(false), + AutoReportActionsResult::Report(_) => (), + } + } + + Ok(true) +} + +/// Get the expected key tags. +/// +/// Instead of validating signatures against the keys that sign the zone, +/// the signatures are of only checked for key tags. +fn get_expected_zsk_key_tags(kss: &KeySetState) -> HashSet<(SecurityAlgorithm, u16)> { + kss.keyset + .keys() + .iter() + .filter_map(|(_, k)| match k.keytype() { + KeyType::Ksk(_) | KeyType::Include(_) => None, + KeyType::Zsk(keystate) => Some((keystate, k.algorithm(), k.key_tag())), + KeyType::Csk(_, keystate) => Some((keystate, k.algorithm(), k.key_tag())), + }) + .filter_map(|(ks, a, kt)| if ks.signer() { Some((a, kt)) } else { None }) + .collect() +} + +/// Get the addresses of the primary nameserver of a zone. +async fn get_primary_addresses(zone: &Name>) -> Result, Error> { + let resolver = StubResolver::new(); + let answer = resolver + .query((zone, Rtype::SOA)) + .await + .map_err(|e| format!("lookup of {zone}/SOA failed: {e}"))?; + let Some(Ok(mname)) = answer + .answer()? + .limit_to_in::>() + .map(|r| r.map(|r| r.data().mname().clone())) + .next() + else { + let rcode = answer.opt_rcode(); + return if rcode != OptRcode::NOERROR { + Err(format!("Unable to resolve {zone}/SOA: {rcode}").into()) + } else { + Err(format!("No result for {zone}/SOA").into()) + }; + }; + + addresses_for_name(&resolver, mname).await +} + +/// Check if an algorithm roll is needed. +/// +/// An algorithm roll is needed if the algorithm listed in config is +/// different from the set of algorithms in the collection of active keys. 
+fn algorithm_roll_needed(ksc: &KeySetConfig, kss: &KeySetState) -> bool { + // Collect the algorithms in all active keys. Check if the algorithm + // for new keys is the same. + let curr_algs: HashSet<_> = kss + .keyset + .keys() + .iter() + .filter_map(|(_, k)| { + if let Some(keystate) = match k.keytype() { + KeyType::Ksk(keystate) => Some(keystate), + KeyType::Zsk(keystate) => Some(keystate), + KeyType::Csk(keystate, _) => Some(keystate), + KeyType::Include(_) => None, + } { + if !keystate.stale() { + Some(k.algorithm()) + } else { + None + } + } else { + None + } + }) + .collect(); + let new_algs = HashSet::from([ksc.algorithm.to_generate_params().algorithm()]); + curr_algs != new_algs +} + +/// Show the automatic roll state for one state in a roll. +fn show_automatic_roll_state( + roll: RollType, + state: &RollState, + auto_state: &ReportState, + report: bool, +) { + println!("Roll {roll:?}, state {state:?}:"); + if let Some(status) = &auto_state.dnskey { + match status { + AutoReportActionsResult::Wait(retry) => { + println!("\tWait until the new DNSKEY RRset has propagated to all nameservers."); + println!("\tTry again after {retry}"); + } + AutoReportActionsResult::Report(ttl) => { + println!("\tThe new DNSKEY RRset has propagated to all nameservers."); + if report { + println!("\tReport (at least) TTL {}", ttl.as_secs()); + } + } + } + } + if let Some(status) = &auto_state.ds { + match status { + AutoReportActionsResult::Wait(retry) => { + println!("\tWait until the new DS RRset has propagated to all nameservers"); + println!("\tof the parent zone. 
Try again after {retry}"); + } + AutoReportActionsResult::Report(ttl) => { + println!("\tThe new DS RRset has propagated to all nameservers."); + if report { + println!("\tReport (at least) TTL {}", ttl.as_secs()); + } + } + } + } + if let Some(status) = &auto_state.rrsig { + match status { + AutoReportRrsigResult::Wait(next) => { + println!("\tSomething went wrong transferring the zone to be verified."); + println!("\tTry again after {next}"); + } + AutoReportRrsigResult::WaitRecord { + name, rtype, next, .. + } => { + println!("\tWait until {name}/{rtype} is signed with the right keys."); + println!("\tTry again after {next}"); + } + AutoReportRrsigResult::WaitNextSerial { serial, next, .. } => { + println!("\tWait for a zone with serial higher than {serial}"); + println!("\tTry again after {next}"); + } + AutoReportRrsigResult::WaitSoa { serial, next, .. } => { + println!("\tWait until the zone with at least serial {serial} has propagated"); + println!("\tto all nameservers. Try again after {next}"); + } + AutoReportRrsigResult::Report(ttl) => { + println!("\tThe new RRSIG records have propagated to all nameservers."); + if report { + println!("\tReport (at least) TTL {}", ttl.as_secs()); + } + } + } + } +} + +/// Create a new CSK key or KSK and ZSK keys if use_csk is false. 
+fn new_csk_or_ksk_zsk( + ksc: &KeySetConfig, + kss: &mut KeySetState, + env: &impl Env, +) -> Result<(Vec, Vec), Error> { + let (new_stored, new_urls) = if ksc.use_csk { + let mut new_urls = Vec::new(); + + // Create a new CSK + let (csk_pub_url, csk_priv_url, algorithm, key_tag) = new_keys( + kss.keyset.name(), + ksc.algorithm.to_generate_params(), + true, + kss.keyset.keys(), + &ksc.keys_dir, + env, + #[cfg(feature = "kmip")] + &mut kss.kmip, + )?; + new_urls.push(csk_priv_url.clone()); + new_urls.push(csk_pub_url.clone()); + kss.keyset + .add_key_csk( + csk_pub_url.to_string(), + Some(csk_priv_url.to_string()), + algorithm, + key_tag, + UnixTime::now(), + true, + ) + .map_err(|e| format!("unable to add CSK {csk_pub_url}: {e}\n"))?; + + let new = vec![csk_pub_url]; + (new, new_urls) + } else { + let mut new_urls = Vec::new(); + + // Create a new KSK + let (ksk_pub_url, ksk_priv_url, algorithm, key_tag) = new_keys( + kss.keyset.name(), + ksc.algorithm.to_generate_params(), + true, + kss.keyset.keys(), + &ksc.keys_dir, + env, + #[cfg(feature = "kmip")] + &mut kss.kmip, + )?; + new_urls.push(ksk_priv_url.clone()); + new_urls.push(ksk_pub_url.clone()); + kss.keyset + .add_key_ksk( + ksk_pub_url.to_string(), + Some(ksk_priv_url.to_string()), + algorithm, + key_tag, + UnixTime::now(), + true, + ) + .map_err(|e| format!("unable to add KSK {ksk_pub_url}: {e}\n"))?; + + // Create a new ZSK + let (zsk_pub_url, zsk_priv_url, algorithm, key_tag) = new_keys( + kss.keyset.name(), + ksc.algorithm.to_generate_params(), + false, + kss.keyset.keys(), + &ksc.keys_dir, + env, + #[cfg(feature = "kmip")] + &mut kss.kmip, + )?; + new_urls.push(zsk_priv_url.clone()); + new_urls.push(zsk_pub_url.clone()); + kss.keyset + .add_key_zsk( + zsk_pub_url.to_string(), + Some(zsk_priv_url.to_string()), + algorithm, + key_tag, + UnixTime::now(), + true, + ) + .map_err(|e| format!("unable to add ZSK {zsk_pub_url}: {e}\n"))?; + + let new = vec![ksk_pub_url, zsk_pub_url]; + (new, new_urls) + }; + 
Ok((new_stored, new_urls)) +} + +/// Return the right RollType for a RollVariant. +fn roll_variant_to_roll(roll_variant: RollVariant) -> RollType { + // For key type, such as KSK and ZSK, that can have different rolls, we + // we should find out which variant is used. + match roll_variant { + RollVariant::Ksk => RollType::KskRoll, + RollVariant::Zsk => RollType::ZskRoll, + RollVariant::Csk => RollType::CskRoll, + RollVariant::Algorithm => RollType::AlgorithmRoll, + } +} + +/// Implementation of the Import subcommands. +fn import_command( + subcommand: ImportCommands, + ksc: &KeySetConfig, + kss: &mut KeySetState, + env: &impl Env, + state_changed: &mut bool, +) -> Result<(), Error> { + match subcommand { + ImportCommands::PublicKey { path } => { + let public_data = std::fs::read_to_string(&path) + .map_err(|e| format!("unable read from file {}: {e}", path.display()))?; + + let public_key = parse_from_bind::>(&public_data) + .map_err(|e| format!("unable to parse public key file {}: {e}", path.display()))?; + + let path = absolute(&path) + .map_err(|e| format!("unable to make {} absolute: {}", path.display(), e))?; + let public_key_url = "file://".to_owned() + &path.display().to_string(); + kss.keyset + .add_public_key( + public_key_url.clone(), + public_key.data().algorithm(), + public_key.data().key_tag(), + UnixTime::now(), + true, + ) + .map_err(|e| format!("unable to add public key {public_key_url}: {e}\n"))?; + kss.keyset + .set_present(&public_key_url, true) + .expect("should not happen"); + + // What about visible. We should visible when DNSKEY RRset has + // propagated. But we are not doing a key roll now. Just set it + // unconditionally. 
+ kss.keyset + .set_visible(&public_key_url, UnixTime::now()) + .expect("should not happen"); + } + ImportCommands::Ksk { subcommand } => { + import_key_command(subcommand, KeyVariant::Ksk, kss)?; + } + ImportCommands::Zsk { subcommand } => { + import_key_command(subcommand, KeyVariant::Zsk, kss)?; + } + ImportCommands::Csk { subcommand } => { + import_key_command(subcommand, KeyVariant::Csk, kss)?; + } + } + *state_changed = true; + + // Update the DNSKEY RRset if is is not empty. We don't want to create + // and incomplete DNSKEY RRset. + if !kss.dnskey_rrset.is_empty() { + update_dnskey_rrset(ksc, kss, env, true)?; + } + Ok(()) +} + +/// Implement import subcommand for a specific key type. +fn import_key_command( + subcommand: ImportKeyCommands, + key_variant: KeyVariant, + kss: &mut KeySetState, +) -> Result<(), Error> { + let (public_key_url, private_key_url, algorithm, key_tag, coupled) = match subcommand { + ImportKeyCommands::File { + path, + coupled, + private_key, + } => { + let private_path = match private_key { + Some(private_key) => private_key, + None => { + if path.extension() != Some(OsStr::new("key")) { + return Err(format!("public key {} should end in .key, use --private-key to specify a private key separately", path.display()).into()); + } + path.with_extension("private") + } + }; + let private_data = std::fs::read_to_string(&private_path) + .map_err(|e| format!("unable read from file {}: {e}", private_path.display()))?; + let secret_key = SecretKeyBytes::parse_from_bind(&private_data).map_err(|e| { + format!( + "unable to parse private key file {}: {e}", + private_path.display() + ) + })?; + let public_data = std::fs::read_to_string(&path) + .map_err(|e| format!("unable read from file {}: {e}", path.display()))?; + let public_key = parse_from_bind::>(&public_data) + .map_err(|e| format!("unable to parse public key file {}: {e}", path.display()))?; + + // Check the consistency of the public and private key pair. 
+ let _key_pair = KeyPair::from_bytes(&secret_key, public_key.data()).map_err(|e| { + format!( + "private key {} and public key {} do not match: {e}", + private_path.display(), + path.display() + ) + })?; + + if public_key.owner() != kss.keyset.name() { + return Err(format!( + "public key {} has wrong owner name {}, expected {}", + path.display(), + public_key.owner(), + kss.keyset.name() + ) + .into()); + } + + let path = absolute(&path) + .map_err(|e| format!("unable to make {} absolute: {}", path.display(), e))?; + let private_path = absolute(&private_path).map_err(|e| { + format!("unable to make {} absolute: {}", private_path.display(), e) + })?; + let public_key_url = "file://".to_owned() + &path.display().to_string(); + let private_key_url = "file://".to_owned() + &private_path.display().to_string(); + + ( + public_key_url, + private_key_url, + public_key.data().algorithm(), + public_key.data().key_tag(), + coupled, + ) + } + #[cfg(feature = "kmip")] + ImportKeyCommands::Kmip { + server, + public_id, + private_id, + algorithm, + flags, + coupled, + } => { + let pool = kss.kmip.get_pool(&server)?; + let keypair = + kmip::sign::KeyPair::from_metadata(algorithm, flags, &private_id, &public_id, pool) + .map_err(|e| { + format!("error constructing key pair on KMIP server '{server}': {e}") + })?; + let public_key_url = keypair.public_key_url(); + let private_key_url = keypair.private_key_url(); + ( + public_key_url.to_string(), + private_key_url.to_string(), + keypair.algorithm(), + keypair.dnskey().key_tag(), + coupled, + ) + } + }; + let mut set_at_parent = false; + let mut set_rrsig_visible = false; + match key_variant { + KeyVariant::Ksk => { + kss.keyset + .add_key_ksk( + public_key_url.clone(), + Some(private_key_url.clone()), + algorithm, + key_tag, + UnixTime::now(), + true, + ) + .map_err(|e| { + format!("unable to add KSK {public_key_url}/{private_key_url}: {e}\n") + })?; + set_at_parent = true; + } + KeyVariant::Zsk => { + kss.keyset + .add_key_zsk( + 
public_key_url.clone(), + Some(private_key_url.clone()), + algorithm, + key_tag, + UnixTime::now(), + true, + ) + .map_err(|e| format!("unable to add ZSK {public_key_url}: {e}\n"))?; + set_rrsig_visible = true; + } + KeyVariant::Csk => { + kss.keyset + .add_key_csk( + public_key_url.clone(), + Some(private_key_url.clone()), + algorithm, + key_tag, + UnixTime::now(), + true, + ) + .map_err(|e| format!("unable to add CSK {public_key_url}: {e}\n"))?; + set_at_parent = true; + set_rrsig_visible = true; + } + } + + kss.keyset + .set_present(&public_key_url, true) + .expect("should not happen"); + + // What about visible? We should visible when the DNSKEY + // RRset has propagated. But we are not doing a key roll + // now. Just set it unconditionally. + kss.keyset + .set_visible(&public_key_url, UnixTime::now()) + .expect("should not happen"); + + kss.keyset + .set_signer(&public_key_url, true) + .expect("should not happen"); + + kss.keyset + .set_decoupled(&public_key_url, !coupled) + .expect("should not happen"); + + if set_at_parent { + kss.keyset + .set_at_parent(&public_key_url, true) + .expect("should not happen"); + + // What about ds_visible? We should ds_visible when the DS + // RRset has propagated. But we are not doing a key roll + // now. Just set it unconditionally. + kss.keyset + .set_ds_visible(&public_key_url, UnixTime::now()) + .expect("should not happen"); + } + if set_rrsig_visible { + // We should set rrsig_visible when the zone's RRSIG records + // have propagated. But we are not doing a key roll + // now. Just set it unconditionally. + kss.keyset + .set_rrsig_visible(&public_key_url, UnixTime::now()) + .expect("should not happen"); + } + Ok(()) +} + +/// Implement the remove-key subcommand. +fn remove_key_command( + key: String, + force: bool, + continue_flag: bool, + kss: &mut KeySetState, +) -> Result<(), Error> { + // The strategy depends on whether the key is decoupled or not. 
+ // If the key is decoupled, then just remove the key from the keyset and + // leave underlying keys where they are. + // If the key is not decoupled, then we also need to remove the underlying + // keys. In that case, first check if the key is stale or if force is set. + // Then remove the private key (if any). If that fails abort unless + // continue is set. Then remove the public key. If that fails and the + // private key is remove then just log an error. Finally remove the key + // from the keyset. + // If force is true, then mark the key stale before removing. + let Some(k) = kss.keyset.keys().get(&key) else { + return Err(format!("key {key} not found").into()); + }; + let k = k.clone(); + if k.decoupled() { + if force { + kss.keyset.set_stale(&key).expect("should not fail"); + } + kss.keyset + .delete_key(&key) + .map_err(|e| format!("unable to remove key {key}: {e}").into()) + } else { + let stale = match k.keytype() { + KeyType::Ksk(keystate) | KeyType::Zsk(keystate) | KeyType::Include(keystate) => { + keystate.stale() + } + KeyType::Csk(ksk_keystate, zsk_keystate) => { + ksk_keystate.stale() && zsk_keystate.stale() + } + }; + if !stale && !force { + return Err(format!( + "unable to remove key {key}. Key is not stale. Use --force to override" + ) + .into()); + } + + // If there is a private key then try to remove that one first. We + // don't want lingering private key when something else fails. + if let Some(privref) = k.privref() { + let private_key_url = Url::parse(privref) + .map_err(|e| format!("unable to parse {privref} as Url: {e}"))?; + let res = remove_key(kss, private_key_url); + if !continue_flag { + res?; + } else if let Err(e) = res { + error!("unable to remove key {privref}: {e}"); + } + } + + // Move on to the public key. 
+ let public_key_url = + Url::parse(&key).map_err(|e| format!("unable to parse {key} as Url: {e}"))?; + let res = remove_key(kss, public_key_url); + if k.privref().is_some() || continue_flag { + // Ignore errors removing a public key if we previously removed + // (or tried to remove) a private key. Or if we are told to + // continue. + if let Err(e) = res { + error!("unable to remove key {key}: {e}"); + } + } else { + res?; + } + if force { + kss.keyset.set_stale(&key).expect("should not fail"); + } + kss.keyset + .delete_key(&key) + .map_err(|e| format!("unable to remove key {key}: {e}").into()) + } +} + +/// Take a URL, get the public key and return a Record<_, Dnskey<_>>. +#[allow(unused_variables)] +fn public_key_from_url( + pub_url: &Url, + ksc: &KeySetConfig, + kss: &mut KeySetState, + env: &impl Env, +) -> Result, Dnskey>, Error> +where + Octs: FromBuilder + OctetsFrom>, + >>::Error: Display, +{ + match pub_url.scheme() { + "file" => { + let path = pub_url.path(); + let filename = env.in_cwd(&path); + + let public_data = std::fs::read_to_string(&filename) + .map_err(|e| format!("unable read from file {}: {e}", filename.display()))?; + let mut public_key = parse_from_bind::>(&public_data).map_err(|e| { + format!( + "unable to parse public key file {}: {e}", + filename.display() + ) + })?; + + public_key.set_ttl(ksc.default_ttl); + let public_key = Record::try_octets_from(public_key) + .map_err(|e| format!("try_octets_from failed: {e}"))?; + Ok(public_key) + } + + #[cfg(feature = "kmip")] + "kmip" => { + let kmip_key_url = KeyUrl::try_from(pub_url.clone())?; + let flags = kmip_key_url.flags(); + let kmip_conn_pool = kss.kmip.get_pool(kmip_key_url.server_id())?; + let key = domain::crypto::kmip::PublicKey::for_key_url(kmip_key_url, kmip_conn_pool) + .map_err(|err| format!("Failed to fetch public key for KMIP key URL: {err}"))?; + let owner: Name = kss + .keyset + .name() + .clone() + .try_flatten_into() + .map_err(|e| format!(".try_flatten_into failed: {e}"))?; 
+ let record = Record::new( + owner, + Class::IN, + ksc.default_ttl, + Dnskey::try_octets_from(key.dnskey(flags)) + .map_err(|e| format!("try_octets_from failed: {e}"))?, + ); + Ok(record) + } + + _ => { + panic!("unsupported scheme in {pub_url}"); + } + } +} + +/* +Test for RRSIG check +- records before the zone +- records after the zone +- DNSKEY/CDS/CDNSKEY + - at apex + - not at apex +- delegations + - with DS/NSEC + - with A/AAAA at the delegations + - other records at the delegations + - below delegation +- bad sig NSEC3 +- bad sig not NSEC3 +*/ diff --git a/src/commands/keyset/kmip.rs b/src/commands/keyset/kmip.rs new file mode 100644 index 00000000..7f32f7ad --- /dev/null +++ b/src/commands/keyset/kmip.rs @@ -0,0 +1,1856 @@ +//! KMIP support for the keyset subcommand. +//! +//! KMIP (OASIS Key Management Interoperability Protocol) is a specification +//! for communicating with HSMs (Hardware Security Modules) that implement +//! secure cryptographic key generation and signing of data using generated +//! keys. +//! +//! The functions and types in this module are used to extend `dnst keyset` to +//! support KMIP based cryptographic keys as well as the default Ring/OpenSSL +//! based keys. + +// Note: Currently this is only used by `dnst keyset` but one can imagine it +// also being used by `dnst keygen`, `dnst key2ds` and `dnst signzone`. It may +// make sense to move the pure KMIP content from here to say src/kmip.rs and +// only keep the `dnst keyset` specific KMIP content in this module. One would +// also then need a way to configure which KMIP server the other subcommands +// should use and might want to also at that point consider a `dnst`-wide +// config mechanism for KMIP servers, e.g. `dnst kmip` or `dnst cfg kmip` or +// something. 
+ +use std::{ + collections::HashMap, + fmt::Formatter, + fs::{File, OpenOptions}, + io::{BufReader, BufWriter, Seek, SeekFrom, Write}, + ops::Not, + path::{Path, PathBuf}, + str::FromStr, + time::Duration, +}; + +use clap::Subcommand; +use domain::{ + base::{name::ToLabelIter, Name, NameBuilder}, + crypto::kmip::{ClientCertificate, ConnectionSettings, KeyUrl}, + dep::kmip::client::pool::{ConnectionManager, KmipConnError, SyncConnPool}, +}; +use serde::{Deserialize, Serialize}; +use url::Url; + +use crate::{ + commands::keyset::{parse_duration, KeySetState}, + env::Env, + error::Error, +}; + +/// The default TCP port on which to connect to a KMIP server as defined by +/// IANA. +// TODO: Move this to the `kmip-protocol` crate? +pub const DEF_KMIP_PORT: u16 = 5696; + +//------------ KmipCommands -------------------------------------------------- + +/// Commands for configuring the use of KMIP compatible HSMs for key +/// generation and signing instead of or in addition to using and Ring/OpenSSL +/// based key generation and signing. +#[allow(clippy::large_enum_variant)] +#[derive(Clone, Debug, Subcommand)] +pub enum KmipCommands { + /// Disable use of KMIP for generating new keys. + /// + /// Existing KMIP keys will still work as normal, but any new keys will + /// be generated using Ring/OpenSSL whether or not KMIP servers are + /// configured. + /// + /// To re-enable KMIP use: kmip set-default-server. + Disable, + + /// Add a KMIP server to use for key generation & signing. + /// + /// If this is the first KMIP server to be configured it will be set + /// as the default KMIP server which will be used to generate new keys + /// instead of using Ring/OpenSSL based key generation. + /// + /// If this is NOT the first KMIP server to be configured, the default + /// KMIP server will be left as-is, either unset or set to an existing + /// KMIP server. + /// + /// Use 'kmip set-default-server' to change the default KMIP server. 
+ AddServer { + /// An identifier to refer to the KMIP server by. + /// + /// This identifier is used in KMIP key URLs. The identifier serves + /// several purposes: + /// + /// 1. To make it easy at a glance to recognize which KMIP server a + /// given key was created on, by allowing operators to assign a + /// meaningful name to the server instead of whatever identity + /// strings the server associates with itself or by using hostnames + /// or IP addresses as identifiers. + /// + /// 2. To refer to additional configuration elsewhere to avoid + /// including sensitive and/or verbose KMIP server credential or + /// TLS client certificate/key authentication data in the URL, + /// and which would be repeated in every key created on the same + /// server. + /// + /// 3. To allow the actual location of the server and/or its access + /// credentials to be rotated without affecting the key URLs, e.g. + /// if a server is assigned a new IP address or if access + /// credentials change. + /// + /// The downside of this is that consumers of the key URL must also + /// possess the additional configuration settings and be able to fetch + /// them based on the same server identifier. + server_id: String, + + /// The hostname or IP address of the KMIP server. + ip_host_or_fqdn: String, + + /// TCP port to connect to the KMIP server on. + #[arg(help_heading = "Server", long = "port", default_value_t = DEF_KMIP_PORT)] + port: u16, + + /// Add the server but don't make it the default. + #[arg(help_heading = "Server", long = "pending", default_value_t = false, action = clap::ArgAction::SetTrue)] + pending: bool, + + /// Optional path to a JSON file to read/write username/password credentials from/to. + /// + /// The format of the file (at the time of writing) is like so: + /// { + /// "server_id": { + /// "username": "xxxx", + /// "password": "yyyy", + /// } + /// [, "another_server_id": { ... 
}] + /// } + #[arg(help_heading = "Client Credentials", long = "credential-store")] + credentials_store_path: Option, + + /// Optional username to authenticate to the KMIP server as. + #[arg( + help_heading = "Client Credentials", + long = "username", + requires = "credentials_store_path" + )] + username: Option, + + /// Optional password to authenticate to the KMIP server with. + #[arg( + help_heading = "Client Credentials", + long = "password", + requires = "username" + )] + password: Option, + + /// Optional path to a TLS certificate to authenticate to the KMIP + /// server with. + #[arg( + help_heading = "Client Certificate Authentication", + long = "client-cert", + requires = "client_key_path" + )] + client_cert_path: Option, + + /// Optional path to a private key for client certificate + /// authentication. + /// + /// The private key is needed to be able to prove to the KMIP server + /// that you are the owner of the provided TLS client certificate. + #[arg( + help_heading = "Client Certificate Authentication", + long = "client-key", + requires = "client_cert_path" + )] + client_key_path: Option, + + /// Whether or not to accept the KMIP server TLS certificate without + /// verifying it. + /// + /// Set to false if using a self-signed TLS certificate, e.g. in a + /// test environment. + #[arg(help_heading = "Server Certificate Verification", long = "insecure", default_value_t = false, action = clap::ArgAction::SetTrue)] + insecure: bool, + + /// Optional path to a TLS PEM certificate for the server. + #[arg(help_heading = "Server Certificate Verification", long = "server-cert")] + server_cert_path: Option, + + /// Optional path to a TLS PEM certificate for a Certificate Authority. + #[arg(help_heading = "Server Certificate Verification", long = "ca-cert")] + ca_cert_path: Option, + + /// TCP connect timeout. 
+ // Note: This should be low otherwise the CLI user experience when + // running a command that interacts with a KMIP server, like `dnst + // init`, is that the command hangs if the KMIP server is not running + // or not reachable, until the timeout expires, and one would expect + // that under normal circumstances establishing a TCP connection to + // the KMIP server should be quite quick. + // Note: Does this also include time for TLS setup? + #[arg(help_heading = "Client Limits", long = "connect-timeout", value_parser = parse_duration, default_value = "3s")] + connect_timeout: Duration, + + /// TCP response read timeout. + // Note: This should be high otherwise for HSMs that are slow to + // respond, like the YubiHSM, we time out the connection while waiting + // for the response when generating keys. + #[arg(help_heading = "Client Limits", long = "read-timeout", value_parser = parse_duration, default_value = "30s")] + read_timeout: Duration, + + /// TCP request write timeout. + #[arg(help_heading = "Client Limits", long = "write-timeout", value_parser = parse_duration, default_value = "3s")] + write_timeout: Duration, + + /// Maximum KMIP response size to accept (in bytes). + #[arg( + help_heading = "Client Limits", + long = "max-response-bytes", + default_value_t = 8192 + )] + max_response_bytes: u32, + + /// Optional user supplied key label prefix. + /// + /// Can be used to denote the s/w that created the key, and/or to + /// indicate which installation/environment it belongs to, e.g. dev, + /// test, prod, etc. + #[arg(help_heading = "Key Labels", long = "key-label-prefix")] + key_label_prefix: Option, + + /// Maximum label length (in bytes) permitted by the HSM. + #[arg( + help_heading = "Key Labels", + long = "key-label-max-bytes", + default_value_t = 32 + )] + key_label_max_bytes: u8, + }, + + /// Modify an existing KMIP server configuration. + ModifyServer { + /// The identifier of the KMIP server. 
+        server_id: String,
+
+        /// Modify the hostname or IP address of the KMIP server.
+        #[arg(help_heading = "Server", long = "address")]
+        ip_host_or_fqdn: Option<String>,
+
+        /// Modify the TCP port to connect to the KMIP server on.
+        #[arg(help_heading = "Server", long = "port")]
+        port: Option<u16>,
+
+        /// Disable use of username / password authentication.
+        ///
+        /// Note: This will remove any credentials from the credential-store
+        /// for this server id.
+        #[arg(help_heading = "Client Credentials", long = "no-credentials", action = clap::ArgAction::SetTrue)]
+        no_credentials: bool,
+
+        /// Modify the path to a JSON file to read/write username/password
+        /// credentials from/to.
+        #[arg(help_heading = "Client Credentials", long = "credential-store")]
+        credentials_store_path: Option<PathBuf>,
+
+        /// Modify the username to authenticate to the KMIP server as.
+        #[arg(help_heading = "Client Credentials", long = "username")]
+        username: Option<String>,
+
+        /// Modify the password to authenticate to the KMIP server with.
+        #[arg(help_heading = "Client Credentials", long = "password")]
+        password: Option<String>,
+
+        /// Disable use of TLS client certificate authentication.
+        #[arg(help_heading = "Client Certificate Authentication", long = "no-client-auth", action = clap::ArgAction::SetTrue)]
+        no_client_auth: bool,
+
+        /// Modify the path to the TLS certificate to authenticate to the KMIP
+        /// server with.
+        #[arg(
+            help_heading = "Client Certificate Authentication",
+            long = "client-cert"
+        )]
+        client_cert_path: Option<PathBuf>,
+
+        /// Modify the path to the private key for client certificate
+        /// authentication.
+        #[arg(
+            help_heading = "Client Certificate Authentication",
+            long = "client-key"
+        )]
+        client_key_path: Option<PathBuf>,
+
+        /// Modify whether or not to accept the KMIP server TLS certificate
+        /// without verifying it.
+        #[arg(help_heading = "Server Certificate Verification", long = "insecure")]
+        insecure: Option<bool>,
+
+        /// Modify the path to a TLS PEM certificate for the server.
+ #[arg(help_heading = "Server Certificate Verification", long = "server-cert")] + server_cert_path: Option, + + /// Optional path to a TLS PEM certificate for a Certificate Authority. + #[arg(help_heading = "Server Certificate Verification", long = "ca-cert")] + ca_cert_path: Option, + + /// Modify the TCP connect timeout. + #[arg(help_heading = "Client Limits", long = "connect-timeout", value_parser = parse_duration)] + connect_timeout: Option, + + /// Modify the TCP response read timeout. + #[arg(help_heading = "Client Limits", long = "read-timeout", value_parser = parse_duration)] + read_timeout: Option, + + /// Modify the TCP request write timeout. + #[arg(help_heading = "Client Limits", long = "write-timeout", value_parser = parse_duration)] + write_timeout: Option, + + /// Modify the maximum KMIP response size to accept (in bytes). + #[arg(help_heading = "Client Limits", long = "max-response-bytes")] + max_response_bytes: Option, + + /// Optional user supplied key label prefix. + /// + /// Can be used to denote the s/w that created the key, and/or to + /// indicate which installation/environment it belongs to, e.g. dev, + /// test, prod, etc. + #[arg(help_heading = "Key Labels", long = "key-label-prefix")] + key_label_prefix: Option, + + /// Maximum label length (in bytes) permitted by the HSM. + #[arg(help_heading = "Key Labels", long = "key-label-max-bytes")] + key_label_max_bytes: Option, + }, + + /// Remove an existing non-default KMIP server. + /// + /// To remove the default KMIP server use `kmip disable` first. + RemoveServer { + /// The identifier of the KMIP server to remove. + server_id: String, + }, + + /// Set the default KMIP server to use for key generation. + SetDefaultServer { + /// The identifier of the KMIP server to use as the default. + server_id: String, + }, + + /// Get the details of an existing KMIP server. + GetServer { + /// The identifier of the KMIP server to get. + server_id: String, + }, + + /// List all configured KMIP servers. 
+ ListServers, +} + +//------------ kmip_command() ------------------------------------------------ + +/// Process a `dnst keyset kmip` command. +pub fn kmip_command( + env: &impl Env, + cmd: KmipCommands, + kss: &mut KeySetState, +) -> Result { + match cmd { + KmipCommands::Disable => { + kss.kmip.default_server_id = None; + } + + KmipCommands::AddServer { + server_id, + ip_host_or_fqdn, + port, + pending, + credentials_store_path, + username, + password, + client_cert_path, + client_key_path, + insecure, + server_cert_path, + ca_cert_path, + connect_timeout, + read_timeout, + write_timeout, + max_response_bytes, + key_label_prefix, + key_label_max_bytes, + } => { + // Handle only the valid cases. Let Clap reject the invalid cases + // with a helpful error message, e.g. password without username is + // not allowed. + + let credentials = match (credentials_store_path, username, password) { + (Some(credentials_store_path), Some(username), password) => { + Some(KmipClientCredentialsConfig { + credentials_store_path, + credentials: Some(KmipClientCredentials { username, password }), + }) + } + (Some(credentials_store_path), _, _) => Some(KmipClientCredentialsConfig { + credentials_store_path, + credentials: None, + }), + _ => None, + }; + + let client_auth = match (client_cert_path, client_key_path) { + (Some(cert_path), Some(private_key_path)) => { + Some(KmipClientTlsCertificateAuthConfig { + cert_path, + private_key_path, + }) + } + _ => None, + }; + + let server_auth = KmipServerTlsCertificateVerificationConfig { + verify_certificate: insecure.not(), + server_cert_path, + ca_cert_path, + }; + + let limits = KmipClientLimits { + connect_timeout, + read_timeout, + write_timeout, + max_response_bytes, + }; + + let key_label_cfg = KeyLabelConfig { + max_label_bytes: key_label_max_bytes, + supports_relabeling: true, + prefix: key_label_prefix.unwrap_or_default(), + }; + + add_kmip_server( + &mut kss.kmip, + server_id, + ip_host_or_fqdn, + port, + pending, + 
credentials, + client_auth, + server_auth, + limits, + key_label_cfg, + )?; + } + + KmipCommands::ModifyServer { + server_id, + ip_host_or_fqdn, + port, + no_credentials, + credentials_store_path, + username, + password, + no_client_auth, + client_cert_path, + client_key_path, + insecure, + server_cert_path, + ca_cert_path, + connect_timeout, + read_timeout, + write_timeout, + max_response_bytes, + key_label_prefix, + key_label_max_bytes, + } => { + let mut crl_credentials_store_path = ChangeRemoveLeave::Leave; + let mut crl_username = ChangeRemoveLeave::Leave; + let mut crl_password = ChangeRemoveLeave::Leave; + let mut crl_client_cert_path = ChangeRemoveLeave::Leave; + let mut crl_client_key_path = ChangeRemoveLeave::Leave; + let mut crl_server_cert_path = ChangeRemoveLeave::Leave; + let mut crl_ca_cert_path = ChangeRemoveLeave::Leave; + + if no_credentials { + crl_credentials_store_path = ChangeRemoveLeave::Remove; + crl_username = ChangeRemoveLeave::Remove; + crl_password = ChangeRemoveLeave::Remove; + } else { + if let Some(v) = credentials_store_path { + crl_credentials_store_path = ChangeRemoveLeave::Change(v); + } + if let Some(v) = username { + crl_username = ChangeRemoveLeave::Change(v); + } + if let Some(v) = password { + crl_password = ChangeRemoveLeave::Change(v); + } + } + + if no_client_auth { + crl_client_cert_path = ChangeRemoveLeave::Remove; + crl_client_key_path = ChangeRemoveLeave::Remove; + } else { + if let Some(v) = client_cert_path { + crl_client_cert_path = ChangeRemoveLeave::Change(v); + } + if let Some(v) = client_key_path { + crl_client_key_path = ChangeRemoveLeave::Change(v); + } + } + + if let Some(v) = server_cert_path { + crl_server_cert_path = ChangeRemoveLeave::Change(v); + } + if let Some(v) = ca_cert_path { + crl_ca_cert_path = ChangeRemoveLeave::Change(v); + } + + modify_kmip_server( + &mut kss.kmip, + &server_id, + ip_host_or_fqdn, + port, + crl_credentials_store_path, + crl_username, + crl_password, + crl_client_cert_path, + 
crl_client_key_path, + insecure, + crl_server_cert_path, + crl_ca_cert_path, + connect_timeout, + read_timeout, + write_timeout, + max_response_bytes, + key_label_prefix, + key_label_max_bytes, + ) + .map_err(|err| { + Error::new(&format!( + "unable to modify configuration for KMIP server '{server_id}': {err}" + )) + })?; + } + + KmipCommands::RemoveServer { server_id } => { + remove_kmip_server(kss, server_id)?; + } + + KmipCommands::SetDefaultServer { server_id } => { + if !kss.kmip.servers.contains_key(&server_id) { + return Err(format!("KMIP server id '{server_id}' is not known").into()); + } + kss.kmip.default_server_id = Some(server_id); + } + + KmipCommands::GetServer { server_id } => { + let Some(server) = kss.kmip.servers.get(&server_id) else { + return Err(format!("KMIP server id '{server_id}' is not known").into()); + }; + + write!(env.stdout(), "{server}"); + + return Ok(false); + } + + KmipCommands::ListServers => { + write!(env.stdout(), "{}", &kss.kmip); + return Ok(false); + } + } + + Ok(true) +} + +//------------- remove_kmip_server() ----------------------------------------- + +/// Remove a KMIP server and its credentials. +/// +/// Removes the specified KMIP server from the configuration, and any +/// associated referenced credentials. +/// +/// Returns an error if: +/// - The KMIP server is the current default. +/// - The KMIP server is in use by any known keys. +/// - A referenced credentials file could not be updated to remove +/// credentials for the server being removed. +fn remove_kmip_server(kss: &mut KeySetState, server_id: String) -> Result<(), Error> { + if kss.kmip.default_server_id.as_ref() == Some(&server_id) { + return Err(format!( + "KMIP server '{server_id}' cannot be removed as it is the current default. Use kmip disable first." 
+ ) + .into()); + } + + if kss.keyset.keys().iter().any(|(key_url_str, _)| { + if let Ok(url) = Url::parse(key_url_str) { + if let Ok(key_url) = KeyUrl::try_from(url) { + if key_url.server_id() == server_id { + return true; + } + } + } + false + }) { + return Err(format!( + "KMIP server '{server_id}' cannot be removed as there are still keys using it." + ) + .into()); + } + + let removed = kss.kmip.servers.remove(&server_id); + + if let Some(credentials_path) = removed.and_then(|s| s.client_credentials_path) { + let _ = remove_kmip_client_credentials(&server_id, &credentials_path)?; + } + + Ok(()) +} + +/// Remove credentials from a file, removing the file entirely if then empty. +fn remove_kmip_client_credentials( + server_id: &str, + credentials_path: &Path, +) -> Result { + let mut credentials_file = + KmipClientCredentialsFile::new(credentials_path, KmipServerCredentialsFileMode::ReadWrite)?; + + let removed_creds = credentials_file.remove(server_id).ok_or(Error::new(&format!("unable to remove credentials for KMIP server '{server_id}' from credentials file {}: server id does not exist in the file", credentials_path.display())))?; + + credentials_file.save()?; + + if credentials_file.is_empty() { + drop(credentials_file); + std::fs::remove_file(credentials_path).map_err(|e| { + Error::new(&format!( + "unable to remove empty credentials file {} for KMIP server '{server_id}': {e}", + credentials_path.display(), + )) + })?; + } + + Ok(removed_creds) +} + +//------------ add_kmip_server() --------------------------------------------- + +/// Adds a KMIP server to the configured set. +/// +/// Sensitive credentials must be referenced from separate files, we do not +/// allow them to be stored directly in the main configuration. +/// +/// To make it easier for users to store username/password credentials we +/// support writing them to the JSON file for the user using credentials +/// specified on the command line. 
We also support reading from a pre-existing
+/// JSON credentials file, assuming a user was able to create one by hand.
+///
+/// The format of the file (at the time of writing) is like so:
+///
+/// {
+///     "server_id": {
+///         "username": "xxxx",
+///         "password": "yyyy",
+///     }
+/// }
+///
+/// Note: We do not (yet?) support protection against accidental leakage of
+/// secrets in memory (e.g. via the secrecy crate) because the secrecy crate
+/// SecretBox type cannot be cloned, thus would have to be both read from disk
+/// for every request, and doing so would need to be supported all the way
+/// down to the KMIP message wire serialization in the kmip-protocol crate,
+/// plus the crate explicitly warns against creating a Serde Serialize impl
+/// for SecretBox'd data and so requires you to manually impl that yourself.
+#[allow(clippy::too_many_arguments)]
+fn add_kmip_server(
+    kmip: &mut KmipState,
+    server_id: String,
+    ip_host_or_fqdn: String,
+    port: u16,
+    pending: bool,
+    credentials: Option<KmipClientCredentialsConfig>,
+    client_cert_auth: Option<KmipClientTlsCertificateAuthConfig>,
+    server_cert_verification: KmipServerTlsCertificateVerificationConfig,
+    client_limits: KmipClientLimits,
+    key_label_config: KeyLabelConfig,
+) -> Result<(), Error> {
+    if kmip.servers.contains_key(&server_id) {
+        return Err(Error::new(&format!(
+            "unable to add KMIP server '{server_id}': server already exists!"
+        )));
+    }
+
+    let client_credentials_path = match credentials {
+        // No credentials supplied.
+        // Use unauthenticated access to the KMIP server.
+        None => None,
+
+        Some(KmipClientCredentialsConfig {
+            credentials_store_path,
+            credentials,
+        }) => {
+            let mut credentials_file = KmipClientCredentialsFile::new(
+                &credentials_store_path,
+                KmipServerCredentialsFileMode::CreateReadWrite,
+            )?;
+
+            if let Some(credentials) = credentials {
+                if credentials_file
+                    .insert(server_id.clone(), credentials)
+                    .is_some()
+                {
+                    // Don't accidentally change existing credentials.
+ return Err(Error::new(&format!("unable to add KMIP credentials to file {}: server '{server_id}' already exists.", credentials_store_path.display()))); + } + credentials_file.save()?; + } else { + // Only credentials path supplied. + // Check that it contains credentials for the specified server. + if !credentials_file.contains(&server_id) { + return Err(Error::new(&format!("unable to add KMIP server '{server_id}': credentials for server not found in {}", credentials_store_path.display()))); + } + } + + Some(credentials_store_path) + } + }; + + let settings = KmipServerConnectionConfig { + server_addr: ip_host_or_fqdn, + server_port: port, + server_cert_verification, + client_credentials_path, + client_cert_auth, + client_limits, + key_label_config, + }; + + kmip.servers.insert(server_id.clone(), settings); + + if !pending && kmip.servers.len() == 1 { + kmip.default_server_id = Some(server_id); + } + + Ok(()) +} + +//------------ ChangeRemoveLeave --------------------------------------------- + +/// Should a setting be changed, removed or left as-is? +enum ChangeRemoveLeave { + /// The setting should be changed to the given value. + Change(T), + + /// The setting should be removed as if it were never set by the user. + Remove, + + /// The setting should be left unchanged at its current value. + Leave, +} + +//------------ modify_kmip_server() ------------------------------------------ + +/// Modify the settings of a currently configured KMIP server. 
+#[allow(clippy::too_many_arguments)] +fn modify_kmip_server( + kmip: &mut KmipState, + server_id: &str, + ip_host_or_fqdn: Option, + port: Option, + credentials_store_path: ChangeRemoveLeave, + username: ChangeRemoveLeave, + password: ChangeRemoveLeave, + client_cert_path: ChangeRemoveLeave, + client_key_path: ChangeRemoveLeave, + server_insecure: Option, + server_cert_path: ChangeRemoveLeave, + ca_cert_path: ChangeRemoveLeave, + connect_timeout: Option, + read_timeout: Option, + write_timeout: Option, + max_response_bytes: Option, + key_label_prefix: Option, + key_label_max_bytes: Option, +) -> Result<(), Error> { + let Some(mut cfg) = kmip.servers.remove(server_id) else { + return Err("server does not exist!".into()); + }; + + cfg.server_addr = ip_host_or_fqdn.unwrap_or(cfg.server_addr); + cfg.server_port = port.unwrap_or(cfg.server_port); + + // Handle changed credentials. + cfg.client_credentials_path = match (credentials_store_path, username, password) { + (ChangeRemoveLeave::Leave, ChangeRemoveLeave::Leave, ChangeRemoveLeave::Leave) => { + // Nothing to do. + cfg.client_credentials_path + } + + (ChangeRemoveLeave::Remove, ChangeRemoveLeave::Change(_), _) + | (ChangeRemoveLeave::Remove, _, ChangeRemoveLeave::Change(_)) + | (ChangeRemoveLeave::Leave, ChangeRemoveLeave::Remove, ChangeRemoveLeave::Change(_)) => { + return Err("cannot remove credentials and change credentials at the same time".into()); + } + + (ChangeRemoveLeave::Change(_), ChangeRemoveLeave::Remove, _) => { + return Err("cannot move credentials and remove credentials at the same time".into()); + } + + (ChangeRemoveLeave::Remove, _, _) => { + // Remove any existing stored credentials. + if let Some(path) = &cfg.client_credentials_path { + let _ = remove_kmip_client_credentials(server_id, path)?; + } + None + } + + (ChangeRemoveLeave::Change(new_path), username, password) => { + // Change the file used to store credentials. 
If the credentials
+            // are not being changed, move them from the old file to the
+            // new file. Otherwise remove them from the old file and add the
+            // new credentials to the new file.
+
+            // Remove the old credentials file.
+            let creds = if let Some(p) = cfg.client_credentials_path {
+                let mut creds = remove_kmip_client_credentials(server_id, &p)?;
+                // Adjust credentials if needed.
+                match username {
+                    ChangeRemoveLeave::Change(v) => creds.username = v,
+                    ChangeRemoveLeave::Remove => unreachable!(), // Handled above
+                    ChangeRemoveLeave::Leave => { /* Nothing to do */ }
+                }
+                match password {
+                    ChangeRemoveLeave::Change(v) => creds.password = Some(v),
+                    ChangeRemoveLeave::Remove => creds.password = None,
+                    ChangeRemoveLeave::Leave => { /* Nothing to do */ }
+                }
+                creds
+            } else {
+                let username = match username {
+                    ChangeRemoveLeave::Change(v) => v,
+                    ChangeRemoveLeave::Remove => unreachable!(), // Handled above
+                    ChangeRemoveLeave::Leave => {
+                        return Err("cannot use existing username as none was found".into())
+                    }
+                };
+                let password = match password {
+                    ChangeRemoveLeave::Change(v) => Some(v),
+                    ChangeRemoveLeave::Remove => None,
+                    ChangeRemoveLeave::Leave => None,
+                };
+                KmipClientCredentials { username, password }
+            };
+
+            // Open the new credentials file.
+            let mut new_creds_file = KmipClientCredentialsFile::new(
+                &new_path,
+                KmipServerCredentialsFileMode::CreateReadWrite,
+            )?;
+
+            // Insert credentials and save them.
+            let _ = new_creds_file.insert(server_id.to_string(), creds);
+            new_creds_file.save()?;
+            Some(new_path)
+        }
+
+        (ChangeRemoveLeave::Leave, _, _) if cfg.client_credentials_path.is_none() => {
+            return Err("cannot change client credentials that don't exist".into());
+        }
+
+        (ChangeRemoveLeave::Leave, username, password) => {
+            // Open the existing credentials file.
+ let mut creds_file = KmipClientCredentialsFile::new( + cfg.client_credentials_path.as_ref().unwrap(), // SAFETY: Checked for is_none() above + KmipServerCredentialsFileMode::ReadWrite, + )?; + + let creds = if let Some(mut creds) = creds_file.remove(server_id) { + // Adjust credentials if needed. + match username { + ChangeRemoveLeave::Change(v) => creds.username = v, + ChangeRemoveLeave::Remove => unreachable!(), // Handled above + ChangeRemoveLeave::Leave => { /* Nothing to do */ } + } + match password { + ChangeRemoveLeave::Change(v) => creds.password = Some(v), + ChangeRemoveLeave::Remove => creds.password = None, + ChangeRemoveLeave::Leave => { /* Nothing to do */ } + } + creds + } else { + // Create new credentials. + let ChangeRemoveLeave::Change(username) = username else { + return Err( + "cannot change credentials that do not exist if no username is supplied" + .into(), + ); + }; + let password = match password { + ChangeRemoveLeave::Change(v) => Some(v), + ChangeRemoveLeave::Remove => None, + ChangeRemoveLeave::Leave => None, + }; + KmipClientCredentials { username, password } + }; + + // (re-)insert the credentials and save them. + let _ = creds_file.insert(server_id.to_string(), creds); + creds_file.save()?; + cfg.client_credentials_path + } + }; + + // Handle changed client certificate authentication. + cfg.client_cert_auth = match (client_cert_path, client_key_path) { + (ChangeRemoveLeave::Leave, ChangeRemoveLeave::Leave) => { + // Use the current values. + cfg.client_cert_auth + } + + (ChangeRemoveLeave::Remove, ChangeRemoveLeave::Remove) => { + // Forget the current values. + None + } + + (ChangeRemoveLeave::Remove, _) | (_, ChangeRemoveLeave::Remove) => { + return Err("cannot remove only one of the client certificate or client key.".into()); + } + + (cert_path, key_path) => { + // Adjust the settings as needed. 
+            let cert_path = match cert_path {
+                ChangeRemoveLeave::Change(v) => v,
+                ChangeRemoveLeave::Remove => unreachable!(), // Handled above
+                ChangeRemoveLeave::Leave => cfg
+                    .client_cert_auth
+                    .as_ref()
+                    .map(|v| v.cert_path.clone())
+                    .ok_or::<Error>(
+                        "cannot configure client certificate authentication without a client certificate path"
+                            .into(),
+                    )?,
+            };
+            let private_key_path = match key_path {
+                ChangeRemoveLeave::Change(v) => v,
+                ChangeRemoveLeave::Remove => unreachable!(), // Handled above
+                ChangeRemoveLeave::Leave => cfg
+                    .client_cert_auth
+                    .as_ref()
+                    .map(|v| v.private_key_path.clone())
+                    .ok_or::<Error>(
+                        "cannot configure client certificate authentication without a private key path"
+                            .into(),
+                    )?,
+            };
+
+            Some(KmipClientTlsCertificateAuthConfig {
+                cert_path,
+                private_key_path,
+            })
+        }
+    };
+
+    // Handle changed server certificate verification.
+    if let Some(v) = server_insecure {
+        cfg.server_cert_verification.verify_certificate = v.not();
+    }
+    match server_cert_path {
+        ChangeRemoveLeave::Change(v) => cfg.server_cert_verification.server_cert_path = Some(v),
+        ChangeRemoveLeave::Remove => cfg.server_cert_verification.server_cert_path = None,
+        ChangeRemoveLeave::Leave => { /* Nothing to do */ }
+    }
+    match ca_cert_path {
+        ChangeRemoveLeave::Change(v) => cfg.server_cert_verification.ca_cert_path = Some(v),
+        ChangeRemoveLeave::Remove => cfg.server_cert_verification.ca_cert_path = None,
+        ChangeRemoveLeave::Leave => { /* Nothing to do */ }
+    }
+
+    if let Some(v) = connect_timeout {
+        cfg.client_limits.connect_timeout = v;
+    }
+    if let Some(v) = read_timeout {
+        cfg.client_limits.read_timeout = v;
+    }
+    if let Some(v) = write_timeout {
+        cfg.client_limits.write_timeout = v;
+    }
+    if let Some(v) = max_response_bytes {
+        cfg.client_limits.max_response_bytes = v;
+    }
+
+    if let Some(v) = key_label_prefix {
+        cfg.key_label_config.prefix = v;
+    }
+    if let Some(v) = key_label_max_bytes {
+        cfg.key_label_config.max_label_bytes = v;
+    }
+
+    kmip.servers.insert(server_id.to_string(), 
cfg); + + if kmip.servers.len() == 1 { + kmip.default_server_id = Some(server_id.to_string()); + } + + Ok(()) +} + +//------------ KmipClientCredentialsConfig ----------------------------------- + +/// Optional disk file based credentials for connecting to a KMIP server. +pub struct KmipClientCredentialsConfig { + pub credentials_store_path: PathBuf, + pub credentials: Option, +} + +//------------ KmipClientCredentials ----------------------------------------- + +/// Credentials for connecting to a KMIP server. +/// +/// Intended to be read from a JSON file stored separately to the main +/// configuration so that separate security policy can be applied to sensitive +/// credentials. +#[derive(Debug, Deserialize, Serialize)] +pub struct KmipClientCredentials { + /// KMIP username credential. + /// + /// Mandatory if the KMIP "Credential Type" is "Username and Password". + /// + /// See: https://docs.oasis-open.org/kmip/spec/v1.2/os/kmip-spec-v1.2-os.html#_Toc409613458 + pub username: String, + + /// KMIP password credential. + /// + /// Optional when KMIP "Credential Type" is "Username and Password". + /// + /// See: https://docs.oasis-open.org/kmip/spec/v1.2/os/kmip-spec-v1.2-os.html#_Toc409613458 + #[serde(skip_serializing_if = "Option::is_none", default)] + pub password: Option, +} + +//------------ KmipClientCredentialSet --------------------------------------- + +/// A set of KMIP server credentials. +#[derive(Debug, Default, Deserialize, Serialize)] +struct KmipClientCredentialsSet(HashMap); + +//------------ KmipClientCredentialsFileMode --------------------------------- + +/// The access mode to use when accessing a credentials file. +#[derive(Debug)] +pub enum KmipServerCredentialsFileMode { + /// Open an existing credentials file for reading. Saving will fail. + ReadOnly, + + /// Open an existing credentials file for reading and writing. + ReadWrite, + + /// Open or create the credentials file for reading and writing. 
+ CreateReadWrite, +} + +//--- impl Display + +impl std::fmt::Display for KmipServerCredentialsFileMode { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + KmipServerCredentialsFileMode::ReadOnly => write!(f, "read-only"), + KmipServerCredentialsFileMode::ReadWrite => write!(f, "read-write"), + KmipServerCredentialsFileMode::CreateReadWrite => write!(f, "create-read-write"), + } + } +} + +//------------ KmipServerCredentialsFile ------------------------------------- + +/// A KMIP server credential set file. +#[derive(Debug)] +pub struct KmipClientCredentialsFile { + /// The file from which the credentials were loaded, and will be saved + /// back to. + file: File, + + /// The path from which the file was loaded. Used for generating error + /// messages. + path: PathBuf, + + /// The actual set of loaded credentials. + credentials: KmipClientCredentialsSet, + + /// The read/write/create mode. + #[allow(dead_code)] + mode: KmipServerCredentialsFileMode, +} + +impl KmipClientCredentialsFile { + /// Load credentials from disk. + /// + /// Optionally: + /// - Create the file if missing. + /// - Keep the file open for writing back changes. See ['Self::save()`]. + pub fn new(path: &Path, mode: KmipServerCredentialsFileMode) -> Result { + let read; + let write; + let create; + + match mode { + KmipServerCredentialsFileMode::ReadOnly => { + read = true; + write = false; + create = false; + } + KmipServerCredentialsFileMode::ReadWrite => { + read = true; + write = true; + create = false; + } + KmipServerCredentialsFileMode::CreateReadWrite => { + read = true; + write = true; + create = true; + } + } + + let file = OpenOptions::new() + .read(read) + .write(write) + .create(create) + .truncate(false) + .open(path) + .map_err(|e| { + format!( + "unable to open KMIP credentials file {} in {mode} mode: {e}", + path.display() + ) + })?; + + // Determine the length of the file as JSON parsing fails if the file + // is completely empty. 
+ let len = file.metadata().map(|m| m.len()).map_err(|e| { + format!( + "unable to query metadata of KMIP credentials file {}: {e}", + path.display() + ) + })?; + + // Buffer reading as apparently JSON based file reading is extremely + // slow without buffering, even for small files. + let mut reader = BufReader::new(&file); + + // Load or create the credential set. + let credentials: KmipClientCredentialsSet = if len > 0 { + serde_json::from_reader(&mut reader).map_err(|e| { + format!( + "error loading KMIP credentials file {:?}: {e}\n", + path.display() + ) + })? + } else { + KmipClientCredentialsSet::default() + }; + + // Save the path for use in generating error messages. + let path = path.to_path_buf(); + + Ok(KmipClientCredentialsFile { + file, + path, + credentials, + mode, + }) + } + + /// Write the credential set back to the file it was loaded from. + pub fn save(&mut self) -> Result<(), Error> { + // Ensure that writing happens at the start of the file. + self.file + .seek(SeekFrom::Start(0)) + .map_err(|e| format!("seek to start failed: {e}"))?; + + // Use a buffered writer as writing JSON to a file directly is + // apparently very slow, even for small files. + // + // Enclose the use of the BufWriter in a block so that it is + // definitely no longer using the file when we next act on it. + { + let mut writer = BufWriter::new(&self.file); + serde_json::to_writer_pretty(&mut writer, &self.credentials).map_err(|e| { + format!( + "error writing KMIP credentials file {}: {e}", + self.path.display() + ) + })?; + + // Ensure that the BufWriter is flushed as advised by the + // BufWriter docs. + writer.flush().map_err(|e| format!("flush failed: {e}"))?; + } + + // Truncate the file to the length of data we just wrote.. 
+        let pos = self
+            .file
+            .stream_position()
+            .map_err(|e| format!("unable to get stream position: {e}"))?;
+        self.file
+            .set_len(pos)
+            .map_err(|e| format!("unable to set file length: {e}"))?;
+
+        // Ensure that any write buffers are flushed.
+        self.file
+            .flush()
+            .map_err(|e| format!("flush failed: {e}"))?;
+
+        Ok(())
+    }
+
+    /// Does this credential set include credentials for the specified KMIP
+    /// server.
+    pub fn contains(&self, server_id: &str) -> bool {
+        self.credentials.0.contains_key(server_id)
+    }
+
+    #[allow(dead_code)]
+    fn get(&self, server_id: &str) -> Option<&KmipClientCredentials> {
+        self.credentials.0.get(server_id)
+    }
+
+    /// Add credentials for the specified KMIP server, replacing any that
+    /// previously existed for the same server.
+    ///
+    /// Returns any previous configuration if found.
+    pub fn insert(
+        &mut self,
+        server_id: String,
+        credentials: KmipClientCredentials,
+    ) -> Option<KmipClientCredentials> {
+        self.credentials.0.insert(server_id, credentials)
+    }
+
+    /// Remove any existing configuration for the specified KMIP server.
+    ///
+    /// Returns any previous configuration if found.
+    pub fn remove(&mut self, server_id: &str) -> Option<KmipClientCredentials> {
+        self.credentials.0.remove(server_id)
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.credentials.0.is_empty()
+    }
+}
+
+//------------ KmipClientTlsCertificateAuthConfig ----------------------------
+
+/// Configuration for KMIP TLS client certificate based authentication.
+///
+/// Both certificate and key file must be present and must be in PEM format.
+// Note: We only support PEM format, not PKCS#12, because the underlying
+// kmip-protocol TLS "drivers" for rustls and OpenSSL both don't actually
+// support PKCS#12 even though taking it as config input.
+#[derive(Clone, Debug, Deserialize, Serialize)]
+pub struct KmipClientTlsCertificateAuthConfig {
+    /// Path to the PEM format client certificate file.
+    pub cert_path: PathBuf,
+
+    /// Path to the PEM format client private key file.
+    pub private_key_path: PathBuf,
+}
+
+//------------ KmipServerTlsCertificateVerificationConfig --------------------
+
+/// Configuration for KMIP TLS certificate verification.
+#[derive(Clone, Debug, Deserialize, Serialize)]
+pub struct KmipServerTlsCertificateVerificationConfig {
+    /// Whether or not to enable server certificate verification.
+    #[serde(default)]
+    pub verify_certificate: bool,
+
+    /// Path to the server certificate file in PEM format.
+    #[serde(skip_serializing_if = "Option::is_none", default)]
+    pub server_cert_path: Option<PathBuf>,
+
+    /// Path to the server CA certificate file in PEM format.
+    #[serde(skip_serializing_if = "Option::is_none", default)]
+    pub ca_cert_path: Option<PathBuf>,
+}
+
+//--- impl Default
+
+impl Default for KmipServerTlsCertificateVerificationConfig {
+    fn default() -> Self {
+        Self {
+            verify_certificate: true,
+            server_cert_path: None,
+            ca_cert_path: None,
+        }
+    }
+}
+
+//------------ KmipClientLimits ----------------------------------------------
+
+/// Limits to be imposed on the KMIP client when communicating with a KMIP
+/// server.
+#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct KmipClientLimits { + /// TCP connect timeout + pub connect_timeout: Duration, + + /// TCP read timeout + pub read_timeout: Duration, + + /// TCP write timeout + pub write_timeout: Duration, + + /// Maximum number of HSM response bytes to accept + pub max_response_bytes: u32, +} + +impl std::fmt::Display for KmipClientLimits { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + writeln!( + f, + "Connect Timeout: {} seconds", + self.connect_timeout.as_secs() + )?; + writeln!( + f, + "Read Timeout: {} seconds", + self.read_timeout.as_secs() + )?; + writeln!( + f, + "Write Timeout: {} seconds", + self.write_timeout.as_secs() + )?; + writeln!( + f, + "Max Response Size: {} bytes", + self.max_response_bytes + ) + } +} + +//------------ KeyLabelConfig ------------------------------------------------ + +/// Whether and how to relabel KMIP keys with human readable labels. +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct KeyLabelConfig { + /// Maximum label length. + pub max_label_bytes: u8, + + /// Supports re-labeling. + /// + /// Defaults to true, will be changed to false if relabeling fails to + /// avoid further attempts to relabel. + pub supports_relabeling: bool, + + /// Optional user supplied key label prefix. + /// + /// E.g. to denote the s/w that created the key, and/or to indicate which + /// installation/environment it belongs to, e.g. dev, test, prod, etc. + pub prefix: String, +} + +impl std::fmt::Display for KeyLabelConfig { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + writeln!(f, "Prefix: {}", self.prefix)?; + writeln!(f, "Max Bytes: {}", self.max_label_bytes,)?; + writeln!( + f, + "Supports Re-Labeling: {}", + self.supports_relabeling + )?; + Ok(()) + } +} + +//------------ KmipServerConnectionConfig ------------------------------------ + +/// Settings for connecting to a KMIP HSM server. 
+#[derive(Clone, Debug, Deserialize, Serialize)]
+pub struct KmipServerConnectionConfig {
+    /// IP address, hostname or FQDN of the KMIP server.
+    pub server_addr: String,
+
+    /// The TCP port number on which the KMIP server listens.
+    pub server_port: u16,
+
+    /// KMIP server TLS certificate verification configuration.
+    pub server_cert_verification: KmipServerTlsCertificateVerificationConfig,
+
+    /// The credentials to authenticate with the KMIP server.
+    #[serde(skip_serializing_if = "Option::is_none", default)]
+    pub client_credentials_path: Option<PathBuf>,
+
+    /// KMIP client TLS certificate authentication configuration.
+    #[serde(skip_serializing_if = "Option::is_none", default)]
+    pub client_cert_auth: Option<KmipClientTlsCertificateAuthConfig>,
+
+    /// Limits to be applied by the KMIP client
+    pub client_limits: KmipClientLimits,
+
+    /// Key labeling configuration.
+    pub key_label_config: KeyLabelConfig,
+}
+
+//--- impl Display
+
+/// Displays in multi-line tabulated format like so:
+///
+/// ```text
+/// Address: 127.0.0.1:5696
+/// Server Certificate Verification: Disabled
+/// Server Certificate: None
+/// Certificate Authority Certificate: None
+/// Client Credentials: /tmp/x.creds
+/// Client Certificate Authentication: Disabled
+/// Client Limits:
+/// Connect Timeout: 10 seconds
+/// Read Timeout: 10 seconds
+/// Write Timeout: 10 seconds
+/// Max Response Size: 8192 bytes
+/// ```
+impl std::fmt::Display for KmipServerConnectionConfig {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        use std::fmt::Write;
+
+        fn opt_path_to_string(p: &Option<PathBuf>) -> String {
+            match p {
+                Some(p) => p.display().to_string(),
+                None => "None".to_string(),
+            }
+        }
+
+        writeln!(
+            f,
+            "Address: {}:{}",
+            self.server_addr, self.server_port
+        )?;
+        let enabled = match self.server_cert_verification.verify_certificate {
+            true => "Enabled",
+            false => "Disabled",
+        };
+        writeln!(f, "Server Certificate Verification: {enabled}")?;
+        writeln!(
+            f,
+            "Server Certificate: {}",
opt_path_to_string(&self.server_cert_verification.server_cert_path) + )?; + writeln!( + f, + "Certificate Authority Certificate: {}", + opt_path_to_string(&self.server_cert_verification.ca_cert_path) + )?; + writeln!( + f, + "Client Credentials: {}", + opt_path_to_string(&self.client_credentials_path) + )?; + match &self.client_cert_auth { + Some(cfg) => { + writeln!(f, "Client Certificate Authentication: Enabled")?; + writeln!( + f, + " Client Certificate: {}", + cfg.cert_path.display() + )?; + writeln!( + f, + " Private Key: {}", + cfg.private_key_path.display() + )?; + } + None => { + writeln!(f, "Client Certificate Authentication: Disabled")?; + } + } + + { + writeln!(f, "Client Limits:")?; + let mut indented = indenter::indented(f); + write!(indented, "{}", self.client_limits)?; + } + + { + writeln!(f, "Key Label Config:")?; + let mut indented = indenter::indented(f); + write!(indented, "{}", self.key_label_config)?; + } + + Ok(()) + } +} + +impl KmipServerConnectionConfig { + /// Load KMIP connection configuration data into memory. + /// + /// Load and parse the various credential data that can optionally + /// be associated with KMIP connection settings from the separate + /// files on disk where they are stored, and return a populated + /// `ConnectionSettings` object containing the resulting data. + /// + /// TODO: Currently lacks support for configuring timeouts and other + /// limits that the KMIP client can enforce. By default there are no such + /// limits. 
+    pub fn load(&self, server_id: &str) -> Result<ConnectionSettings, Error> {
+        let client_cert = self.load_client_cert()?;
+        let server_cert = self.load_server_cert()?;
+        let ca_cert = self.load_ca_cert()?;
+        let (username, password) = self.load_credentials(server_id)?;
+        Ok(ConnectionSettings {
+            host: self.server_addr.clone(),
+            port: self.server_port,
+            username,
+            password,
+            insecure: self.server_cert_verification.verify_certificate.not(),
+            client_cert,
+            server_cert,
+            ca_cert,
+            connect_timeout: Some(self.client_limits.connect_timeout),
+            read_timeout: Some(self.client_limits.read_timeout),
+            write_timeout: Some(self.client_limits.write_timeout),
+            max_response_bytes: Some(self.client_limits.max_response_bytes),
+        })
+    }
+
+    /// Load and parse PEM TLS client certificate and key files.
+    ///
+    /// TLS client certificate and key files can be used to authenticate
+    /// against KMIP servers that are configured to require such
+    /// authentication.
+    fn load_client_cert(&self) -> Result<Option<ClientCertificate>, Error> {
+        match &self.client_cert_auth {
+            Some(cfg) => Ok(Some(ClientCertificate::SeparatePem {
+                cert_bytes: Self::load_binary_file(&cfg.cert_path)?,
+                key_bytes: Self::load_binary_file(&cfg.private_key_path)?,
+            })),
+            None => Ok(None),
+        }
+    }
+
+    /// Load and parse a PEM format TLS server certificate.
+    ///
+    /// The certificate contains a public key which can be used to verify the
+    /// identity of the remote KMIP server.
+    fn load_server_cert(&self) -> Result<Option<Vec<u8>>, Error> {
+        Ok(match &self.server_cert_verification.server_cert_path {
+            Some(p) => Some(Self::load_binary_file(p)?),
+            None => None,
+        })
+    }
+
+    /// Load and parse a PEM format TLS certificate authority certificate.
+    ///
+    /// The certificate can be used to verify the issuing authority of the
+    /// TLS server certificate, thereby verifying not just that the server is
+    /// the owner of the certificate but that the certificate was issued by a
+    /// trusted party.
+    fn load_ca_cert(&self) -> Result<Option<Vec<u8>>, Error> {
+        Ok(match &self.server_cert_verification.ca_cert_path {
+            Some(p) => Some(Self::load_binary_file(p)?),
+            None => None,
+        })
+    }
+
+    /// Load credentials from disk for authenticating with a KMIP server.
+    ///
+    /// Currently supports only one credential type:
+    /// - Username and optional password.
+    ///
+    /// In the case of Nameshed-HSM-Relay the username is the PKCS#11 slot
+    /// label and the password is the PKCS#11 user PIN.
+    fn load_credentials(&self, server_id: &str) -> Result<(Option<String>, Option<String>), Error> {
+        if let Some(p) = &self.client_credentials_path {
+            let mut file =
+                KmipClientCredentialsFile::new(p, KmipServerCredentialsFileMode::ReadOnly)?;
+            if let Some(creds) = file.remove(server_id) {
+                return Ok((Some(creds.username), creds.password));
+            }
+        }
+        Ok((None, None))
+    }
+
+    /// Load an arbitrary file as unparsed bytes into memory.
+    ///
+    /// TODO: Limit how many bytes we will read?
+    fn load_binary_file(path: &Path) -> Result<Vec<u8>, Error> {
+        use std::{fs::File, io::Read};
+
+        let mut bytes = Vec::new();
+        File::open(path)
+            .map_err(|e| format!("unable to open {}: {e}", path.display()))?
+            .read_to_end(&mut bytes)
+            .map_err(|e| format!("reading from {} failed: {e}", path.display()))?;
+
+        Ok(bytes)
+    }
+}
+
+//--- Conversions
+
+impl From<KmipConnError> for Error {
+    fn from(err: KmipConnError) -> Self {
+        Error::new(&format!("KMIP connection error: {err}"))
+    }
+}
+
+//------------ KmipState -----------------------------------------------------
+
+/// KMIP related state.
+///
+/// Part of [`KeySetState`].
+#[derive(Default, Deserialize, Serialize)]
+pub struct KmipState {
+    /// KMIP servers to use, keyed by user chosen HSM id.
+    pub servers: HashMap<String, KmipServerConnectionConfig>,
+
+    /// Which KMIP server should new keys be created in, if any?
+    #[serde(skip_serializing_if = "Option::is_none", default)]
+    pub default_server_id: Option<String>,
+
+    /// The current set of KMIP server pools.
+ #[serde(skip)] + pub pools: HashMap, +} + +impl KmipState { + /// Get the default KMIP server pool, if any. + /// + /// Requires KeySetConfig::default_kmip_server to be set. The pool will be + /// created if needed. + /// + /// Returns Ok(None) if no default KMIP server is set. + pub fn get_default_pool(&mut self) -> Result, Error> { + if self.default_server_id.is_some() { + let id = self.default_server_id.clone().unwrap(); + return self.get_pool(&id).map(Some); + } + Ok(None) + } + + /// Get the server pool for a specific KMIP server ID. + /// + /// Requires the server ID to exist in KeySetConfig::kmip_servers. + /// The pool will be created if needed. + /// + /// Returns Ok(pool) or Err if the server ID is not known or the pool + /// cannot be created. + pub fn get_pool(&mut self, id: &str) -> Result { + match self.pools.get(id) { + Some(pool) => Ok(pool.clone()), + None => { + let Some(srv_conn_settings) = self.servers.get(id) else { + return Err(format!("No KMIP server config exists for server '{id}'").into()); + }; + let conn_settings = srv_conn_settings.load(id).map_err(|err| { + format!("Unable to prepare KMIP connection settings for server '{id}': {err}") + })?; + // TODO: Should the timeouts used here be configurable and/or set to some + // other value? 
+ let pool = ConnectionManager::create_connection_pool( + id.to_string(), + conn_settings.into(), + 1, + Some(Duration::from_secs(60)), + Some(Duration::from_secs(60)), + ) + .map_err(|err| format!("Failed to create KMIP connection pool: {err}"))?; + + self.pools.insert(id.to_string(), pool.clone()); + Ok(pool) + } + } + } +} + +//--- impl Display + +/// Displays in muti-line tabulated format like so: +/// +/// ```text +/// Servers: +/// ID: my_server_x [DEFAULT] +/// Address: 127.0.0.1:5696 +/// Server Certificate Verification: Disabled +/// Server Certificate: None +/// Certificate Authority Certificate: None +/// Client Certificate Authentication: Disabled +/// ID: my_server +/// Address: 127.0.0.1:5696 +/// Server Certificate Verification: Disabled +/// Server Certificate: None +/// Certificate Authority Certificate: None +/// Client Certificate Authentication: Enabled +/// Client Certificate: /blah +/// Private Key: /tmp/tmp +/// ``` +impl std::fmt::Display for KmipState { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + writeln!(f, "Servers:")?; + for (server_id, cfg) in &self.servers { + let default = match Some(server_id) == self.default_server_id.as_ref() { + true => " [DEFAULT]", + false => "", + }; + use std::fmt::Write; + let mut indented = indenter::indented(f); + writeln!(indented, "ID: {server_id}{default}")?; + + let mut twice_indented = indenter::indented(&mut indented); + write!(twice_indented, "{cfg}")?; + } + Ok(()) + } +} + +/// Construct from parts a KMIP key label. 
+pub fn format_key_label(
+    prefix: &str,
+    zone_name: &str,
+    key_tag: &str,
+    key_type: &str,
+    suffix: &str,
+    max_label_bytes: usize,
+) -> Result<String, Error> {
+    let mut public_key_label = format!("{prefix}{zone_name}-{key_tag}-{key_type}{suffix}");
+    if public_key_label.len() > max_label_bytes {
+        let diff = public_key_label.len() - max_label_bytes;
+        let max_zone_name_len = zone_name.len().saturating_sub(diff);
+        if max_zone_name_len < 8 {
+            return Err(format!("Insufficient space to include a useful (partial) zone name in generated KMIP key label: {max_zone_name_len} < 8").into());
+        }
+        // If the name is a valid DNS name, truncate it by
+        // keeping the right most label (the TLD) but removing
+        // labels one by one prior to that until the name is
+        // short enough.
+        let zone_name = truncate_zone_name(zone_name.to_string(), max_zone_name_len);
+        public_key_label = format!("{prefix}{zone_name}-{key_tag}-{key_type}{suffix}");
+    }
+    Ok(public_key_label)
+}
+
+/// Truncate a zone name to a maximum length.
+///
+/// First attempt to truncate by removing labels under the TLD label, falling
+/// back to truncating to N bytes from the start if needed.
+fn truncate_zone_name(mut zone_name: String, max_zone_name_len: usize) -> String {
+    if zone_name.len() <= max_zone_name_len {
+        return zone_name;
+    }
+    if max_zone_name_len > 0 {
+        if let Ok(dns_name) = Name::<Vec<u8>>::from_str(&zone_name) {
+            // We can only shorten names that have at least
+            // three labels.
+            let num_labels = dns_name.iter_labels().count();
+            if num_labels >= 3 {
+                let mut end_name = NameBuilder::new_vec();
+
+                // Append prior labels until the current
+                // length + '.' + the final label + '.' would
+                // be too long.
+                let mut labels = dns_name.iter_labels().rev();
+
+                // Keep the root and TLD labels.
+                end_name
+                    .append_label(labels.next().unwrap().as_slice())
+                    .unwrap();
+                end_name
+                    .append_label(labels.next().unwrap().as_slice())
+                    .unwrap();
+
+                // Append labels from the left as long as they fit.
+ let mut labels = dns_name.iter_labels(); + let mut start_name = NameBuilder::new_vec(); + for _ in 0..num_labels - 2 { + let label = labels.next().unwrap(); + // Minus one to allow space for the '..' that will be used + // instead of '.' to signify that label based truncation + // occurred. + if start_name.len() + label.len() + end_name.len() < (max_zone_name_len - 1) { + start_name.append_label(label.as_slice()).unwrap(); + } + } + + if !start_name.is_empty() { + // Build final name + let mut zone_name = start_name.finish().to_string(); + zone_name.push_str(".."); + zone_name.push_str(&end_name.into_name().unwrap().to_string()); + zone_name.push('.'); + + if zone_name.len() <= max_zone_name_len { + return zone_name; + } + } + } + } + } + + zone_name.truncate(max_zone_name_len); + zone_name +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_truncate_zone_name() { + // Name already shorter than the truncation length + assert_eq!(&truncate_zone_name("".to_string(), 5), ""); + assert_eq!(&truncate_zone_name("nl.".to_string(), 5), "nl."); + + // Names longer than the truncation length but the labels under the + // TLD are too long to allow shortening by dropping of labels, instead + // shortening is done by brute truncation. + assert_eq!(&truncate_zone_name("nlnetlabs.nl.".to_string(), 5), "nlnet"); + assert_eq!( + &truncate_zone_name("a.b.c.d.nlnetlabs.nl.".to_string(), 5), + "a.b.c" + ); + + // Names longer than the truncation length and has labels under the + // TLD that are short enough to permit truncation by dropping of labels + // in the middle. A double dot (..) indicates that truncation occurred. + assert_eq!( + &truncate_zone_name("a.b.c.d.nlnetlabs.nl.".to_string(), 10), + "a.b.c..nl." + ); + assert_eq!( + &truncate_zone_name("a.b.c.d.nlnetlabs.nl.".to_string(), 12), + "a.b.c.d..nl." + ); + assert_eq!( + &truncate_zone_name("a.b.c.d.nlnetlabs.nl.".to_string(), 19), + "a.b.c.d..nl." 
+ ); + assert_eq!( + &truncate_zone_name("a.b.c.d.nlnetlabs.nl.".to_string(), 20), + "a.b.c.d..nl." + ); + + // Name is equal to the truncation length so no truncation needed. + assert_eq!( + &truncate_zone_name("a.b.c.d.nlnetlabs.nl.".to_string(), 21), + "a.b.c.d.nlnetlabs.nl." + ); + } + + #[test] + fn test_format_key_label() { + assert_eq!( + format_key_label("", "a.b.c.d.nlnetlabs.nl.", "12345", "ksk", "", 20).unwrap(), + "a.b.c..nl.-12345-ksk" + ); + assert_eq!( + format_key_label("", "a.b.c.d.nlnetlabs.nl.", "12345", "ksk", "", 31).unwrap(), + "a.b.c.d.nlnetlabs.nl.-12345-ksk" + ); + assert_eq!( + format_key_label( + "prefix-", + "a.b.c.d.nlnetlabs.nl.", + "12345", + "ksk", + "-suffix", + 45 + ) + .unwrap(), + "prefix-a.b.c.d.nlnetlabs.nl.-12345-ksk-suffix" + ); + + // Max len too short to hold the generated label. + assert!(format_key_label("", "a.b.c.d.nlnetlabs.nl.", "12345", "ksk", "", 10).is_err()); + } +} diff --git a/src/commands/keyset/mod.rs b/src/commands/keyset/mod.rs new file mode 100644 index 00000000..65035ae2 --- /dev/null +++ b/src/commands/keyset/mod.rs @@ -0,0 +1,6 @@ +pub mod cmd; + +#[cfg(feature = "kmip")] +pub mod kmip; + +pub use cmd::*; diff --git a/src/commands/mod.rs b/src/commands/mod.rs index e7b9909b..d05d5ec8 100644 --- a/src/commands/mod.rs +++ b/src/commands/mod.rs @@ -2,6 +2,7 @@ pub mod help; pub mod key2ds; pub mod keygen; +pub mod keyset; pub mod notify; pub mod nsec3hash; pub mod signzone; @@ -46,6 +47,10 @@ pub enum Command { #[command(name = "keygen", verbatim_doc_comment)] Keygen(self::keygen::Keygen), + /// Maintain a set of DNSSEC keys. EXPERIMENTAL. 
+ #[command(name = "keyset")] + Keyset(self::keyset::Keyset), + /// Generate a DS RR from the DNSKEYS in keyfile /// /// The following file will be created for each key: @@ -94,10 +99,12 @@ impl Command { match self { Self::Key2ds(key2ds) => key2ds.execute(env), Self::Keygen(keygen) => keygen.execute(env), + Self::Keyset(keyset) => keyset.execute(env), Self::Nsec3Hash(nsec3hash) => nsec3hash.execute(env), Self::Notify(notify) => notify.execute(env), Self::SignZone(signzone) => signzone.execute(env), Self::Update(update) => update.execute(env), + // Self::Help(help) => help.execute(env), Self::LdnsUpdate(ldnsupdate) => ldnsupdate.execute(env), Self::Report(s) => { writeln!(env.stdout(), "{s}");