From 37f4324044104f348f72ae84906965497a2892ac Mon Sep 17 00:00:00 2001
From: jan <152862650+j-haacker@users.noreply.github.com>
Date: Wed, 9 Oct 2024 22:58:34 +0200
Subject: [PATCH] add docs

---
 .buildinfo                       |   2 +-
 _sources/getting_started.rst.txt |   2 +
 _sources/index.rst.txt           |  17 +-
 _sources/prerequisites.rst.txt   |  50 +++++
 _sources/tests.rst.txt           |  15 ++
 _sources/tutorials.rst.txt       |   9 +
 _static/basic.css                |   2 +-
 _static/doctools.js              |   2 +-
 _static/language_data.js         |   4 +-
 _static/searchtools.js           | 170 +++++++++------
 cryoswath.gis.html               |  24 ++-
 cryoswath.l1b.html               | 357 +++++++++++++++++++++++--------
 cryoswath.l2.html                |  14 +-
 cryoswath.l3.html                |  24 ++-
 cryoswath.l4.html                |  14 +-
 cryoswath.misc.html              | 233 ++++++++++++++------
 cryoswath.test_plots.html        |  36 ++--
 genindex.html                    |  60 ++----
 getting_started.html             | 121 +++++++++++
 index.html                       |  25 ++-
 objects.inv                      | Bin 1266 -> 1320 bytes
 prerequisites.html               | 163 ++++++++++++++
 py-modindex.html                 |  16 +-
 search.html                      |   6 +-
 searchindex.js                   |   2 +-
 tests.html                       | 129 +++++++++++
 tutorials.html                   | 125 +++++++++++
 27 files changed, 1269 insertions(+), 353 deletions(-)
 create mode 100644 _sources/getting_started.rst.txt
 create mode 100644 _sources/prerequisites.rst.txt
 create mode 100644 _sources/tests.rst.txt
 create mode 100644 _sources/tutorials.rst.txt
 create mode 100644 getting_started.html
 create mode 100644 prerequisites.html
 create mode 100644 tests.html
 create mode 100644 tutorials.html

diff --git a/.buildinfo b/.buildinfo
index 1507286..902e6c4 100644
--- a/.buildinfo
+++ b/.buildinfo
@@ -1,4 +1,4 @@
 # Sphinx build info version 1
 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: a3bc22a83fed366b93fb1c3e9dbb11dd
+config: 880689b6b6a958c63327a07fb27cb241
 tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/_sources/getting_started.rst.txt b/_sources/getting_started.rst.txt
new file mode 100644
index 0000000..a13831d
--- /dev/null
+++ b/_sources/getting_started.rst.txt
@@ -0,0 +1,2 @@
+Getting started
+===============
diff --git a/_sources/index.rst.txt b/_sources/index.rst.txt
index 4614e72..b2a9dda 100644
--- a/_sources/index.rst.txt
+++ b/_sources/index.rst.txt
@@ -6,10 +6,19 @@
 Welcome to cryoswath's documentation!
 =====================================
 
+Find the associated GitHub repository at https://github.com/j-haacker/cryoswath.
+To set up a working/testing environment, follow the steps described in :doc:`prerequisites`.
+If needed, see the quickstart guide in :doc:`Getting started <getting_started>`.
+
+
 .. toctree::
    :maxdepth: 1
    :caption: Contents:
 
+   prerequisites
+   getting_started
+   tutorials
+   tests
    cryoswath.l1b
    cryoswath.l2
    cryoswath.l3
@@ -17,11 +26,3 @@ Welcome to cryoswath's documentation!
    cryoswath.misc
    cryoswath.gis
    cryoswath.test_plots
-
-
-Indices and tables
-==================
-
-* :ref:`genindex`
-* :ref:`modindex`
-* :ref:`search`
diff --git a/_sources/prerequisites.rst.txt b/_sources/prerequisites.rst.txt
new file mode 100644
index 0000000..b1f391d
--- /dev/null
+++ b/_sources/prerequisites.rst.txt
@@ -0,0 +1,50 @@
+Prerequisites
+=============
+
+.. _install:
+
+Installation
+------------
+
+To install cryoswath, simply clone the GitHub repository:
+
+``git clone git@github.com:j-haacker/cryoswath.git``
+
+This will set up a directory structure, download the package, and download some small auxiliary files.
+Large resource dependencies need to be downloaded manually.
+
+Data dependencies
+-----------------
+
+cryoswath needs a reference elevation model.
+Currently, ArcticDEM and REMA of the Polar Geospatial Center, University of Minnesota (https://www.pgc.umn.edu/data/) are supported.
+To use other sources, add their paths to :func:`cryoswath.misc.get_dem_reader` (see the lines following line 459 in the frozen revision; this may differ from the current version).
+Deposit them in ``data/auxiliary/DEM`` or change ``dem_path`` in :mod:`cryoswath.misc` to your needs.
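+
+Adding another DEM usually amounts to an extra branch in ``get_dem_reader``.
+The sketch below only illustrates that idea and is not cryoswath's actual code: the ``region`` argument, the branching, and the file names are made up, and ``dem_path`` is assumed to be a ``pathlib.Path``.
+
+.. code-block:: python
+
+   # Hypothetical sketch of a get_dem_reader extension; adapt to the real function.
+   import rasterio
+
+   from cryoswath.misc import dem_path
+
+   def get_dem_reader(region: str):
+       # dispatch on the region of interest (branches are illustrative)
+       if region == "arctic":
+           return rasterio.open(dem_path / "arcticdem_mosaic.tif")
+       if region == "antarctic":
+           return rasterio.open(dem_path / "rema_mosaic.tif")
+       # your custom DEM goes here
+       return rasterio.open(dem_path / "my_regional_dem.tif")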
+
+Further, if you would like to take advantage of the basin shapes provided in the Randolph Glacier Inventory, download them as needed.
+Make sure to download both products: "G" (glaciers/basins) and "C" (complexes).
+cryoswath will give you hints if any data is missing as you go.
+Deposit the shape files in ``data/auxiliary/RGI`` or change ``rgi_path`` in :mod:`cryoswath.misc` to your needs.
+
+Software dependencies
+---------------------
+
+A number of packages, listed in the ``requirements.txt`` of the repository, are needed or beneficial to run cryoswath.
+Note that the package names are "conda" names; "pip" names may differ slightly.
+Unfortunately, there is an issue reading ESA's L1b data: some values are scaled on reading, but the operation "xarray" uses requires the scaling factor to be of a different type.
+The two easiest workarounds are to either patch xarray or to restrict the xarray version to "<2024.3"; a third, manual workaround is sketched at the end of this section.
+
+I provide a Docker container that includes the patched xarray version.
+To fire up Docker, run:
+
+``docker run --detach --interactive --volume <project dir>:/altimetry_project cryoswath/cryoswath:nightly``
+
+Then, connect with your favorite IDE or via ``docker exec --interactive <container> sh``.
+
+For the longer term, you may want to have your own environment. If you are using conda, follow the steps below:
+
+1. ``conda create --name env_name --file <repository>/docker/conda_requirements.txt``
+2. ``conda activate env_name``
+3. ``conda install patch``
+4. ``find -name variables.py -path "*/env_name/*/xarray/coding/*" -exec patch {} <repository>/docker/custom_xarray.patch \;`` (the patch works for ``xarray=2024.9.0``, which is listed in the requirements file used above)
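+
+If you would rather not patch or pin xarray, decoding the data manually is a possible workaround.
+The following is an untested sketch, not part of cryoswath: the file path is a placeholder, and it assumes the problem is an integer-typed ``scale_factor`` attribute.
+It opens a file without CF decoding, casts the scale factors to ``float``, and decodes afterwards:
+
+.. code-block:: python
+
+   import xarray as xr
+
+   # open without applying CF conventions, so nothing is scaled yet
+   ds = xr.open_dataset("path/to/l1b_file.nc", decode_cf=False)  # placeholder path
+   for var in ds.variables.values():
+       # cast integer scale factors to float before decoding
+       if "scale_factor" in var.attrs:
+           var.attrs["scale_factor"] = float(var.attrs["scale_factor"])
+   ds = xr.decode_cf(ds)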
diff --git a/_sources/tests.rst.txt b/_sources/tests.rst.txt
new file mode 100644
index 0000000..8a7813a
--- /dev/null
+++ b/_sources/tests.rst.txt
@@ -0,0 +1,15 @@
+Tests
+=====
+
+In the directory ``tests/reports`` you can find notebooks that are built to evaluate cryoswath.
+If you modify the core components of cryoswath, which you are encouraged to do(!), you should run the notebooks to verify that your results are reasonable.
+This test is only a first step.
+If you are satisfied, do a broader validation campaign.
+
+``tests/reports/l1b_swath_start.ipynb`` tests edge cases for finding the start of the swath domain.
+
+``tests/reports/l1b_waveform.ipynb`` shows the estimated surface elevations for a waveform, overlaid with the cross-section of the glacier.
+
+``tests/reports/l2_dem_comparison.ipynb`` compares many elevation estimates to a reference elevation model.
+
+``tests/reports/l2_tested_data_comparison.ipynb`` compares the elevation estimates against the results of cryoswath's parent implementation, which was thoroughly tested.
diff --git a/_sources/tutorials.rst.txt b/_sources/tutorials.rst.txt
new file mode 100644
index 0000000..af61f9c
--- /dev/null
+++ b/_sources/tutorials.rst.txt
@@ -0,0 +1,9 @@
+Tutorials
+=========
+
+There is a small number of tutorials; more will be added if I find support.
+They are Jupyter notebooks located in ``scripts``, with names starting with "tutorial".
+
+``scripts/tutorial.ipynb`` will contain a step-by-step guide to retrieving gridded glacier surface elevation trends from raw (L1b) data.
+
+``scripts/tutorial__poca.ipynb`` shows how to retrieve the points of closest approach (POCA), which have a special meaning in radar altimetry.
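+
+To run a tutorial notebook (or one of the test report notebooks) unattended, you can execute it programmatically.
+The snippet below is a sketch using the standard ``nbformat``/``nbconvert`` API; it is not part of cryoswath, and the paths are only examples:
+
+.. code-block:: python
+
+   import nbformat
+   from nbconvert.preprocessors import ExecutePreprocessor
+
+   # read the notebook, execute it, and save the result under a new name
+   nb = nbformat.read("scripts/tutorial.ipynb", as_version=4)
+   ExecutePreprocessor(timeout=3600).preprocess(
+       nb, {"metadata": {"path": "scripts"}}  # run relative to the scripts dir
+   )
+   nbformat.write(nb, "scripts/tutorial_executed.ipynb")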
diff --git a/_static/basic.css b/_static/basic.css
index 30fee9d..f316efc 100644
--- a/_static/basic.css
+++ b/_static/basic.css
@@ -4,7 +4,7 @@
  *
  * Sphinx stylesheet -- basic theme.
  *
- * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS.
+ * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS.
  * :license: BSD, see LICENSE for details.
  *
  */
diff --git a/_static/doctools.js b/_static/doctools.js
index d06a71d..4d67807 100644
--- a/_static/doctools.js
+++ b/_static/doctools.js
@@ -4,7 +4,7 @@
  *
  * Base JavaScript utilities for all Sphinx HTML documentation.
  *
- * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS.
+ * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS.
  * :license: BSD, see LICENSE for details.
  *
  */
diff --git a/_static/language_data.js b/_static/language_data.js
index 250f566..367b8ed 100644
--- a/_static/language_data.js
+++ b/_static/language_data.js
@@ -5,7 +5,7 @@
  * This script contains the language-specific data used by searchtools.js,
  * namely the list of stopwords, stemmer, scorer and splitter.
  *
- * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS.
+ * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS.
  * :license: BSD, see LICENSE for details.
  *
  */
@@ -13,7 +13,7 @@
 
 var stopwords = ["a", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "near", "no", "not", "of", "on", "or", "such", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"];
 
-/* Non-minified version is copied as a separate JS file, is available */
+/* Non-minified version is copied as a separate JS file, if available */
 
 /**
  * Porter Stemmer
diff --git a/_static/searchtools.js b/_static/searchtools.js
index 7918c3f..b08d58c 100644
--- a/_static/searchtools.js
+++ b/_static/searchtools.js
@@ -4,7 +4,7 @@
  *
  * Sphinx JavaScript utilities for the full-text search.
  *
- * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS.
+ * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS.
  * :license: BSD, see LICENSE for details.
  *
  */
@@ -99,7 +99,7 @@ const _displayItem = (item, searchTerms, highlightTerms) => {
       .then((data) => {
         if (data)
           listItem.appendChild(
-            Search.makeSearchSummary(data, searchTerms)
+            Search.makeSearchSummary(data, searchTerms, anchor)
           );
         // highlight search terms in the summary
         if (SPHINX_HIGHLIGHT_ENABLED)  // set in sphinx_highlight.js
@@ -116,8 +116,8 @@ const _finishSearch = (resultCount) => {
     );
   else
     Search.status.innerText = _(
-      `Search finished, found ${resultCount} page(s) matching the search query.`
-    );
+      "Search finished, found ${resultCount} page(s) matching the search query."
+    ).replace('${resultCount}', resultCount);
 };
 const _displayNextItem = (
   results,
@@ -137,6 +137,22 @@ const _displayNextItem = (
   // search finished, update title and status message
   else _finishSearch(resultCount);
 };
+// Helper function used by query() to order search results.
+// Each input is an array of [docname, title, anchor, descr, score, filename].
+// Order the results by score (in opposite order of appearance, since the
+// `_displayNextItem` function uses pop() to retrieve items) and then alphabetically.
+const _orderResultsByScoreThenName = (a, b) => {
+  const leftScore = a[4];
+  const rightScore = b[4];
+  if (leftScore === rightScore) {
+    // same score: sort alphabetically
+    const leftTitle = a[1].toLowerCase();
+    const rightTitle = b[1].toLowerCase();
+    if (leftTitle === rightTitle) return 0;
+    return leftTitle > rightTitle ? -1 : 1; // inverted is intentional
+  }
+  return leftScore > rightScore ? 1 : -1;
+};
 
 /**
  * Default splitQuery function. Can be overridden in ``sphinx.search`` with a
@@ -160,13 +176,26 @@ const Search = {
   _queued_query: null,
   _pulse_status: -1,
 
-  htmlToText: (htmlString) => {
+  htmlToText: (htmlString, anchor) => {
     const htmlElement = new DOMParser().parseFromString(htmlString, 'text/html');
-    htmlElement.querySelectorAll(".headerlink").forEach((el) => { el.remove() });
+    for (const removalQuery of [".headerlink", "script", "style"]) {
+      htmlElement.querySelectorAll(removalQuery).forEach((el) => { el.remove() });
+    }
+    if (anchor) {
+      const anchorContent = htmlElement.querySelector(`[role="main"] ${anchor}`);
+      if (anchorContent) return anchorContent.textContent;
+
+      console.warn(
+        `Anchored content block not found. Sphinx search tries to obtain it via DOM query '[role=main] ${anchor}'. Check your theme or template.`
+      );
+    }
+
+    // if anchor not specified or not found, fall back to main content
     const docContent = htmlElement.querySelector('[role="main"]');
-    if (docContent !== undefined) return docContent.textContent;
+    if (docContent) return docContent.textContent;
+
     console.warn(
-      "Content block not found. Sphinx search tries to obtain it via '[role=main]'. Could you check your theme or template."
+      "Content block not found. Sphinx search tries to obtain it via DOM query '[role=main]'. Check your theme or template."
     );
     return "";
   },
@@ -239,16 +268,7 @@ const Search = {
     else Search.deferQuery(query);
   },
 
-  /**
-   * execute search (requires search index to be loaded)
-   */
-  query: (query) => {
-    const filenames = Search._index.filenames;
-    const docNames = Search._index.docnames;
-    const titles = Search._index.titles;
-    const allTitles = Search._index.alltitles;
-    const indexEntries = Search._index.indexentries;
-
+  _parseQuery: (query) => {
     // stem the search terms and add them to the correct list
     const stemmer = new Stemmer();
     const searchTerms = new Set();
@@ -284,21 +304,38 @@ const Search = {
     // console.info("required: ", [...searchTerms]);
     // console.info("excluded: ", [...excludedTerms]);
 
-    // array of [docname, title, anchor, descr, score, filename]
-    let results = [];
+    return [query, searchTerms, excludedTerms, highlightTerms, objectTerms];
+  },
+
+  /**
+   * execute search (requires search index to be loaded)
+   */
+  _performSearch: (query, searchTerms, excludedTerms, highlightTerms, objectTerms) => {
+    const filenames = Search._index.filenames;
+    const docNames = Search._index.docnames;
+    const titles = Search._index.titles;
+    const allTitles = Search._index.alltitles;
+    const indexEntries = Search._index.indexentries;
+
+    // Collect multiple result groups to be sorted separately and then ordered.
+    // Each is an array of [docname, title, anchor, descr, score, filename].
+    const normalResults = [];
+    const nonMainIndexResults = [];
+
     _removeChildren(document.getElementById("search-progress"));
-    const queryLower = query.toLowerCase();
+    const queryLower = query.toLowerCase().trim();
     for (const [title, foundTitles] of Object.entries(allTitles)) {
-      if (title.toLowerCase().includes(queryLower) && (queryLower.length >= title.length/2)) {
+      if (title.toLowerCase().trim().includes(queryLower) && (queryLower.length >= title.length/2)) {
         for (const [file, id] of foundTitles) {
-          let score = Math.round(100 * queryLower.length / title.length)
-          results.push([
+          const score = Math.round(Scorer.title * queryLower.length / title.length);
+          const boost = titles[file] === title ? 1 : 0; // add a boost for document titles
+          normalResults.push([
             docNames[file],
             titles[file] !== title ? `${titles[file]} > ${title}` : title,
             id !== null ? "#" + id : "",
             null,
-            score,
+            score + boost,
             filenames[file],
           ]);
@@ -308,46 +345,47 @@ const Search = {
     // search for explicit entries in index directives
     for (const [entry, foundEntries] of Object.entries(indexEntries)) {
       if (entry.includes(queryLower) && (queryLower.length >= entry.length/2)) {
-        for (const [file, id] of foundEntries) {
-          let score = Math.round(100 * queryLower.length / entry.length)
-          results.push([
+        for (const [file, id, isMain] of foundEntries) {
+          const score = Math.round(100 * queryLower.length / entry.length);
+          const result = [
             docNames[file],
             titles[file],
             id ? "#" + id : "",
             null,
             score,
             filenames[file],
-          ]);
+          ];
+          if (isMain) {
+            normalResults.push(result);
+          } else {
+            nonMainIndexResults.push(result);
+          }
         }
       }
     }
 
     // lookup as object
     objectTerms.forEach((term) =>
-      results.push(...Search.performObjectSearch(term, objectTerms))
+      normalResults.push(...Search.performObjectSearch(term, objectTerms))
    );
 
     // lookup as search terms in fulltext
-    results.push(...Search.performTermsSearch(searchTerms, excludedTerms));
+    normalResults.push(...Search.performTermsSearch(searchTerms, excludedTerms));
 
     // let the scorer override scores with a custom scoring function
-    if (Scorer.score) results.forEach((item) => (item[4] = Scorer.score(item)));
-
-    // now sort the results by score (in opposite order of appearance, since the
-    // display function below uses pop() to retrieve items) and then
-    // alphabetically
-    results.sort((a, b) => {
-      const leftScore = a[4];
-      const rightScore = b[4];
-      if (leftScore === rightScore) {
-        // same score: sort alphabetically
-        const leftTitle = a[1].toLowerCase();
-        const rightTitle = b[1].toLowerCase();
-        if (leftTitle === rightTitle) return 0;
-        return leftTitle > rightTitle ? -1 : 1; // inverted is intentional
-      }
-      return leftScore > rightScore ? 1 : -1;
-    });
+    if (Scorer.score) {
+      normalResults.forEach((item) => (item[4] = Scorer.score(item)));
+      nonMainIndexResults.forEach((item) => (item[4] = Scorer.score(item)));
+    }
+
+    // Sort each group of results by score and then alphabetically by name.
+    normalResults.sort(_orderResultsByScoreThenName);
+    nonMainIndexResults.sort(_orderResultsByScoreThenName);
+
+    // Combine the result groups in (reverse) order.
+    // Non-main index entries are typically arbitrary cross-references,
+    // so display them after other results.
+    let results = [...nonMainIndexResults, ...normalResults];
 
     // remove duplicate search results
     // note the reversing of results, so that in the case of duplicates, the highest-scoring entry is kept
@@ -361,7 +399,12 @@ const Search = {
       return acc;
     }, []);
 
-    results = results.reverse();
+    return results.reverse();
+  },
+
+  query: (query) => {
+    const [searchQuery, searchTerms, excludedTerms, highlightTerms, objectTerms] = Search._parseQuery(query);
+    const results = Search._performSearch(searchQuery, searchTerms, excludedTerms, highlightTerms, objectTerms);
 
     // for debugging
     //Search.lastresults = results.slice();  // a copy
@@ -466,14 +509,18 @@ const Search = {
     // add support for partial matches
     if (word.length > 2) {
       const escapedWord = _escapeRegExp(word);
-      Object.keys(terms).forEach((term) => {
-        if (term.match(escapedWord) && !terms[word])
-          arr.push({ files: terms[term], score: Scorer.partialTerm });
-      });
-      Object.keys(titleTerms).forEach((term) => {
-        if (term.match(escapedWord) && !titleTerms[word])
-          arr.push({ files: titleTerms[word], score: Scorer.partialTitle });
-      });
+      if (!terms.hasOwnProperty(word)) {
+        Object.keys(terms).forEach((term) => {
+          if (term.match(escapedWord))
+            arr.push({ files: terms[term], score: Scorer.partialTerm });
+        });
+      }
+      if (!titleTerms.hasOwnProperty(word)) {
+        Object.keys(titleTerms).forEach((term) => {
+          if (term.match(escapedWord))
+            arr.push({ files: titleTerms[term], score: Scorer.partialTitle });
+        });
+      }
     }
 
     // no match but word was a required one
@@ -496,9 +543,8 @@ const Search = {
 
     // create the mapping
     files.forEach((file) => {
-      if (fileMap.has(file) && fileMap.get(file).indexOf(word) === -1)
-        fileMap.get(file).push(word);
-      else fileMap.set(file, [word]);
+      if (!fileMap.has(file)) fileMap.set(file, [word]);
+      else if (fileMap.get(file).indexOf(word) === -1) fileMap.get(file).push(word);
     });
 
@@ -549,8 +595,8 @@ const Search = {
    * search summary for a given text. keywords is a list
    * of stemmed words.
    */
-  makeSearchSummary: (htmlText, keywords) => {
-    const text = Search.htmlToText(htmlText);
+  makeSearchSummary: (htmlText, keywords, anchor) => {
+    const text = Search.htmlToText(htmlText, anchor);
     if (text === "") return null;
 
     const textLower = text.toLowerCase();
diff --git a/cryoswath.gis.html b/cryoswath.gis.html
index 10bee32..8f75dfe 100644
--- a/cryoswath.gis.html
+++ b/cryoswath.gis.html
@@ -16,7 +16,7 @@
-
+
@@ -46,6 +46,10 @@