diff --git a/.codecov.yml b/.codecov.yml new file mode 100644 index 0000000..20ad66b --- /dev/null +++ b/.codecov.yml @@ -0,0 +1,10 @@ +# show coverage in CI status, not as a comment. +comment: off +coverage: + status: + project: + default: + target: auto + patch: + default: + target: auto diff --git a/.coveragerc b/.coveragerc new file mode 100644 index 0000000..1ad3218 --- /dev/null +++ b/.coveragerc @@ -0,0 +1,13 @@ +[run] +source = + pyCHX +[report] +omit = + */python?.?/* + */site-packages/nose/* + # ignore _version.py and versioneer.py + .*version.* + *_version.py + +exclude_lines = + if __name__ == '__main__': diff --git a/.flake8 b/.flake8 new file mode 100644 index 0000000..763d84f --- /dev/null +++ b/.flake8 @@ -0,0 +1,12 @@ +[flake8] +exclude = + .git, + __pycache__, + build, + dist, + versioneer.py, + pyCHX/_version.py, + docs/source/conf.py +max-line-length = 115 +# Ignore some style 'errors' produced while formatting by 'black' +ignore = E203, W503 diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..9ddddbc --- /dev/null +++ b/.gitattributes @@ -0,0 +1 @@ +pyCHX/_version.py export-subst diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml new file mode 100644 index 0000000..1783664 --- /dev/null +++ b/.github/workflows/docs.yml @@ -0,0 +1,55 @@ +name: Build Documentation + +on: + push: + pull_request: + +jobs: + build_docs: + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ["3.10"] + fail-fast: false + + defaults: + run: + shell: bash -l {0} + + steps: + - name: Set env vars + run: | + export REPOSITORY_NAME=${GITHUB_REPOSITORY#*/} # just the repo, as opposed to org/repo + echo "REPOSITORY_NAME=${REPOSITORY_NAME}" >> $GITHUB_ENV + + - name: Checkout the code + uses: actions/checkout@v3 + with: + fetch-depth: 1000 # should be enough to reach the most recent tag + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ 
matrix.python-version }} + + - name: Install documentation-building requirements + run: | + # For reference: https://www.gnu.org/software/bash/manual/html_node/The-Set-Builtin.html. + set -vxeuo pipefail + + # These packages are installed in the base environment but may be older + # versions. Explicitly upgrade them because they often create + # installation problems if out of date. + python -m pip install --upgrade pip setuptools numpy + + pip install . + pip install -r requirements-dev.txt + pip list + + - name: Build Docs + run: make -C docs/ html + + - uses: actions/upload-artifact@v3 + with: + name: ${{ env.REPOSITORY_NAME }}-docs + path: docs/build/html/ diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml new file mode 100644 index 0000000..8dd7a87 --- /dev/null +++ b/.github/workflows/pre-commit.yml @@ -0,0 +1,15 @@ +name: pre-commit + +on: + pull_request: + push: + +jobs: + pre-commit: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 + - uses: pre-commit/action@v3.0.0 + with: + extra_args: --all-files diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml new file mode 100644 index 0000000..2413f97 --- /dev/null +++ b/.github/workflows/publish-pypi.yml @@ -0,0 +1,39 @@ +# This workflow will upload a Python Package using flit when a release is +# created. 
For more information see: +# https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries + +name: PyPI upload + +on: + release: + types: [created] + +jobs: + publish_pypi: + name: Publish package to PyPI + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.x' + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install wheel twine setuptools + + - name: Build and publish + env: + TWINE_USERNAME: __token__ + # The PYPI_PASSWORD must be a pypi token with the "pypi-" prefix with sufficient permissions to upload this package + # https://pypi.org/help/#apitoken + TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }} + run: | + python setup.py sdist bdist_wheel + twine upload dist/* diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml new file mode 100644 index 0000000..66fe803 --- /dev/null +++ b/.github/workflows/testing.yml @@ -0,0 +1,55 @@ +name: Unit Tests + +on: + push: + pull_request: + # schedule: + # - cron: '00 4 * * *' # daily at 4AM + +jobs: + run_tests: + runs-on: ${{ matrix.host-os }} + strategy: + matrix: + host-os: ["ubuntu-latest"] + # host-os: ["ubuntu-latest", "macos-latest", "windows-latest"] + python-version: ["3.8", "3.9", "3.10"] + fail-fast: false + + defaults: + run: + shell: bash -l {0} + + steps: + - name: Set env vars + run: | + export REPOSITORY_NAME=${GITHUB_REPOSITORY#*/} # just the repo, as opposed to org/repo + echo "REPOSITORY_NAME=${REPOSITORY_NAME}" >> $GITHUB_ENV + + - name: Checkout the code + uses: actions/checkout@v3 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + + - name: Install dependencies + run: | + # For reference: https://www.gnu.org/software/bash/manual/html_node/The-Set-Builtin.html. 
+ set -vxeuo pipefail + + # These packages are installed in the base environment but may be older + # versions. Explicitly upgrade them because they often create + # installation problems if out of date. + python -m pip install --upgrade pip setuptools numpy + + pip install . + pip install -r requirements-dev.txt + pip list + + - name: Test with pytest + run: | + set -vxeuo pipefail + coverage run -m pytest -vv -s + coverage report -m diff --git a/.isort.cfg b/.isort.cfg new file mode 100644 index 0000000..e0926f4 --- /dev/null +++ b/.isort.cfg @@ -0,0 +1,4 @@ +[settings] +line_length = 115 +multi_line_output = 3 +include_trailing_comma = True diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..4336326 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,26 @@ +default_language_version: + python: python3 +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.4.0 + hooks: + - id: check-yaml + - id: end-of-file-fixer + - id: trailing-whitespace + - repo: https://github.com/ambv/black + rev: 23.1.0 + hooks: + - id: black + - repo: https://github.com/pycqa/flake8 + rev: 6.0.0 + hooks: + - id: flake8 + - repo: https://github.com/pycqa/isort + rev: 5.12.0 + hooks: + - id: isort + args: ["--profile", "black"] + - repo: https://github.com/kynan/nbstripout + rev: 0.6.1 + hooks: + - id: nbstripout diff --git a/.travis.yml b/.travis.yml deleted file mode 100755 index 7926e01..0000000 --- a/.travis.yml +++ /dev/null @@ -1,24 +0,0 @@ -language: python - -matrix: - include: - - python: 3.6 - -install: - - pip install . 
- # need this because some requirements are pulled from git - # which is ignored in the setup.py file - - pip install -r requirements.txt - - pip install -r test-requirements.txt - # make a fake instance of databroker (so imports work) - - mkdir -p ~/.config/databroker - - cp extra/_legacy_config.yml ~/.config/databroker/_legacy_config.yml - # copy this dummy file to chx.yml as well (this should be correct usage) - - cp extra/_legacy_config.yml ~/.config/databroker/chx.yml - -script: - - coverage run run_tests.py - - coverage report -m - -after_success: - - codecov diff --git a/AUTHORS.rst b/AUTHORS.rst new file mode 100644 index 0000000..0db2f11 --- /dev/null +++ b/AUTHORS.rst @@ -0,0 +1,13 @@ +======= +Credits +======= + +Maintainer +---------- + +* Brookhaven National Laboratory + +Contributors +------------ + +None yet. Why not be the first? See: CONTRIBUTING.rst diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst new file mode 100644 index 0000000..9efd99d --- /dev/null +++ b/CONTRIBUTING.rst @@ -0,0 +1,103 @@ +============ +Contributing +============ + +Contributions are welcome, and they are greatly appreciated! Every +little bit helps, and credit will always be given. + +You can contribute in many ways: + +Types of Contributions +---------------------- + +Report Bugs +~~~~~~~~~~~ + +Report bugs at https://github.com/samclark2/pyCHX/issues. + +If you are reporting a bug, please include: + +* Any details about your local setup that might be helpful in troubleshooting. +* Detailed steps to reproduce the bug. + +Fix Bugs +~~~~~~~~ + +Look through the GitHub issues for bugs. Anything tagged with "bug" +is open to whoever wants to implement it. + +Implement Features +~~~~~~~~~~~~~~~~~~ + +Look through the GitHub issues for features. Anything tagged with "feature" +is open to whoever wants to implement it. 
+ +Write Documentation +~~~~~~~~~~~~~~~~~~~ + +pyCHX could always use more documentation, whether +as part of the official pyCHX docs, in docstrings, +or even on the web in blog posts, articles, and such. + +Submit Feedback +~~~~~~~~~~~~~~~ + +The best way to send feedback is to file an issue at https://github.com/samclark2/pyCHX/issues. + +If you are proposing a feature: + +* Explain in detail how it would work. +* Keep the scope as narrow as possible, to make it easier to implement. +* Remember that this is a volunteer-driven project, and that contributions + are welcome :) + +Get Started! +------------ + +Ready to contribute? Here's how to set up `pyCHX` for local development. + +1. Fork the `pyCHX` repo on GitHub. +2. Clone your fork locally:: + + $ git clone git@github.com:your_name_here/pyCHX.git + +3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development:: + + $ mkvirtualenv pyCHX + $ cd pyCHX/ + $ python setup.py develop + +4. Create a branch for local development:: + + $ git checkout -b name-of-your-bugfix-or-feature + + Now you can make your changes locally. + +5. When you're done making changes, check that your changes pass flake8 and the tests, including testing other Python versions with tox:: + + $ flake8 pyCHX tests + $ python setup.py test + $ tox + + To get flake8 and tox, just pip install them into your virtualenv. + +6. Commit your changes and push your branch to GitHub:: + + $ git add . + $ git commit -m "Your detailed description of your changes." + $ git push origin name-of-your-bugfix-or-feature + +7. Submit a pull request through the GitHub website. + +Pull Request Guidelines +----------------------- + +Before you submit a pull request, check that it meets these guidelines: + +1. The pull request should include tests. +2. If the pull request adds functionality, the docs should be updated. 
Put + your new functionality into a function with a docstring, and add the + feature to the list in README.rst. +3. The pull request should work for Python 3.8, 3.9 and 3.10. Check + https://github.com/samclark2/pyCHX/actions + and make sure that the tests pass for all supported Python versions. diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 0000000..b4a5893 --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = "-W" # This flag turns warnings into errors. +SPHINXBUILD = sphinx-build +SPHINXPROJ = PackagingScientificPython +SOURCEDIR = source +BUILDDIR = build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/make.bat b/docs/make.bat new file mode 100644 index 0000000..ac53d5b --- /dev/null +++ b/docs/make.bat @@ -0,0 +1,36 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=source +set BUILDDIR=build +set SPHINXPROJ=PackagingScientificPython + +if "%1" == "" goto help + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo.
+ echo.If you don't have Sphinx installed, grab it from + echo.http://sphinx-doc.org/ + exit /b 1 +) + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% + +:end +popd diff --git a/docs/source/_static/.placeholder b/docs/source/_static/.placeholder new file mode 100644 index 0000000..e69de29 diff --git a/docs/source/conf.py b/docs/source/conf.py new file mode 100644 index 0000000..84ecb08 --- /dev/null +++ b/docs/source/conf.py @@ -0,0 +1,214 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# +# pyCHX documentation build configuration file, created by +# sphinx-quickstart on Thu Jun 28 12:35:56 2018. +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +# import os +# import sys +# sys.path.insert(0, os.path.abspath('.')) + + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +# +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. 
+extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.autosummary", + "sphinx.ext.githubpages", + "sphinx.ext.intersphinx", + "sphinx.ext.mathjax", + "sphinx.ext.viewcode", + "IPython.sphinxext.ipython_directive", + "IPython.sphinxext.ipython_console_highlighting", + "matplotlib.sphinxext.plot_directive", + "numpydoc", + "sphinx_copybutton", +] + +# Configuration options for plot_directive. See: +# https://github.com/matplotlib/matplotlib/blob/f3ed922d935751e08494e5fb5311d3050a3b637b/lib/matplotlib/sphinxext/plot_directive.py#L81 +plot_html_show_source_link = False +plot_html_show_formats = False + +# Generate the API documentation when building +autosummary_generate = True +numpydoc_show_class_members = False + +# Add any paths that contain templates here, relative to this directory. +templates_path = ["_templates"] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +# source_suffix = ['.rst', '.md'] +source_suffix = ".rst" + +# The master toctree document. +master_doc = "index" + +# General information about the project. +project = "pyCHX" +copyright = "2023, Brookhaven National Laboratory" +author = "Brookhaven National Laboratory" + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +import pyCHX + +# The short X.Y version. +version = pyCHX.__version__ +# The full version, including alpha/beta/rc tags. +release = pyCHX.__version__ + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = "en" + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. 
+# This patterns also effect to html_static_path and html_extra_path +exclude_patterns = [] + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = "sphinx" + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = False + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = "sphinx_rtd_theme" +import sphinx_rtd_theme + +html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# +# html_theme_options = {} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ["_static"] + +# Custom sidebar templates, must be a dictionary that maps document names +# to template names. +# +# This is required for the alabaster theme +# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars +html_sidebars = { + "**": [ + "relations.html", # needs 'show_related': True theme option to display + "searchbox.html", + ] +} + + +# -- Options for HTMLHelp output ------------------------------------------ + +# Output file base name for HTML help builder. +htmlhelp_basename = "pyCHX" + + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # 'papersize': 'letterpaper', + # + # The font size ('10pt', '11pt' or '12pt'). + # 'pointsize': '10pt', + # + # Additional stuff for the LaTeX preamble. 
+ # 'preamble': '', + # + # Latex figure (float) alignment + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + ( + master_doc, + "pyCHX.tex", + "pyCHX Documentation", + "Contributors", + "manual", + ), +] + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ( + master_doc, + "pyCHX", + "pyCHX Documentation", + [author], + 1, + ) +] + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ( + master_doc, + "pyCHX", + "pyCHX Documentation", + author, + "pyCHX", + "Repository for data collection and analysis scripts that are useful at the CHX beamline at NSLS-II (11-ID) developed by Dr. Yugang Zhang (yuzhang@bnl.gov).", + "Miscellaneous", + ), +] + + +# Example configuration for intersphinx: refer to the Python standard library. +intersphinx_mapping = { + "python": ("https://docs.python.org/3/", None), + "numpy": ("https://numpy.org/doc/stable/", None), + "scipy": ("https://docs.scipy.org/doc/scipy/reference/", None), + "pandas": ("https://pandas.pydata.org/pandas-docs/stable", None), + "matplotlib": ("https://matplotlib.org/stable", None), +} diff --git a/docs/source/index.rst b/docs/source/index.rst new file mode 100644 index 0000000..071bbe7 --- /dev/null +++ b/docs/source/index.rst @@ -0,0 +1,15 @@ +.. Packaging Scientific Python documentation master file, created by + sphinx-quickstart on Thu Jun 28 12:35:56 2018. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. 
+ +pyCHX Documentation +=================== + +.. toctree:: + :maxdepth: 2 + + installation + usage + release-history + min_versions diff --git a/docs/source/installation.rst b/docs/source/installation.rst new file mode 100644 index 0000000..a61452c --- /dev/null +++ b/docs/source/installation.rst @@ -0,0 +1,7 @@ +============ +Installation +============ + +At the command line:: + + $ pip install pyCHX diff --git a/docs/source/min_versions.rst b/docs/source/min_versions.rst new file mode 100644 index 0000000..d28e747 --- /dev/null +++ b/docs/source/min_versions.rst @@ -0,0 +1,28 @@ +=================================== +Minimum Version of Python and NumPy +=================================== + + +- This project supports at least the minor versions of Python + initially released 42 months prior to a planned project release + date. +- The project will always support at least the 2 latest minor + versions of Python. +- The project will support minor versions of ``numpy`` initially + released in the 24 months prior to a planned project release date or + the oldest version that supports the minimum Python version + (whichever is higher). +- The project will always support at least the 3 latest minor + versions of NumPy. + +The minimum supported version of Python will be set to +``python_requires`` in ``setup``. All supported minor versions of +Python will be in the test matrix and have binary artifacts built +for releases. + +The project should adjust upward the minimum Python and NumPy +version support on every minor and major release, but never on a +patch release. + +This is consistent with NumPy `NEP 29 +`__. 
diff --git a/docs/source/release-history.rst b/docs/source/release-history.rst new file mode 100644 index 0000000..53707fb --- /dev/null +++ b/docs/source/release-history.rst @@ -0,0 +1,6 @@ +=============== +Release History +=============== + +Initial Release (YYYY-MM-DD) +---------------------------- diff --git a/docs/source/usage.rst b/docs/source/usage.rst new file mode 100644 index 0000000..4d26dae --- /dev/null +++ b/docs/source/usage.rst @@ -0,0 +1,9 @@ +===== +Usage +===== + +Start by importing pyCHX. + +.. code-block:: python + + import pyCHX diff --git a/extra/_legacy_config.yml b/extra/_legacy_config.yml deleted file mode 100755 index 930057f..0000000 --- a/extra/_legacy_config.yml +++ /dev/null @@ -1,16 +0,0 @@ -# dummy file -metadatastore: - module: databroker.headersource.mongo - class: MDS - config: - host: localhost - database: datastore - port: 27017 - timezone: 'US/Eastern' -assets: - module: databroker.assets.mongo - class: Registry - config: - host: localhost - database: filestore - port: 27017 diff --git a/pyCHX/Create_Report.py b/pyCHX/Create_Report.py index 930ef45..f434328 100644 --- a/pyCHX/Create_Report.py +++ b/pyCHX/Create_Report.py @@ -119,6 +119,7 @@ class create_pdf_report( object ): def __init__( self, data_dir, uid, out_dir=None, filename=None, load=True, user=None, report_type='saxs',md=None, res_h5_filename=None ): + from datetime import datetime self.data_dir = data_dir self.uid = uid self.md = md @@ -1638,7 +1639,7 @@ def recursively_save_dict_contents_to_group( h5file, path, dic): if not isinstance(key, str): raise ValueError("dict keys must be strings to save to hdf5") # save strings, numpy.int64, and numpy.float64 types - if isinstance(item, (np.int64, np.float64, str, np.float, float, np.float32,int)): + if isinstance(item, (np.int64, np.float64, str, float, np.float32,int)): # removed deprecated np.float LW @06/11/2023 #print( 'here' ) h5file[path + key] = item if not h5file[path + key].value == item: diff --git
a/pyCHX/chx_compress.py b/pyCHX/chx_compress.py index 573bb61..adbdd5b 100644 --- a/pyCHX/chx_compress.py +++ b/pyCHX/chx_compress.py @@ -146,8 +146,8 @@ def read_compressed_eigerdata( mask, filename, beg, end, CAL = True if CAL: FD = Multifile( filename, beg, end) - imgsum = np.zeros( FD.end- FD.beg, dtype= np.float ) - avg_img = np.zeros( [FD.md['ncols'], FD.md['nrows'] ] , dtype= np.float ) + imgsum = np.zeros( FD.end- FD.beg, dtype= np.float64 ) + avg_img = np.zeros( [FD.md['ncols'], FD.md['nrows'] ] , dtype= np.float64 ) imgsum, bad_frame_list_ = get_each_frame_intensityc( FD, sampling = 1, bad_pixel_threshold=bad_pixel_threshold, bad_pixel_low_threshold=bad_pixel_low_threshold, hot_pixel_threshold=hot_pixel_threshold, plot_ = False, @@ -345,7 +345,7 @@ def segment_compress_eigerdata( images, mask, md, filename, Nimg_ = len( images) M,N = images[0].shape - avg_img = np.zeros( [M,N], dtype= np.float ) + avg_img = np.zeros( [M,N], dtype= np.float64 ) Nopix = float( avg_img.size ) n=0 good_count = 0 @@ -531,7 +531,7 @@ def init_compress_eigerdata( images, mask, md, filename, fp.write( Header) Nimg_ = len( images) - avg_img = np.zeros_like( images[0], dtype= np.float ) + avg_img = np.zeros_like( images[0], dtype= np.float64 ) Nopix = float( avg_img.size ) n=0 good_count = 0 diff --git a/pyCHX/chx_correlationc.py b/pyCHX/chx_correlationc.py index 2fb54cf..af0dbd4 100644 --- a/pyCHX/chx_correlationc.py +++ b/pyCHX/chx_correlationc.py @@ -1283,14 +1283,14 @@ def get_data(self ): Return: 2-D array, shape as (len(images), len(pixellist)) ''' - data_array = np.zeros([ self.length,len(self.pixelist)], dtype=np.float) + data_array = np.zeros([ self.length,len(self.pixelist)], dtype=np.float64) # changed dtype = np.float (depreciated) to dtype = np.float64 LW @06112023 #fra_pix = np.zeros_like( pixelist, dtype=np.float64) timg = np.zeros( self.FD.md['ncols'] * self.FD.md['nrows'] , dtype=np.int32 ) timg[self.pixelist] = np.arange( 1, len(self.pixelist) + 1 ) if 
self.norm_inten is not None: #Mean_Int_Qind = np.array( self.qind.copy(), dtype=np.float) - Mean_Int_Qind = np.ones( len( self.qind), dtype = np.float) + Mean_Int_Qind = np.ones( len( self.qind), dtype = np.float64) # changed dtype = np.float (depreciated) to dtype = np.float64 LW @06112023 noqs = len(np.unique( self.qind )) nopr = np.bincount(self.qind-1) noprs = np.concatenate( [ np.array([0]), np.cumsum(nopr) ] ) @@ -1393,14 +1393,14 @@ def get_data(self ): Return: 2-D array, shape as (len(images), len(pixellist)) ''' - data_array = np.zeros([ self.length,len(self.pixelist)], dtype=np.float) + data_array = np.zeros([ self.length,len(self.pixelist)], dtype=np.float64) #fra_pix = np.zeros_like( pixelist, dtype=np.float64) timg = np.zeros( self.FD.md['ncols'] * self.FD.md['nrows'] , dtype=np.int32 ) timg[self.pixelist] = np.arange( 1, len(self.pixelist) + 1 ) if self.mean_int_sets is not None: #Mean_Int_Qind = np.array( self.qind.copy(), dtype=np.float) - Mean_Int_Qind = np.ones( len( self.qind), dtype = np.float) + Mean_Int_Qind = np.ones( len( self.qind), dtype = np.float64) noqs = len(np.unique( self.qind )) nopr = np.bincount(self.qind-1) noprs = np.concatenate( [ np.array([0]), np.cumsum(nopr) ] ) diff --git a/pyCHX/chx_generic_functions.py b/pyCHX/chx_generic_functions.py index 24dbc70..3f7aaaf 100644 --- a/pyCHX/chx_generic_functions.py +++ b/pyCHX/chx_generic_functions.py @@ -886,9 +886,9 @@ def lin2log_g2(lin_tau,lin_g2,num_points=False): #print('from lin-to-log-g2_sampling: ',lin_tau) if num_points == False: # automatically decide how many log-points (8/decade) - dec=np.ceil((np.log10(lin_tau.max())-np.log10(lin_tau.min()))*8) + dec=int(np.ceil((np.log10(lin_tau.max())-np.log10(lin_tau.min()))*8)) else: - dec=num_points + dec=int(num_points) log_tau=np.logspace(np.log10(lin_tau[0]),np.log10(lin_tau.max()),dec) # re-sample correlation function: log_g2=[] @@ -1229,7 +1229,7 @@ def get_waxs_beam_center( gamma, origin = [432, 363], Ldet = 1495, pixel_size 
output: beam center: for the target gamma, in pixel ''' - return [ np.int( origin[0] + np.tan( np.radians(gamma)) * Ldet/pixel_size) ,origin[1] ] + return [ int( origin[0] + np.tan( np.radians(gamma)) * Ldet/pixel_size) ,origin[1] ] @@ -1385,15 +1385,14 @@ def pad_length(arr,pad_val=np.nan): adds pad_val to each row, to make the length of each row equal to the lenght of the longest row of the original matrix -> used to convert python generic data object to HDF5 native format function fixes python bug in padding (np.pad) integer array with np.nan + update June 2023: remove use of np.shape and np.size that doesn't work (anymore?) on arrays with inhomogeneous size by LW 12/30/2017 """ max_len=[] - for i in range(np.shape(arr)[0]): - #print(np.size(arr[i])) - max_len.append([np.size(arr[i])]) - #print(max_len) + for i in range(len(arr)): + max_len.append([len(arr[i])]) max_len=np.max(max_len) - for l in range(np.shape(arr)[0]): + for l in range(len(arr)): arr[l]=np.pad(arr[l]*1.,(0,max_len-np.size(arr[l])),mode='constant',constant_values=pad_val) return arr @@ -1504,25 +1503,24 @@ def get_roi_nr(qdict,q,phi,q_nr=True,phi_nr=False,q_thresh=0, p_thresh=0, silent by LW 10/21/2017 update by LW 08/22/2018: introduced thresholds for comparison of Q and phi values (before: exact match required) update 2019/09/28 add qprecision to get unique Q + update 2020/3/12 explicitly order input dictionary to fix problem with environments >= 2019-3.0.1 """ + import collections + from collections import OrderedDict + qdict = collections.OrderedDict(sorted(qdict.items())) qs=[] phis=[] for i in qdict.keys(): qs.append(qdict[i][0]) - phis.append(qdict[i][1]) - from collections import OrderedDict - + phis.append(qdict[i][1]) qslist=list(OrderedDict.fromkeys(qs)) qslist = np.unique( np.round(qslist, qprecision ) ) phislist=list(OrderedDict.fromkeys(phis)) qslist=list(np.sort(qslist)) - #print('Q_list: %s'%qslist) phislist=list(np.sort(phislist)) if q_nr: qinterest=qslist[q] - #qindices = [i
for i,x in enumerate(qs) if x == qinterest] qindices = [i for i,x in enumerate(qs) if np.abs(x-qinterest) < q_thresh] - #print('q_indicies: ',qindices) else: qinterest=q qindices = [i for i,x in enumerate(qs) if np.abs(x-qinterest) < q_thresh] # new @@ -1532,10 +1530,7 @@ def get_roi_nr(qdict,q,phi,q_nr=True,phi_nr=False,q_thresh=0, p_thresh=0, silent else: phiinterest=phi phiindices = [i for i,x in enumerate(phis) if np.abs(x-phiinterest) < p_thresh] # new - #print('phi: %s phi_index: %s'%(phiinterest,phiindices)) - #qindices = [i for i,x in enumerate(qs) if x == qinterest] - #phiindices = [i for i,x in enumerate(phis) if x == phiinterest] - ret_list=[list(set(qindices).intersection(phiindices))[0],qinterest,phiinterest,qslist,phislist] + ret_list=[list(set(qindices).intersection(phiindices))[0],qinterest,phiinterest,qslist,phislist] #-> this is the original if silent == False: print('list of available Qs:') print(qslist) @@ -2378,8 +2373,8 @@ def combine_images( filenames, outputfile, outsize=(2000, 2400)): #nx = np.int( np.ceil( np.sqrt(N)) ) #ny = np.int( np.ceil( N / float(nx) ) ) - ny = np.int( np.ceil( np.sqrt(N)) ) - nx = np.int( np.ceil( N / float(ny) ) ) + ny = int( np.ceil( np.sqrt(N)) ) + nx = int( np.ceil( N / float(ny) ) ) #print(nx,ny) result = Image.new("RGB", outsize, color=(255,255,255,0)) @@ -2887,7 +2882,7 @@ def create_polygon_mask( image, xcorners, ycorners ): from skimage.draw import line_aa, line, polygon, disk imy, imx = image.shape bst_mask = np.zeros_like( image , dtype = bool) - rr, cc = polygon( ycorners,xcorners) + rr, cc = polygon( ycorners,xcorners,shape = image.shape) bst_mask[rr,cc] =1 #full_mask= ~bst_mask return bst_mask @@ -2909,7 +2904,7 @@ def create_rectangle_mask( image, xcorners, ycorners ): from skimage.draw import line_aa, line, polygon, disk imy, imx = image.shape bst_mask = np.zeros_like( image , dtype = bool) - rr, cc = polygon( ycorners,xcorners) + rr, cc = polygon( ycorners,xcorners,shape = image.shape) 
bst_mask[rr,cc] =1 #full_mask= ~bst_mask return bst_mask @@ -2945,7 +2940,7 @@ def create_multi_rotated_rectangle_mask( image, center=None, length=100, width= wx = width x = np.array( [ max(0, cx - wx//2), min(imx, cx+wx//2), min(imx, cx+wx//2), max(0,cx-wx//2 ) ]) y = np.array( [ cy, cy, min( imy, cy + wy) , min(imy, cy + wy) ]) - rr, cc = polygon( y,x) + rr, cc = polygon( y,x, shape = image.shape) mask[rr,cc] =1 mask_rot= np.zeros( image.shape, dtype = bool) for angle in angles: @@ -2972,7 +2967,7 @@ def create_wedge( image, center, radius, wcors, acute_angle=True) : x = np.array( x ) y = np.array( y ) print(x,y) - rr, cc = polygon( y,x) + rr, cc = polygon( y,x, shape = image.shape) maskp[rr,cc] =1 if acute_angle: return maskc*maskp @@ -3005,7 +3000,7 @@ def create_cross_mask( image, center, wy_left=4, wy_right=4, wx_up=4, wx_down=4 wy = wy_right x = np.array( [ cx, imx, imx, cx ]) y = np.array( [ cy-wy, cy-wy, cy + wy, cy + wy]) - rr, cc = polygon( y,x) + rr, cc = polygon( y,x, shape = image.shape) bst_mask[rr,cc] =1 ### @@ -3013,7 +3008,7 @@ def create_cross_mask( image, center, wy_left=4, wy_right=4, wx_up=4, wx_down=4 wy = wy_left x = np.array( [0, cx, cx,0 ]) y = np.array( [ cy-wy, cy-wy, cy + wy, cy + wy]) - rr, cc = polygon( y,x) + rr, cc = polygon( y,x, shape = image.shape) bst_mask[rr,cc] =1 ### @@ -3021,7 +3016,7 @@ def create_cross_mask( image, center, wy_left=4, wy_right=4, wx_up=4, wx_down=4 wx = wx_up x = np.array( [ cx-wx, cx + wx, cx+wx, cx-wx ]) y = np.array( [ cy, cy, imy, imy]) - rr, cc = polygon( y,x) + rr, cc = polygon( y,x, shape = image.shape) bst_mask[rr,cc] =1 ### @@ -3029,7 +3024,7 @@ def create_cross_mask( image, center, wy_left=4, wy_right=4, wx_up=4, wx_down=4 wx = wx_down x = np.array( [ cx-wx, cx + wx, cx+wx, cx-wx ]) y = np.array( [ 0,0, cy, cy]) - rr, cc = polygon( y,x) + rr, cc = polygon( y,x, shape = image.shape) bst_mask[rr,cc] =1 if center_radius!=0: @@ -5609,3 +5604,102 @@ def R_2(ydata,fit_data): 
SS_res=np.sum((np.array(ydata)-np.array(fit_data))**2) #print('SS_res: %s'%SS_res) return 1-SS_res/SS_tot + +def is_outlier(points,thresh=3.5,verbose=False): + """MAD test + """ + points.tolist() + if len(points) ==1: + points=points[:,None] + if verbose: + print('input to is_outlier is a single point...') + median = np.median(points)*np.ones(np.shape(points))#, axis=0) + + diff = (points-median)**2 + diff=np.sqrt(diff) + med_abs_deviation= np.median(diff) + modified_z_score = .6745*diff/med_abs_deviation + return modified_z_score > thresh + +def outlier_mask(avg_img,mask,roi_mask,outlier_threshold = 7.5,maximum_outlier_fraction = .1,verbose=False,plot=False): + """ + outlier_mask(avg_img,mask,roi_mask,outlier_threshold = 7.5,maximum_outlier_fraction = .1,verbose=False,plot=False) + avg_img: average image data (2D) + mask: 2D array, same size as avg_img with pixels that are already masked + roi_mask: 2D array, same size as avg_img, ROI labels 'encoded' as mask values (i.e. all pixels belonging to ROI 5 have the value 5) + outlier_threshold: threshold for MAD test + maximum_outlier_fraction: maximum fraction of pixels in an ROI that can be classified as outliers. If the detected fraction is higher, no outliers will be masked for that ROI.
+ verbose: 'True' enables message output + plot: 'True' enables visualization of outliers + returns: mask (dtype=float): 0 for pixels that have been classified as outliers, 1 else + dependency: is_outlier() + + function does outlier detection for each ROI separately based on pixel intensity in avg_img*mask and ROI specified by roi_mask, using the median-absolute-deviation (MAD) method + + by LW 06/21/2023 + """ + hhmask = np.ones(np.shape(roi_mask)) + pc=1 + + for rn in np.arange(1,np.max(roi_mask)+1,1): + rm=np.zeros(np.shape(roi_mask));rm=rm-1;rm[np.where( roi_mask == rn)]=1 + pixel = roi.roi_pixel_values(avg_img*rm, roi_mask, [rn] ) + out_l = is_outlier((avg_img*mask*rm)[rm>-1], thresh=outlier_threshold) + if np.nanmax(out_l)>0: # Did detect at least one outlier + ave_roi_int = np.nanmean((pixel[0][0])[out_l<1]) + if verbose: print('ROI #%s\naverage ROI intensity: %s'%(rn,ave_roi_int)) + try: + upper_outlier_threshold = np.nanmin((out_l*pixel[0][0])[out_l*pixel[0][0]>ave_roi_int]) + if verbose: print('upper outlier threshold: %s'%upper_outlier_threshold) + except: + upper_outlier_threshold = False + if verbose: print('no upper outlier threshold found') + ind1 = (out_l*pixel[0][0])>0; ind2 = (out_l*pixel[0][0])< ave_roi_int + try: + lower_outlier_threshold = np.nanmax((out_l*pixel[0][0])[ind1*ind2]) + except: + lower_outlier_threshold = False + if verbose: print('no lower outlier threshold found') + else: + if verbose: print('ROI #%s: no outliers detected'%rn) + + ### MAKE SURE we don't REMOVE more than x percent of the pixels in the roi + outlier_fraction = np.sum(out_l)/len(pixel[0][0]) + if verbose: print('fraction of pixel values detected as outliers: %s'%np.round(outlier_fraction,2)) + if outlier_fraction > maximum_outlier_fraction: + if verbose: print('fraction of pixel values detected as outliers > than maximum fraction %s allowed -> NOT masking outliers...check threshold for MAD and maximum fraction of outliers allowed'%maximum_outlier_fraction) + 
upper_outlier_threshold = False; lower_outlier_threshold = False + + if upper_outlier_threshold: + hhmask[avg_img*rm > upper_outlier_threshold] = 0 + if lower_outlier_threshold: + hhmask[avg_img*rm < lower_outlier_threshold] = 0 + + if plot: + if pc == 1: fig,ax = plt.subplots(1,5,figsize=(24,4)) + plt.subplot(1,5,pc);pc+=1; + if pc>5: pc=1 + pixel = roi.roi_pixel_values(avg_img*rm*mask, roi_mask, [rn] ) + plt.plot( pixel[0][0] ,'bo',markersize=1.5 ) + if upper_outlier_threshold or lower_outlier_threshold: + x=np.arange(len(out_l)) + plt.plot([x[0],x[-1]],[ave_roi_int,ave_roi_int],'g--',label='ROI average: %s'%np.round(ave_roi_int,4)) + if upper_outlier_threshold: + ind=(out_l*pixel[0][0])> upper_outlier_threshold + plt.plot(x[ind],(out_l*pixel[0][0])[ind],'r+') + plt.plot([x[0],x[-1]],[upper_outlier_threshold,upper_outlier_threshold],'r--',label='upper thresh.: %s'%np.round(upper_outlier_threshold,4)) + if lower_outlier_threshold: + ind=(out_l*pixel[0][0])< lower_outlier_threshold + plt.plot(x[ind],(out_l*pixel[0][0])[ind],'r+') + plt.plot([x[0],x[-1]],[lower_outlier_threshold,lower_outlier_threshold],'r--',label='lower thresh.: %s'%np.round(upper_outlier_threshold,4)) + plt.ylabel('Intensity') ;plt.xlabel('pixel');plt.title('ROI #: %s'%rn);plt.legend(loc='best',fontsize=8) + + if plot: + fig,ax = plt.subplots() + plt.imshow(hhmask) + hot_dark=np.nonzero(hhmask<1) + cmap = plt.cm.get_cmap('viridis') + plt.plot(hot_dark[1],hot_dark[0],'+',color=cmap(0)) + plt.xlabel('pixel');plt.ylabel('pixel');plt.title('masked pixels with outlier threshold: %s'%outlier_threshold) + + return hhmask \ No newline at end of file diff --git a/pyCHX/xpcs_timepixel.py b/pyCHX/xpcs_timepixel.py index 8b7d51a..286141e 100644 --- a/pyCHX/xpcs_timepixel.py +++ b/pyCHX/xpcs_timepixel.py @@ -253,7 +253,7 @@ def init_compress_timepix_data( pos, t, binstep, filename, mask=None, css = np.cumsum(cs) imgsum = np.zeros( N ) good_count = 0 - avg_img = np.zeros( [ md['sy'], md['sx'] ], dtype= 
np.float ) + avg_img = np.zeros( [ md['sy'], md['sx'] ], dtype= np.float64 ) # changed deprecated np.float to np.float64 LW @06/11/2023 for i in tqdm( range(0,N) ): if i ==0: @@ -337,7 +337,7 @@ def init_compress_timepix_data_light_duty( pos, t, binstep, filename, mask=None imgsum = np.zeros( N-1 ) print('There are %s frames to be compressed...'%(N-1)) good_count = 0 - avg_img = np.zeros( [ md['sy'], md['sx'] ], dtype= np.float ) + avg_img = np.zeros( [ md['sy'], md['sx'] ], dtype= np.float64 ) # changed deprecated np.float to np.float64 LW @06/11/2023 for i in tqdm( range(N-1) ): ind1 = np.argmin( np.abs( tx[i] - t) ) ind2 = np.argmin( np.abs( tx[i+1] - t ) ) diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..3239179 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,20 @@ +[tool.black] +line-length = 115 +include = '\.pyi?$' +exclude = ''' +/( + \.git + | \.hg + | \.mypy_cache + | \.tox + | \.venv + | _build + | buck-out + | build + | dist + + # The following are specific to Black, you probably don't want those. + | blib2to3 + | tests/data +)/ +''' diff --git a/requirements-dev.txt b/requirements-dev.txt new file mode 100644 index 0000000..3b388e1 --- /dev/null +++ b/requirements-dev.txt @@ -0,0 +1,19 @@ +# These are required for developing the package (running the tests, building +# the documentation) but not necessarily required for _using_ it. +black +codecov +coverage +flake8 +isort +nbstripout +pre-commit +pre-commit-hooks +pytest +sphinx +twine +# These are dependencies of various sphinx extensions for documentation.
+ipython +matplotlib +numpydoc +sphinx-copybutton +sphinx_rtd_theme diff --git a/requirements_bk.txt b/requirements_bk.txt deleted file mode 100644 index dbe8c3a..0000000 --- a/requirements_bk.txt +++ /dev/null @@ -1,40 +0,0 @@ -numpy -dask -databroker -lmfit -matplotlib -pandas -pillow -pyyaml -scipy -cython -tifffile -tqdm -ipython -dill -reportlab -#pytables -theano -#git+https://github.com/soft-matter/slicerator.git -git+https://github.com/scikit-beam/scikit-beam.git#egg=scikit-beam -git+https://github.com/NSLS-II-CHX/eiger-io.git#eiger-io -git+https://github.com/NSLS-II-CHX/chxtools.git#egg=chxtools -git+https://github.com/Nikea/xray-vision.git#Xray-vision -git+https://github.com/ChrisBeaumont/mpl-modest-image -#git+https://github.com/tqdm/tqdm.git#tqdm -#git+https://github.com/cython/cython.git#cython - -#Problem: because of the proxy problem -#Searching for slicerator>=0.9.7 -#Reading https://pypi.python.org/simple/slicerator/ -#Download error on https://pypi.python.org/simple/slicerator/: [Errno -2] Name or service not known -- Some packages may #not be found! -#Couldn't retrieve index page for 'slicerator' -#Scanning index of all packages (this may take a while) -#Reading https://pypi.python.org/simple/ -#Download error on https://pypi.python.org/simple/: [Errno -2] Name or service not known -- Some packages may not be found! -#Do local packages or working download links found for slicerator>=0.9.7 -#error: Could not find suitable distribution for Requirement.parse('slicerator>=0.9.7') - - - -